1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
29 /** Parser token indices. */
52 /* Top-level command. */
54 /* Sub-level commands. */
59 /* Top-level command. */
61 /* Sub-level commands. */
70 /* Destroy arguments. */
73 /* Query arguments. */
79 /* Validate/create arguments. */
86 /* Validate/create pattern. */
123 ITEM_VLAN_INNER_TYPE,
155 ITEM_E_TAG_GRP_ECID_B,
164 ITEM_GRE_C_RSVD0_VER,
180 ITEM_ARP_ETH_IPV4_SHA,
181 ITEM_ARP_ETH_IPV4_SPA,
182 ITEM_ARP_ETH_IPV4_THA,
183 ITEM_ARP_ETH_IPV4_TPA,
185 ITEM_IPV6_EXT_NEXT_HDR,
190 ITEM_ICMP6_ND_NS_TARGET_ADDR,
192 ITEM_ICMP6_ND_NA_TARGET_ADDR,
194 ITEM_ICMP6_ND_OPT_TYPE,
195 ITEM_ICMP6_ND_OPT_SLA_ETH,
196 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
197 ITEM_ICMP6_ND_OPT_TLA_ETH,
198 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
211 ITEM_HIGIG2_CLASSIFICATION,
217 ITEM_L2TPV3OIP_SESSION_ID,
219 /* Validate/create actions. */
239 ACTION_RSS_FUNC_DEFAULT,
240 ACTION_RSS_FUNC_TOEPLITZ,
241 ACTION_RSS_FUNC_SIMPLE_XOR,
242 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
254 ACTION_PHY_PORT_ORIGINAL,
255 ACTION_PHY_PORT_INDEX,
257 ACTION_PORT_ID_ORIGINAL,
261 ACTION_OF_SET_MPLS_TTL,
262 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
263 ACTION_OF_DEC_MPLS_TTL,
264 ACTION_OF_SET_NW_TTL,
265 ACTION_OF_SET_NW_TTL_NW_TTL,
266 ACTION_OF_DEC_NW_TTL,
267 ACTION_OF_COPY_TTL_OUT,
268 ACTION_OF_COPY_TTL_IN,
271 ACTION_OF_PUSH_VLAN_ETHERTYPE,
272 ACTION_OF_SET_VLAN_VID,
273 ACTION_OF_SET_VLAN_VID_VLAN_VID,
274 ACTION_OF_SET_VLAN_PCP,
275 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
277 ACTION_OF_POP_MPLS_ETHERTYPE,
279 ACTION_OF_PUSH_MPLS_ETHERTYPE,
286 ACTION_MPLSOGRE_ENCAP,
287 ACTION_MPLSOGRE_DECAP,
288 ACTION_MPLSOUDP_ENCAP,
289 ACTION_MPLSOUDP_DECAP,
291 ACTION_SET_IPV4_SRC_IPV4_SRC,
293 ACTION_SET_IPV4_DST_IPV4_DST,
295 ACTION_SET_IPV6_SRC_IPV6_SRC,
297 ACTION_SET_IPV6_DST_IPV6_DST,
299 ACTION_SET_TP_SRC_TP_SRC,
301 ACTION_SET_TP_DST_TP_DST,
307 ACTION_SET_MAC_SRC_MAC_SRC,
309 ACTION_SET_MAC_DST_MAC_DST,
311 ACTION_INC_TCP_SEQ_VALUE,
313 ACTION_DEC_TCP_SEQ_VALUE,
315 ACTION_INC_TCP_ACK_VALUE,
317 ACTION_DEC_TCP_ACK_VALUE,
320 ACTION_RAW_ENCAP_INDEX,
321 ACTION_RAW_ENCAP_INDEX_VALUE,
322 ACTION_RAW_DECAP_INDEX,
323 ACTION_RAW_DECAP_INDEX_VALUE,
326 ACTION_SET_TAG_INDEX,
329 ACTION_SET_META_DATA,
330 ACTION_SET_META_MASK,
331 ACTION_SET_IPV4_DSCP,
332 ACTION_SET_IPV4_DSCP_VALUE,
333 ACTION_SET_IPV6_DSCP,
334 ACTION_SET_IPV6_DSCP_VALUE,
337 /** Maximum size (in bytes) for pattern in struct rte_flow_item_raw. */
338 #define ITEM_RAW_PATTERN_SIZE 40
340 /** Storage size for struct rte_flow_item_raw including trailing pattern bytes. */
341 #define ITEM_RAW_SIZE \
342 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
344 /** Maximum number of queue indices in struct rte_flow_action_rss. */
345 #define ACTION_RSS_QUEUE_NUM 128
347 /** Storage for struct rte_flow_action_rss including external data. */
348 struct action_rss_data {
349 struct rte_flow_action_rss conf;
350 uint8_t key[RSS_HASH_KEY_LENGTH];
351 uint16_t queue[ACTION_RSS_QUEUE_NUM];
354 /** Maximum data size in struct rte_flow_action_raw_encap. */
355 #define ACTION_RAW_ENCAP_MAX_DATA 128
/** Number of configuration slots in raw_encap_confs[]/raw_decap_confs[]. */
356 #define RAW_ENCAP_CONFS_MAX_NUM 8
358 /** Storage for struct rte_flow_action_raw_encap. */
359 struct raw_encap_conf {
360 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
361 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
/** File-scope storage pool for raw encap configurations. */
365 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
367 /** Storage for struct rte_flow_action_raw_encap including external data. */
368 struct action_raw_encap_data {
369 struct rte_flow_action_raw_encap conf;
370 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
371 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
375 /** Storage for struct rte_flow_action_raw_decap. */
376 struct raw_decap_conf {
377 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
/** File-scope storage pool for raw decap configurations. */
381 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
383 /** Storage for struct rte_flow_action_raw_decap including external data. */
384 struct action_raw_decap_data {
385 struct rte_flow_action_raw_decap conf;
386 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
390 struct vxlan_encap_conf vxlan_encap_conf = {
394 .vni = "\x00\x00\x00",
396 .udp_dst = RTE_BE16(4789),
397 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
398 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
399 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
400 "\x00\x00\x00\x00\x00\x00\x00\x01",
401 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
402 "\x00\x00\x00\x00\x00\x00\x11\x11",
406 .eth_src = "\x00\x00\x00\x00\x00\x00",
407 .eth_dst = "\xff\xff\xff\xff\xff\xff",
410 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
411 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
413 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
414 struct action_vxlan_encap_data {
415 struct rte_flow_action_vxlan_encap conf;
416 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
417 struct rte_flow_item_eth item_eth;
418 struct rte_flow_item_vlan item_vlan;
420 struct rte_flow_item_ipv4 item_ipv4;
421 struct rte_flow_item_ipv6 item_ipv6;
423 struct rte_flow_item_udp item_udp;
424 struct rte_flow_item_vxlan item_vxlan;
427 struct nvgre_encap_conf nvgre_encap_conf = {
430 .tni = "\x00\x00\x00",
431 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
432 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
433 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
434 "\x00\x00\x00\x00\x00\x00\x00\x01",
435 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
436 "\x00\x00\x00\x00\x00\x00\x11\x11",
438 .eth_src = "\x00\x00\x00\x00\x00\x00",
439 .eth_dst = "\xff\xff\xff\xff\xff\xff",
442 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
443 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
445 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
446 struct action_nvgre_encap_data {
447 struct rte_flow_action_nvgre_encap conf;
448 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
449 struct rte_flow_item_eth item_eth;
450 struct rte_flow_item_vlan item_vlan;
452 struct rte_flow_item_ipv4 item_ipv4;
453 struct rte_flow_item_ipv6 item_ipv6;
455 struct rte_flow_item_nvgre item_nvgre;
/*
 * File-scope (zero-initialized) configuration storage for the L2 and
 * MPLSoGRE/MPLSoUDP encap/decap actions.  The struct types are defined
 * elsewhere (not visible in this chunk) — presumably a testpmd header;
 * verify against the includes.
 */
458 struct l2_encap_conf l2_encap_conf;
460 struct l2_decap_conf l2_decap_conf;
462 struct mplsogre_encap_conf mplsogre_encap_conf;
464 struct mplsogre_decap_conf mplsogre_decap_conf;
466 struct mplsoudp_encap_conf mplsoudp_encap_conf;
468 struct mplsoudp_decap_conf mplsoudp_decap_conf;
470 /** Maximum number of subsequent tokens and arguments on the stack. */
471 #define CTX_STACK_SIZE 16
473 /** Parser context. */
475 /** Stack of subsequent token lists to process. */
476 const enum index *next[CTX_STACK_SIZE];
477 /** Arguments for stacked tokens. */
478 const void *args[CTX_STACK_SIZE];
479 enum index curr; /**< Current token index. */
480 enum index prev; /**< Index of the last token seen. */
481 int next_num; /**< Number of entries in next[]. */
482 int args_num; /**< Number of entries in args[]. */
483 uint32_t eol:1; /**< EOL has been detected. */
484 uint32_t last:1; /**< No more arguments. */
485 portid_t port; /**< Current port ID (for completions). */
486 uint32_t objdata; /**< Object-specific data. */
487 void *object; /**< Address of current object for relative offsets. */
488 void *objmask; /**< Object a full mask must be written to. */
491 /** Token argument. */
493 uint32_t hton:1; /**< Use network byte ordering. */
494 uint32_t sign:1; /**< Value is signed. */
495 uint32_t bounded:1; /**< Value is bounded. */
496 uintmax_t min; /**< Minimum value if bounded. */
497 uintmax_t max; /**< Maximum value if bounded. */
498 uint32_t offset; /**< Relative offset from ctx->object. */
499 uint32_t size; /**< Field size. */
500 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
503 /** Parser token definition. */
505 /** Type displayed during completion (defaults to "TOKEN"). */
507 /** Help displayed during completion (defaults to token name). */
509 /** Private data used by parser functions. */
512 * Lists of subsequent tokens to push on the stack. Each call to the
513 * parser consumes the last entry of that stack.
515 const enum index *const *next;
516 /** Arguments stack for subsequent tokens that need them. */
517 const struct arg *const *args;
519 * Token-processing callback, returns -1 in case of error, the
520 * length of the matched string otherwise. If NULL, attempts to
521 * match the token name.
523 * If buf is not NULL, the result should be stored in it according
524 * to context. An error is returned if not large enough.
526 int (*call)(struct context *ctx, const struct token *token,
527 const char *str, unsigned int len,
528 void *buf, unsigned int size);
530 * Callback that provides possible values for this token, used for
531 * completion. Returns -1 in case of error, the number of possible
532 * values otherwise. If NULL, the token name is used.
534 * If buf is not NULL, entry index ent is written to buf and the
535 * full length of the entry is returned (same behavior as
538 int (*comp)(struct context *ctx, const struct token *token,
539 unsigned int ent, char *buf, unsigned int size);
540 /** Mandatory token name, no default value. */
544 /** Static initializer for the next field (list of lists, NULL-terminated). */
545 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
547 /** Static initializer for a NEXT() entry (token list, ZERO-terminated). */
548 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
550 /** Static initializer for the args field (argument list, NULL-terminated). */
551 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
553 /** Static initializer for ARGS() to target a field. */
554 #define ARGS_ENTRY(s, f) \
555 (&(const struct arg){ \
556 .offset = offsetof(s, f), \
557 .size = sizeof(((s *)0)->f), \
560 /** Static initializer for ARGS() to target a bit-field. */
561 #define ARGS_ENTRY_BF(s, f, b) \
562 (&(const struct arg){ \
564 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
567 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
568 #define ARGS_ENTRY_MASK(s, f, m) \
569 (&(const struct arg){ \
570 .offset = offsetof(s, f), \
571 .size = sizeof(((s *)0)->f), \
572 .mask = (const void *)(m), \
575 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
576 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
577 (&(const struct arg){ \
579 .offset = offsetof(s, f), \
580 .size = sizeof(((s *)0)->f), \
581 .mask = (const void *)(m), \
584 /** Static initializer for ARGS() to target a pointer. */
585 #define ARGS_ENTRY_PTR(s, f) \
586 (&(const struct arg){ \
587 .size = sizeof(*((s *)0)->f), \
590 /** Static initializer for ARGS() with arbitrary offset and size. */
591 #define ARGS_ENTRY_ARB(o, s) \
592 (&(const struct arg){ \
597 /** Same as ARGS_ENTRY_ARB() with bounded values. */
598 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
599 (&(const struct arg){ \
607 /** Same as ARGS_ENTRY() using network byte ordering. */
608 #define ARGS_ENTRY_HTON(s, f) \
609 (&(const struct arg){ \
611 .offset = offsetof(s, f), \
612 .size = sizeof(((s *)0)->f), \
615 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
616 #define ARG_ENTRY_HTON(s) \
617 (&(const struct arg){ \
623 /** Parser output buffer layout expected by cmd_flow_parsed(). */
625 enum index command; /**< Flow command. */
626 portid_t port; /**< Affected port ID. */
629 struct rte_flow_attr attr;
630 struct rte_flow_item *pattern;
631 struct rte_flow_action *actions;
635 } vc; /**< Validate/create arguments. */
639 } destroy; /**< Destroy arguments. */
642 struct rte_flow_action action;
643 } query; /**< Query arguments. */
647 } list; /**< List arguments. */
650 } isolate; /**< Isolated mode arguments. */
651 } args; /**< Command arguments. */
654 /** Private data for pattern items. */
655 struct parse_item_priv {
656 enum rte_flow_item_type type; /**< Item type. */
657 uint32_t size; /**< Size of item specification structure. */
660 #define PRIV_ITEM(t, s) \
661 (&(const struct parse_item_priv){ \
662 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
666 /** Private data for actions. */
667 struct parse_action_priv {
668 enum rte_flow_action_type type; /**< Action type. */
669 uint32_t size; /**< Size of action configuration structure. */
672 #define PRIV_ACTION(t, s) \
673 (&(const struct parse_action_priv){ \
674 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
678 static const enum index next_vc_attr[] = {
688 static const enum index next_destroy_attr[] = {
694 static const enum index next_list_attr[] = {
700 static const enum index item_param[] = {
709 static const enum index next_item[] = {
745 ITEM_ICMP6_ND_OPT_SLA_ETH,
746 ITEM_ICMP6_ND_OPT_TLA_ETH,
760 static const enum index item_fuzzy[] = {
766 static const enum index item_any[] = {
772 static const enum index item_vf[] = {
778 static const enum index item_phy_port[] = {
784 static const enum index item_port_id[] = {
790 static const enum index item_mark[] = {
796 static const enum index item_raw[] = {
806 static const enum index item_eth[] = {
814 static const enum index item_vlan[] = {
819 ITEM_VLAN_INNER_TYPE,
824 static const enum index item_ipv4[] = {
834 static const enum index item_ipv6[] = {
845 static const enum index item_icmp[] = {
852 static const enum index item_udp[] = {
859 static const enum index item_tcp[] = {
867 static const enum index item_sctp[] = {
876 static const enum index item_vxlan[] = {
882 static const enum index item_e_tag[] = {
883 ITEM_E_TAG_GRP_ECID_B,
888 static const enum index item_nvgre[] = {
894 static const enum index item_mpls[] = {
902 static const enum index item_gre[] = {
904 ITEM_GRE_C_RSVD0_VER,
912 static const enum index item_gre_key[] = {
918 static const enum index item_gtp[] = {
924 static const enum index item_geneve[] = {
931 static const enum index item_vxlan_gpe[] = {
937 static const enum index item_arp_eth_ipv4[] = {
938 ITEM_ARP_ETH_IPV4_SHA,
939 ITEM_ARP_ETH_IPV4_SPA,
940 ITEM_ARP_ETH_IPV4_THA,
941 ITEM_ARP_ETH_IPV4_TPA,
946 static const enum index item_ipv6_ext[] = {
947 ITEM_IPV6_EXT_NEXT_HDR,
952 static const enum index item_icmp6[] = {
959 static const enum index item_icmp6_nd_ns[] = {
960 ITEM_ICMP6_ND_NS_TARGET_ADDR,
965 static const enum index item_icmp6_nd_na[] = {
966 ITEM_ICMP6_ND_NA_TARGET_ADDR,
971 static const enum index item_icmp6_nd_opt[] = {
972 ITEM_ICMP6_ND_OPT_TYPE,
977 static const enum index item_icmp6_nd_opt_sla_eth[] = {
978 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
983 static const enum index item_icmp6_nd_opt_tla_eth[] = {
984 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
989 static const enum index item_meta[] = {
995 static const enum index item_gtp_psc[] = {
1002 static const enum index item_pppoed[] = {
1008 static const enum index item_pppoes[] = {
1014 static const enum index item_pppoe_proto_id[] = {
1015 ITEM_PPPOE_PROTO_ID,
1020 static const enum index item_higig2[] = {
1021 ITEM_HIGIG2_CLASSIFICATION,
1027 static const enum index next_set_raw[] = {
1033 static const enum index item_tag[] = {
1040 static const enum index item_l2tpv3oip[] = {
1041 ITEM_L2TPV3OIP_SESSION_ID,
1046 static const enum index next_action[] = {
1062 ACTION_OF_SET_MPLS_TTL,
1063 ACTION_OF_DEC_MPLS_TTL,
1064 ACTION_OF_SET_NW_TTL,
1065 ACTION_OF_DEC_NW_TTL,
1066 ACTION_OF_COPY_TTL_OUT,
1067 ACTION_OF_COPY_TTL_IN,
1069 ACTION_OF_PUSH_VLAN,
1070 ACTION_OF_SET_VLAN_VID,
1071 ACTION_OF_SET_VLAN_PCP,
1073 ACTION_OF_PUSH_MPLS,
1080 ACTION_MPLSOGRE_ENCAP,
1081 ACTION_MPLSOGRE_DECAP,
1082 ACTION_MPLSOUDP_ENCAP,
1083 ACTION_MPLSOUDP_DECAP,
1084 ACTION_SET_IPV4_SRC,
1085 ACTION_SET_IPV4_DST,
1086 ACTION_SET_IPV6_SRC,
1087 ACTION_SET_IPV6_DST,
1103 ACTION_SET_IPV4_DSCP,
1104 ACTION_SET_IPV6_DSCP,
1108 static const enum index action_mark[] = {
1114 static const enum index action_queue[] = {
1120 static const enum index action_count[] = {
1122 ACTION_COUNT_SHARED,
1127 static const enum index action_rss[] = {
1138 static const enum index action_vf[] = {
1145 static const enum index action_phy_port[] = {
1146 ACTION_PHY_PORT_ORIGINAL,
1147 ACTION_PHY_PORT_INDEX,
1152 static const enum index action_port_id[] = {
1153 ACTION_PORT_ID_ORIGINAL,
1159 static const enum index action_meter[] = {
1165 static const enum index action_of_set_mpls_ttl[] = {
1166 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1171 static const enum index action_of_set_nw_ttl[] = {
1172 ACTION_OF_SET_NW_TTL_NW_TTL,
1177 static const enum index action_of_push_vlan[] = {
1178 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1183 static const enum index action_of_set_vlan_vid[] = {
1184 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1189 static const enum index action_of_set_vlan_pcp[] = {
1190 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1195 static const enum index action_of_pop_mpls[] = {
1196 ACTION_OF_POP_MPLS_ETHERTYPE,
1201 static const enum index action_of_push_mpls[] = {
1202 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1207 static const enum index action_set_ipv4_src[] = {
1208 ACTION_SET_IPV4_SRC_IPV4_SRC,
1213 static const enum index action_set_mac_src[] = {
1214 ACTION_SET_MAC_SRC_MAC_SRC,
1219 static const enum index action_set_ipv4_dst[] = {
1220 ACTION_SET_IPV4_DST_IPV4_DST,
1225 static const enum index action_set_ipv6_src[] = {
1226 ACTION_SET_IPV6_SRC_IPV6_SRC,
1231 static const enum index action_set_ipv6_dst[] = {
1232 ACTION_SET_IPV6_DST_IPV6_DST,
1237 static const enum index action_set_tp_src[] = {
1238 ACTION_SET_TP_SRC_TP_SRC,
1243 static const enum index action_set_tp_dst[] = {
1244 ACTION_SET_TP_DST_TP_DST,
1249 static const enum index action_set_ttl[] = {
1255 static const enum index action_jump[] = {
1261 static const enum index action_set_mac_dst[] = {
1262 ACTION_SET_MAC_DST_MAC_DST,
1267 static const enum index action_inc_tcp_seq[] = {
1268 ACTION_INC_TCP_SEQ_VALUE,
1273 static const enum index action_dec_tcp_seq[] = {
1274 ACTION_DEC_TCP_SEQ_VALUE,
1279 static const enum index action_inc_tcp_ack[] = {
1280 ACTION_INC_TCP_ACK_VALUE,
1285 static const enum index action_dec_tcp_ack[] = {
1286 ACTION_DEC_TCP_ACK_VALUE,
1291 static const enum index action_raw_encap[] = {
1292 ACTION_RAW_ENCAP_INDEX,
1297 static const enum index action_raw_decap[] = {
1298 ACTION_RAW_DECAP_INDEX,
1303 static const enum index action_set_tag[] = {
1304 ACTION_SET_TAG_DATA,
1305 ACTION_SET_TAG_INDEX,
1306 ACTION_SET_TAG_MASK,
1311 static const enum index action_set_meta[] = {
1312 ACTION_SET_META_DATA,
1313 ACTION_SET_META_MASK,
1318 static const enum index action_set_ipv4_dscp[] = {
1319 ACTION_SET_IPV4_DSCP_VALUE,
1324 static const enum index action_set_ipv6_dscp[] = {
1325 ACTION_SET_IPV6_DSCP_VALUE,
/* Forward declarations of parser/completion callbacks referenced by token_list. */
1330 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1331 const char *, unsigned int,
1332 void *, unsigned int);
1333 static int parse_set_init(struct context *, const struct token *,
1334 const char *, unsigned int,
1335 void *, unsigned int);
1336 static int parse_init(struct context *, const struct token *,
1337 const char *, unsigned int,
1338 void *, unsigned int);
1339 static int parse_vc(struct context *, const struct token *,
1340 const char *, unsigned int,
1341 void *, unsigned int);
1342 static int parse_vc_spec(struct context *, const struct token *,
1343 const char *, unsigned int, void *, unsigned int);
1344 static int parse_vc_conf(struct context *, const struct token *,
1345 const char *, unsigned int, void *, unsigned int);
1346 static int parse_vc_action_rss(struct context *, const struct token *,
1347 const char *, unsigned int, void *,
1349 static int parse_vc_action_rss_func(struct context *, const struct token *,
1350 const char *, unsigned int, void *,
1352 static int parse_vc_action_rss_type(struct context *, const struct token *,
1353 const char *, unsigned int, void *,
1355 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1356 const char *, unsigned int, void *,
1358 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1359 const char *, unsigned int, void *,
1361 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1362 const char *, unsigned int, void *,
1364 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1365 const char *, unsigned int, void *,
1367 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1368 const char *, unsigned int, void *,
1370 static int parse_vc_action_mplsogre_encap(struct context *,
1371 const struct token *, const char *,
1372 unsigned int, void *, unsigned int);
1373 static int parse_vc_action_mplsogre_decap(struct context *,
1374 const struct token *, const char *,
1375 unsigned int, void *, unsigned int);
1376 static int parse_vc_action_mplsoudp_encap(struct context *,
1377 const struct token *, const char *,
1378 unsigned int, void *, unsigned int);
1379 static int parse_vc_action_mplsoudp_decap(struct context *,
1380 const struct token *, const char *,
1381 unsigned int, void *, unsigned int);
1382 static int parse_vc_action_raw_encap(struct context *,
1383 const struct token *, const char *,
1384 unsigned int, void *, unsigned int);
1385 static int parse_vc_action_raw_decap(struct context *,
1386 const struct token *, const char *,
1387 unsigned int, void *, unsigned int);
1388 static int parse_vc_action_raw_encap_index(struct context *,
1389 const struct token *, const char *,
1390 unsigned int, void *, unsigned int);
1391 static int parse_vc_action_raw_decap_index(struct context *,
1392 const struct token *, const char *,
1393 unsigned int, void *, unsigned int);
1394 static int parse_vc_action_set_meta(struct context *ctx,
1395 const struct token *token, const char *str,
1396 unsigned int len, void *buf,
1398 static int parse_destroy(struct context *, const struct token *,
1399 const char *, unsigned int,
1400 void *, unsigned int);
1401 static int parse_flush(struct context *, const struct token *,
1402 const char *, unsigned int,
1403 void *, unsigned int);
1404 static int parse_query(struct context *, const struct token *,
1405 const char *, unsigned int,
1406 void *, unsigned int);
1407 static int parse_action(struct context *, const struct token *,
1408 const char *, unsigned int,
1409 void *, unsigned int);
1410 static int parse_list(struct context *, const struct token *,
1411 const char *, unsigned int,
1412 void *, unsigned int);
1413 static int parse_isolate(struct context *, const struct token *,
1414 const char *, unsigned int,
1415 void *, unsigned int);
1416 static int parse_int(struct context *, const struct token *,
1417 const char *, unsigned int,
1418 void *, unsigned int);
1419 static int parse_prefix(struct context *, const struct token *,
1420 const char *, unsigned int,
1421 void *, unsigned int);
1422 static int parse_boolean(struct context *, const struct token *,
1423 const char *, unsigned int,
1424 void *, unsigned int);
1425 static int parse_string(struct context *, const struct token *,
1426 const char *, unsigned int,
1427 void *, unsigned int);
1428 static int parse_hex(struct context *ctx, const struct token *token,
1429 const char *str, unsigned int len,
1430 void *buf, unsigned int size);
1431 static int parse_mac_addr(struct context *, const struct token *,
1432 const char *, unsigned int,
1433 void *, unsigned int);
1434 static int parse_ipv4_addr(struct context *, const struct token *,
1435 const char *, unsigned int,
1436 void *, unsigned int);
1437 static int parse_ipv6_addr(struct context *, const struct token *,
1438 const char *, unsigned int,
1439 void *, unsigned int);
1440 static int parse_port(struct context *, const struct token *,
1441 const char *, unsigned int,
1442 void *, unsigned int);
1443 static int comp_none(struct context *, const struct token *,
1444 unsigned int, char *, unsigned int);
1445 static int comp_boolean(struct context *, const struct token *,
1446 unsigned int, char *, unsigned int);
1447 static int comp_action(struct context *, const struct token *,
1448 unsigned int, char *, unsigned int);
1449 static int comp_port(struct context *, const struct token *,
1450 unsigned int, char *, unsigned int);
1451 static int comp_rule_id(struct context *, const struct token *,
1452 unsigned int, char *, unsigned int);
1453 static int comp_vc_action_rss_type(struct context *, const struct token *,
1454 unsigned int, char *, unsigned int);
1455 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1456 unsigned int, char *, unsigned int);
1457 static int comp_set_raw_index(struct context *, const struct token *,
1458 unsigned int, char *, unsigned int);
1460 /** Token definitions. */
1461 static const struct token token_list[] = {
1462 /* Special tokens. */
1465 .help = "null entry, abused as the entry point",
1466 .next = NEXT(NEXT_ENTRY(FLOW)),
1471 .help = "command may end here",
1474 .name = "START_SET",
1475 .help = "null entry, abused as the entry point for set",
1476 .next = NEXT(NEXT_ENTRY(SET)),
1481 .help = "set command may end here",
1483 /* Common tokens. */
1487 .help = "integer value",
1492 .name = "{unsigned}",
1494 .help = "unsigned integer value",
1501 .help = "prefix length for bit-mask",
1502 .call = parse_prefix,
1506 .name = "{boolean}",
1508 .help = "any boolean value",
1509 .call = parse_boolean,
1510 .comp = comp_boolean,
1515 .help = "fixed string",
1516 .call = parse_string,
1522 .help = "fixed string",
1527 .name = "{MAC address}",
1529 .help = "standard MAC address notation",
1530 .call = parse_mac_addr,
1534 .name = "{IPv4 address}",
1535 .type = "IPV4 ADDRESS",
1536 .help = "standard IPv4 address notation",
1537 .call = parse_ipv4_addr,
1541 .name = "{IPv6 address}",
1542 .type = "IPV6 ADDRESS",
1543 .help = "standard IPv6 address notation",
1544 .call = parse_ipv6_addr,
1548 .name = "{rule id}",
1550 .help = "rule identifier",
1552 .comp = comp_rule_id,
1555 .name = "{port_id}",
1557 .help = "port identifier",
1562 .name = "{group_id}",
1564 .help = "group identifier",
1568 [PRIORITY_LEVEL] = {
1571 .help = "priority level",
1575 /* Top-level command. */
1578 .type = "{command} {port_id} [{arg} [...]]",
1579 .help = "manage ingress/egress flow rules",
1580 .next = NEXT(NEXT_ENTRY
1590 /* Sub-level commands. */
1593 .help = "check whether a flow rule can be created",
1594 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1595 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1600 .help = "create a flow rule",
1601 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1602 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1607 .help = "destroy specific flow rules",
1608 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1609 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1610 .call = parse_destroy,
1614 .help = "destroy all flow rules",
1615 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1616 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1617 .call = parse_flush,
1621 .help = "query an existing flow rule",
1622 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1623 NEXT_ENTRY(RULE_ID),
1624 NEXT_ENTRY(PORT_ID)),
1625 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1626 ARGS_ENTRY(struct buffer, args.query.rule),
1627 ARGS_ENTRY(struct buffer, port)),
1628 .call = parse_query,
1632 .help = "list existing flow rules",
1633 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1634 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1639 .help = "restrict ingress traffic to the defined flow rules",
1640 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1641 NEXT_ENTRY(PORT_ID)),
1642 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1643 ARGS_ENTRY(struct buffer, port)),
1644 .call = parse_isolate,
1646 /* Destroy arguments. */
1649 .help = "specify a rule identifier",
1650 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1651 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1652 .call = parse_destroy,
1654 /* Query arguments. */
1658 .help = "action to query, must be part of the rule",
1659 .call = parse_action,
1660 .comp = comp_action,
1662 /* List arguments. */
1665 .help = "specify a group",
1666 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1667 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1670 /* Validate/create attributes. */
1673 .help = "specify a group",
1674 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1675 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1680 .help = "specify a priority level",
1681 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1682 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1687 .help = "affect rule to ingress",
1688 .next = NEXT(next_vc_attr),
1693 .help = "affect rule to egress",
1694 .next = NEXT(next_vc_attr),
1699 .help = "apply rule directly to endpoints found in pattern",
1700 .next = NEXT(next_vc_attr),
1703 /* Validate/create pattern. */
1706 .help = "submit a list of pattern items",
1707 .next = NEXT(next_item),
1712 .help = "match value perfectly (with full bit-mask)",
1713 .call = parse_vc_spec,
1715 [ITEM_PARAM_SPEC] = {
1717 .help = "match value according to configured bit-mask",
1718 .call = parse_vc_spec,
1720 [ITEM_PARAM_LAST] = {
1722 .help = "specify upper bound to establish a range",
1723 .call = parse_vc_spec,
1725 [ITEM_PARAM_MASK] = {
1727 .help = "specify bit-mask with relevant bits set to one",
1728 .call = parse_vc_spec,
1730 [ITEM_PARAM_PREFIX] = {
1732 .help = "generate bit-mask from a prefix length",
1733 .call = parse_vc_spec,
1737 .help = "specify next pattern item",
1738 .next = NEXT(next_item),
1742 .help = "end list of pattern items",
1743 .priv = PRIV_ITEM(END, 0),
1744 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1749 .help = "no-op pattern item",
1750 .priv = PRIV_ITEM(VOID, 0),
1751 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1756 .help = "perform actions when pattern does not match",
1757 .priv = PRIV_ITEM(INVERT, 0),
1758 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1763 .help = "match any protocol for the current layer",
1764 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1765 .next = NEXT(item_any),
1770 .help = "number of layers covered",
1771 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1772 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1776 .help = "match traffic from/to the physical function",
1777 .priv = PRIV_ITEM(PF, 0),
1778 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1783 .help = "match traffic from/to a virtual function ID",
1784 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1785 .next = NEXT(item_vf),
1791 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1792 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1796 .help = "match traffic from/to a specific physical port",
1797 .priv = PRIV_ITEM(PHY_PORT,
1798 sizeof(struct rte_flow_item_phy_port)),
1799 .next = NEXT(item_phy_port),
1802 [ITEM_PHY_PORT_INDEX] = {
1804 .help = "physical port index",
1805 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1806 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1810 .help = "match traffic from/to a given DPDK port ID",
1811 .priv = PRIV_ITEM(PORT_ID,
1812 sizeof(struct rte_flow_item_port_id)),
1813 .next = NEXT(item_port_id),
1816 [ITEM_PORT_ID_ID] = {
1818 .help = "DPDK port ID",
1819 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1820 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1824 .help = "match traffic against value set in previously matched rule",
1825 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1826 .next = NEXT(item_mark),
1831 .help = "Integer value to match against",
1832 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1833 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1837 .help = "match an arbitrary byte string",
1838 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1839 .next = NEXT(item_raw),
1842 [ITEM_RAW_RELATIVE] = {
1844 .help = "look for pattern after the previous item",
1845 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1846 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1849 [ITEM_RAW_SEARCH] = {
1851 .help = "search pattern from offset (see also limit)",
1852 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1853 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1856 [ITEM_RAW_OFFSET] = {
1858 .help = "absolute or relative offset for pattern",
1859 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1860 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1862 [ITEM_RAW_LIMIT] = {
1864 .help = "search area limit for start of pattern",
1865 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1866 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1868 [ITEM_RAW_PATTERN] = {
1870 .help = "byte string to look for",
1871 .next = NEXT(item_raw,
1873 NEXT_ENTRY(ITEM_PARAM_IS,
1876 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1877 ARGS_ENTRY(struct rte_flow_item_raw, length),
1878 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1879 ITEM_RAW_PATTERN_SIZE)),
1883 .help = "match Ethernet header",
1884 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1885 .next = NEXT(item_eth),
1890 .help = "destination MAC",
1891 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1892 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1896 .help = "source MAC",
1897 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1898 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1902 .help = "EtherType",
1903 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1904 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1908 .help = "match 802.1Q/ad VLAN tag",
1909 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1910 .next = NEXT(item_vlan),
1915 .help = "tag control information",
1916 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1917 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1921 .help = "priority code point",
1922 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1923 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1928 .help = "drop eligible indicator",
1929 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1930 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1935 .help = "VLAN identifier",
1936 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1937 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1940 [ITEM_VLAN_INNER_TYPE] = {
1941 .name = "inner_type",
1942 .help = "inner EtherType",
1943 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1944 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1949 .help = "match IPv4 header",
1950 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1951 .next = NEXT(item_ipv4),
1956 .help = "type of service",
1957 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1958 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1959 hdr.type_of_service)),
1963 .help = "time to live",
1964 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1965 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1968 [ITEM_IPV4_PROTO] = {
1970 .help = "next protocol ID",
1971 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1972 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1973 hdr.next_proto_id)),
1977 .help = "source address",
1978 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1979 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1984 .help = "destination address",
1985 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1986 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1991 .help = "match IPv6 header",
1992 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1993 .next = NEXT(item_ipv6),
1998 .help = "traffic class",
1999 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2000 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2002 "\x0f\xf0\x00\x00")),
2004 [ITEM_IPV6_FLOW] = {
2006 .help = "flow label",
2007 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2008 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2010 "\x00\x0f\xff\xff")),
2012 [ITEM_IPV6_PROTO] = {
2014 .help = "protocol (next header)",
2015 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2016 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2021 .help = "hop limit",
2022 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2023 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2028 .help = "source address",
2029 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2030 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2035 .help = "destination address",
2036 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2037 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2042 .help = "match ICMP header",
2043 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2044 .next = NEXT(item_icmp),
2047 [ITEM_ICMP_TYPE] = {
2049 .help = "ICMP packet type",
2050 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2051 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2054 [ITEM_ICMP_CODE] = {
2056 .help = "ICMP packet code",
2057 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2058 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2063 .help = "match UDP header",
2064 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2065 .next = NEXT(item_udp),
2070 .help = "UDP source port",
2071 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2072 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2077 .help = "UDP destination port",
2078 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2079 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2084 .help = "match TCP header",
2085 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2086 .next = NEXT(item_tcp),
2091 .help = "TCP source port",
2092 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2093 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2098 .help = "TCP destination port",
2099 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2100 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2103 [ITEM_TCP_FLAGS] = {
2105 .help = "TCP flags",
2106 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2107 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2112 .help = "match SCTP header",
2113 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2114 .next = NEXT(item_sctp),
2119 .help = "SCTP source port",
2120 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2121 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2126 .help = "SCTP destination port",
2127 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2128 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2133 .help = "validation tag",
2134 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2135 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2138 [ITEM_SCTP_CKSUM] = {
2141 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2142 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2147 .help = "match VXLAN header",
2148 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2149 .next = NEXT(item_vxlan),
2152 [ITEM_VXLAN_VNI] = {
2154 .help = "VXLAN identifier",
2155 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2156 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2160 .help = "match E-Tag header",
2161 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2162 .next = NEXT(item_e_tag),
2165 [ITEM_E_TAG_GRP_ECID_B] = {
2166 .name = "grp_ecid_b",
2167 .help = "GRP and E-CID base",
2168 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2169 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2175 .help = "match NVGRE header",
2176 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2177 .next = NEXT(item_nvgre),
2180 [ITEM_NVGRE_TNI] = {
2182 .help = "virtual subnet ID",
2183 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2184 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2188 .help = "match MPLS header",
2189 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2190 .next = NEXT(item_mpls),
2193 [ITEM_MPLS_LABEL] = {
2195 .help = "MPLS label",
2196 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2197 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2203 .help = "MPLS Traffic Class",
2204 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2205 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2211 .help = "MPLS Bottom-of-Stack",
2212 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2213 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2219 .help = "match GRE header",
2220 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2221 .next = NEXT(item_gre),
2224 [ITEM_GRE_PROTO] = {
2226 .help = "GRE protocol type",
2227 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2228 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2231 [ITEM_GRE_C_RSVD0_VER] = {
2232 .name = "c_rsvd0_ver",
2234 "checksum (1b), undefined (1b), key bit (1b),"
2235 " sequence number (1b), reserved 0 (9b),"
2237 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2238 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2241 [ITEM_GRE_C_BIT] = {
2243 .help = "checksum bit (C)",
2244 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2245 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2247 "\x80\x00\x00\x00")),
2249 [ITEM_GRE_S_BIT] = {
2251 .help = "sequence number bit (S)",
2252 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2253 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2255 "\x10\x00\x00\x00")),
2257 [ITEM_GRE_K_BIT] = {
2259 .help = "key bit (K)",
2260 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2261 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2263 "\x20\x00\x00\x00")),
2267 .help = "fuzzy pattern match, expect faster than default",
2268 .priv = PRIV_ITEM(FUZZY,
2269 sizeof(struct rte_flow_item_fuzzy)),
2270 .next = NEXT(item_fuzzy),
2273 [ITEM_FUZZY_THRESH] = {
2275 .help = "match accuracy threshold",
2276 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2277 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2282 .help = "match GTP header",
2283 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2284 .next = NEXT(item_gtp),
2289 .help = "tunnel endpoint identifier",
2290 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2291 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2295 .help = "match GTP header",
2296 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2297 .next = NEXT(item_gtp),
2302 .help = "match GTP header",
2303 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2304 .next = NEXT(item_gtp),
2309 .help = "match GENEVE header",
2310 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2311 .next = NEXT(item_geneve),
2314 [ITEM_GENEVE_VNI] = {
2316 .help = "virtual network identifier",
2317 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2318 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2320 [ITEM_GENEVE_PROTO] = {
2322 .help = "GENEVE protocol type",
2323 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2324 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2327 [ITEM_VXLAN_GPE] = {
2328 .name = "vxlan-gpe",
2329 .help = "match VXLAN-GPE header",
2330 .priv = PRIV_ITEM(VXLAN_GPE,
2331 sizeof(struct rte_flow_item_vxlan_gpe)),
2332 .next = NEXT(item_vxlan_gpe),
2335 [ITEM_VXLAN_GPE_VNI] = {
2337 .help = "VXLAN-GPE identifier",
2338 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2339 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2342 [ITEM_ARP_ETH_IPV4] = {
2343 .name = "arp_eth_ipv4",
2344 .help = "match ARP header for Ethernet/IPv4",
2345 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2346 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2347 .next = NEXT(item_arp_eth_ipv4),
2350 [ITEM_ARP_ETH_IPV4_SHA] = {
2352 .help = "sender hardware address",
2353 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2355 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2358 [ITEM_ARP_ETH_IPV4_SPA] = {
2360 .help = "sender IPv4 address",
2361 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2363 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2366 [ITEM_ARP_ETH_IPV4_THA] = {
2368 .help = "target hardware address",
2369 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2371 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2374 [ITEM_ARP_ETH_IPV4_TPA] = {
2376 .help = "target IPv4 address",
2377 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2379 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2384 .help = "match presence of any IPv6 extension header",
2385 .priv = PRIV_ITEM(IPV6_EXT,
2386 sizeof(struct rte_flow_item_ipv6_ext)),
2387 .next = NEXT(item_ipv6_ext),
2390 [ITEM_IPV6_EXT_NEXT_HDR] = {
2392 .help = "next header",
2393 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2394 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2399 .help = "match any ICMPv6 header",
2400 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2401 .next = NEXT(item_icmp6),
2404 [ITEM_ICMP6_TYPE] = {
2406 .help = "ICMPv6 type",
2407 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2408 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2411 [ITEM_ICMP6_CODE] = {
2413 .help = "ICMPv6 code",
2414 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2415 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2418 [ITEM_ICMP6_ND_NS] = {
2419 .name = "icmp6_nd_ns",
2420 .help = "match ICMPv6 neighbor discovery solicitation",
2421 .priv = PRIV_ITEM(ICMP6_ND_NS,
2422 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2423 .next = NEXT(item_icmp6_nd_ns),
2426 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2427 .name = "target_addr",
2428 .help = "target address",
2429 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2431 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2434 [ITEM_ICMP6_ND_NA] = {
2435 .name = "icmp6_nd_na",
2436 .help = "match ICMPv6 neighbor discovery advertisement",
2437 .priv = PRIV_ITEM(ICMP6_ND_NA,
2438 sizeof(struct rte_flow_item_icmp6_nd_na)),
2439 .next = NEXT(item_icmp6_nd_na),
2442 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2443 .name = "target_addr",
2444 .help = "target address",
2445 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2447 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2450 [ITEM_ICMP6_ND_OPT] = {
2451 .name = "icmp6_nd_opt",
2452 .help = "match presence of any ICMPv6 neighbor discovery"
2454 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2455 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2456 .next = NEXT(item_icmp6_nd_opt),
2459 [ITEM_ICMP6_ND_OPT_TYPE] = {
2461 .help = "ND option type",
2462 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2464 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2467 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2468 .name = "icmp6_nd_opt_sla_eth",
2469 .help = "match ICMPv6 neighbor discovery source Ethernet"
2470 " link-layer address option",
2472 (ICMP6_ND_OPT_SLA_ETH,
2473 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2474 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2477 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2479 .help = "source Ethernet LLA",
2480 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2482 .args = ARGS(ARGS_ENTRY_HTON
2483 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2485 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2486 .name = "icmp6_nd_opt_tla_eth",
2487 .help = "match ICMPv6 neighbor discovery target Ethernet"
2488 " link-layer address option",
2490 (ICMP6_ND_OPT_TLA_ETH,
2491 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2492 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2495 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2497 .help = "target Ethernet LLA",
2498 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2500 .args = ARGS(ARGS_ENTRY_HTON
2501 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2505 .help = "match metadata header",
2506 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2507 .next = NEXT(item_meta),
2510 [ITEM_META_DATA] = {
2512 .help = "metadata value",
2513 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2514 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
2515 data, "\xff\xff\xff\xff")),
2519 .help = "match GRE key",
2520 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2521 .next = NEXT(item_gre_key),
2524 [ITEM_GRE_KEY_VALUE] = {
2526 .help = "key value",
2527 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2528 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2532 .help = "match GTP extension header with type 0x85",
2533 .priv = PRIV_ITEM(GTP_PSC,
2534 sizeof(struct rte_flow_item_gtp_psc)),
2535 .next = NEXT(item_gtp_psc),
2538 [ITEM_GTP_PSC_QFI] = {
2540 .help = "QoS flow identifier",
2541 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2542 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2545 [ITEM_GTP_PSC_PDU_T] = {
2548 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2549 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2554 .help = "match PPPoE session header",
2555 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2556 .next = NEXT(item_pppoes),
2561 .help = "match PPPoE discovery header",
2562 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2563 .next = NEXT(item_pppoed),
2566 [ITEM_PPPOE_SEID] = {
2568 .help = "session identifier",
2569 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2570 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2573 [ITEM_PPPOE_PROTO_ID] = {
2575 .help = "match PPPoE session protocol identifier",
2576 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2577 sizeof(struct rte_flow_item_pppoe_proto_id)),
2578 .next = NEXT(item_pppoe_proto_id),
2583 .help = "matches higig2 header",
2584 .priv = PRIV_ITEM(HIGIG2,
2585 sizeof(struct rte_flow_item_higig2_hdr)),
2586 .next = NEXT(item_higig2),
2589 [ITEM_HIGIG2_CLASSIFICATION] = {
2590 .name = "classification",
2591 .help = "matches classification of higig2 header",
2592 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2593 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2594 hdr.ppt1.classification)),
2596 [ITEM_HIGIG2_VID] = {
2598 .help = "matches vid of higig2 header",
2599 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2600 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2605 .help = "match tag value",
2606 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
2607 .next = NEXT(item_tag),
2612 .help = "tag value to match",
2613 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
2614 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
2616 [ITEM_TAG_INDEX] = {
2618 .help = "index of tag array to match",
2619 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
2620 NEXT_ENTRY(ITEM_PARAM_IS)),
2621 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
2623 [ITEM_L2TPV3OIP] = {
2624 .name = "l2tpv3oip",
2625 .help = "match L2TPv3 over IP header",
2626 .priv = PRIV_ITEM(L2TPV3OIP,
2627 sizeof(struct rte_flow_item_l2tpv3oip)),
2628 .next = NEXT(item_l2tpv3oip),
2631 [ITEM_L2TPV3OIP_SESSION_ID] = {
2632 .name = "session_id",
2633 .help = "session identifier",
2634 .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param),
2635 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip,
2639 /* Validate/create actions. */
2642 .help = "submit a list of associated actions",
2643 .next = NEXT(next_action),
2648 .help = "specify next action",
2649 .next = NEXT(next_action),
2653 .help = "end list of actions",
2654 .priv = PRIV_ACTION(END, 0),
2659 .help = "no-op action",
2660 .priv = PRIV_ACTION(VOID, 0),
2661 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2664 [ACTION_PASSTHRU] = {
2666 .help = "let subsequent rule process matched packets",
2667 .priv = PRIV_ACTION(PASSTHRU, 0),
2668 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2673 .help = "redirect traffic to a given group",
2674 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2675 .next = NEXT(action_jump),
2678 [ACTION_JUMP_GROUP] = {
2680 .help = "group to redirect traffic to",
2681 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2682 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2683 .call = parse_vc_conf,
2687 .help = "attach 32 bit value to packets",
2688 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2689 .next = NEXT(action_mark),
2692 [ACTION_MARK_ID] = {
2694 .help = "32 bit value to return with packets",
2695 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2696 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2697 .call = parse_vc_conf,
2701 .help = "flag packets",
2702 .priv = PRIV_ACTION(FLAG, 0),
2703 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2708 .help = "assign packets to a given queue index",
2709 .priv = PRIV_ACTION(QUEUE,
2710 sizeof(struct rte_flow_action_queue)),
2711 .next = NEXT(action_queue),
2714 [ACTION_QUEUE_INDEX] = {
2716 .help = "queue index to use",
2717 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2718 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2719 .call = parse_vc_conf,
2723 .help = "drop packets (note: passthru has priority)",
2724 .priv = PRIV_ACTION(DROP, 0),
2725 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2730 .help = "enable counters for this rule",
2731 .priv = PRIV_ACTION(COUNT,
2732 sizeof(struct rte_flow_action_count)),
2733 .next = NEXT(action_count),
2736 [ACTION_COUNT_ID] = {
2737 .name = "identifier",
2738 .help = "counter identifier to use",
2739 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2740 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2741 .call = parse_vc_conf,
2743 [ACTION_COUNT_SHARED] = {
2745 .help = "shared counter",
2746 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2747 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2749 .call = parse_vc_conf,
2753 .help = "spread packets among several queues",
2754 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2755 .next = NEXT(action_rss),
2756 .call = parse_vc_action_rss,
2758 [ACTION_RSS_FUNC] = {
2760 .help = "RSS hash function to apply",
2761 .next = NEXT(action_rss,
2762 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2763 ACTION_RSS_FUNC_TOEPLITZ,
2764 ACTION_RSS_FUNC_SIMPLE_XOR,
2765 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2767 [ACTION_RSS_FUNC_DEFAULT] = {
2769 .help = "default hash function",
2770 .call = parse_vc_action_rss_func,
2772 [ACTION_RSS_FUNC_TOEPLITZ] = {
2774 .help = "Toeplitz hash function",
2775 .call = parse_vc_action_rss_func,
2777 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2778 .name = "simple_xor",
2779 .help = "simple XOR hash function",
2780 .call = parse_vc_action_rss_func,
2782 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2783 .name = "symmetric_toeplitz",
2784 .help = "Symmetric Toeplitz hash function",
2785 .call = parse_vc_action_rss_func,
2787 [ACTION_RSS_LEVEL] = {
2789 .help = "encapsulation level for \"types\"",
2790 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2791 .args = ARGS(ARGS_ENTRY_ARB
2792 (offsetof(struct action_rss_data, conf) +
2793 offsetof(struct rte_flow_action_rss, level),
2794 sizeof(((struct rte_flow_action_rss *)0)->
2797 [ACTION_RSS_TYPES] = {
2799 .help = "specific RSS hash types",
2800 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2802 [ACTION_RSS_TYPE] = {
2804 .help = "RSS hash type",
2805 .call = parse_vc_action_rss_type,
2806 .comp = comp_vc_action_rss_type,
2808 [ACTION_RSS_KEY] = {
2810 .help = "RSS hash key",
2811 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2812 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2814 (offsetof(struct action_rss_data, conf) +
2815 offsetof(struct rte_flow_action_rss, key_len),
2816 sizeof(((struct rte_flow_action_rss *)0)->
2818 ARGS_ENTRY(struct action_rss_data, key)),
2820 [ACTION_RSS_KEY_LEN] = {
2822 .help = "RSS hash key length in bytes",
2823 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2824 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2825 (offsetof(struct action_rss_data, conf) +
2826 offsetof(struct rte_flow_action_rss, key_len),
2827 sizeof(((struct rte_flow_action_rss *)0)->
2830 RSS_HASH_KEY_LENGTH)),
2832 [ACTION_RSS_QUEUES] = {
2834 .help = "queue indices to use",
2835 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2836 .call = parse_vc_conf,
2838 [ACTION_RSS_QUEUE] = {
2840 .help = "queue index",
2841 .call = parse_vc_action_rss_queue,
2842 .comp = comp_vc_action_rss_queue,
2846 .help = "direct traffic to physical function",
2847 .priv = PRIV_ACTION(PF, 0),
2848 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2853 .help = "direct traffic to a virtual function ID",
2854 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2855 .next = NEXT(action_vf),
2858 [ACTION_VF_ORIGINAL] = {
2860 .help = "use original VF ID if possible",
2861 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2862 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2864 .call = parse_vc_conf,
2869 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2870 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2871 .call = parse_vc_conf,
2873 [ACTION_PHY_PORT] = {
2875 .help = "direct packets to physical port index",
2876 .priv = PRIV_ACTION(PHY_PORT,
2877 sizeof(struct rte_flow_action_phy_port)),
2878 .next = NEXT(action_phy_port),
2881 [ACTION_PHY_PORT_ORIGINAL] = {
2883 .help = "use original port index if possible",
2884 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2885 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2887 .call = parse_vc_conf,
2889 [ACTION_PHY_PORT_INDEX] = {
2891 .help = "physical port index",
2892 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2893 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2895 .call = parse_vc_conf,
2897 [ACTION_PORT_ID] = {
2899 .help = "direct matching traffic to a given DPDK port ID",
2900 .priv = PRIV_ACTION(PORT_ID,
2901 sizeof(struct rte_flow_action_port_id)),
2902 .next = NEXT(action_port_id),
2905 [ACTION_PORT_ID_ORIGINAL] = {
2907 .help = "use original DPDK port ID if possible",
2908 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2909 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2911 .call = parse_vc_conf,
2913 [ACTION_PORT_ID_ID] = {
2915 .help = "DPDK port ID",
2916 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2917 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2918 .call = parse_vc_conf,
2922 .help = "meter the directed packets at given id",
2923 .priv = PRIV_ACTION(METER,
2924 sizeof(struct rte_flow_action_meter)),
2925 .next = NEXT(action_meter),
2928 [ACTION_METER_ID] = {
2930 .help = "meter id to use",
2931 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2932 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2933 .call = parse_vc_conf,
2935 [ACTION_OF_SET_MPLS_TTL] = {
2936 .name = "of_set_mpls_ttl",
2937 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2940 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2941 .next = NEXT(action_of_set_mpls_ttl),
2944 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2947 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2948 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2950 .call = parse_vc_conf,
2952 [ACTION_OF_DEC_MPLS_TTL] = {
2953 .name = "of_dec_mpls_ttl",
2954 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2955 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2956 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2959 [ACTION_OF_SET_NW_TTL] = {
2960 .name = "of_set_nw_ttl",
2961 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2964 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2965 .next = NEXT(action_of_set_nw_ttl),
2968 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2971 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2972 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2974 .call = parse_vc_conf,
2976 [ACTION_OF_DEC_NW_TTL] = {
2977 .name = "of_dec_nw_ttl",
2978 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2979 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2980 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2983 [ACTION_OF_COPY_TTL_OUT] = {
2984 .name = "of_copy_ttl_out",
2985 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2986 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2987 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2990 [ACTION_OF_COPY_TTL_IN] = {
2991 .name = "of_copy_ttl_in",
2992 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2993 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2994 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2997 [ACTION_OF_POP_VLAN] = {
2998 .name = "of_pop_vlan",
2999 .help = "OpenFlow's OFPAT_POP_VLAN",
3000 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
3001 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3004 [ACTION_OF_PUSH_VLAN] = {
3005 .name = "of_push_vlan",
3006 .help = "OpenFlow's OFPAT_PUSH_VLAN",
3009 sizeof(struct rte_flow_action_of_push_vlan)),
3010 .next = NEXT(action_of_push_vlan),
3013 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
3014 .name = "ethertype",
3015 .help = "EtherType",
3016 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
3017 .args = ARGS(ARGS_ENTRY_HTON
3018 (struct rte_flow_action_of_push_vlan,
3020 .call = parse_vc_conf,
3022 [ACTION_OF_SET_VLAN_VID] = {
3023 .name = "of_set_vlan_vid",
3024 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
3027 sizeof(struct rte_flow_action_of_set_vlan_vid)),
3028 .next = NEXT(action_of_set_vlan_vid),
3031 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
3034 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
3035 .args = ARGS(ARGS_ENTRY_HTON
3036 (struct rte_flow_action_of_set_vlan_vid,
3038 .call = parse_vc_conf,
3040 [ACTION_OF_SET_VLAN_PCP] = {
3041 .name = "of_set_vlan_pcp",
3042 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3045 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3046 .next = NEXT(action_of_set_vlan_pcp),
3049 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3051 .help = "VLAN priority",
3052 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3053 .args = ARGS(ARGS_ENTRY_HTON
3054 (struct rte_flow_action_of_set_vlan_pcp,
3056 .call = parse_vc_conf,
3058 [ACTION_OF_POP_MPLS] = {
3059 .name = "of_pop_mpls",
3060 .help = "OpenFlow's OFPAT_POP_MPLS",
3061 .priv = PRIV_ACTION(OF_POP_MPLS,
3062 sizeof(struct rte_flow_action_of_pop_mpls)),
3063 .next = NEXT(action_of_pop_mpls),
3066 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3067 .name = "ethertype",
3068 .help = "EtherType",
3069 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3070 .args = ARGS(ARGS_ENTRY_HTON
3071 (struct rte_flow_action_of_pop_mpls,
3073 .call = parse_vc_conf,
3075 [ACTION_OF_PUSH_MPLS] = {
3076 .name = "of_push_mpls",
3077 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3080 sizeof(struct rte_flow_action_of_push_mpls)),
3081 .next = NEXT(action_of_push_mpls),
3084 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3085 .name = "ethertype",
3086 .help = "EtherType",
3087 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3088 .args = ARGS(ARGS_ENTRY_HTON
3089 (struct rte_flow_action_of_push_mpls,
3091 .call = parse_vc_conf,
3093 [ACTION_VXLAN_ENCAP] = {
3094 .name = "vxlan_encap",
3095 .help = "VXLAN encapsulation, uses configuration set by \"set"
3097 .priv = PRIV_ACTION(VXLAN_ENCAP,
3098 sizeof(struct action_vxlan_encap_data)),
3099 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3100 .call = parse_vc_action_vxlan_encap,
3102 [ACTION_VXLAN_DECAP] = {
3103 .name = "vxlan_decap",
3104 .help = "Performs a decapsulation action by stripping all"
3105 " headers of the VXLAN tunnel network overlay from the"
3107 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3108 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3111 [ACTION_NVGRE_ENCAP] = {
3112 .name = "nvgre_encap",
3113 .help = "NVGRE encapsulation, uses configuration set by \"set"
3115 .priv = PRIV_ACTION(NVGRE_ENCAP,
3116 sizeof(struct action_nvgre_encap_data)),
3117 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3118 .call = parse_vc_action_nvgre_encap,
3120 [ACTION_NVGRE_DECAP] = {
3121 .name = "nvgre_decap",
3122 .help = "Performs a decapsulation action by stripping all"
3123 " headers of the NVGRE tunnel network overlay from the"
3125 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3126 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3129 [ACTION_L2_ENCAP] = {
3131 .help = "l2 encap, uses configuration set by"
3132 " \"set l2_encap\"",
3133 .priv = PRIV_ACTION(RAW_ENCAP,
3134 sizeof(struct action_raw_encap_data)),
3135 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3136 .call = parse_vc_action_l2_encap,
3138 [ACTION_L2_DECAP] = {
3140 .help = "l2 decap, uses configuration set by"
3141 " \"set l2_decap\"",
3142 .priv = PRIV_ACTION(RAW_DECAP,
3143 sizeof(struct action_raw_decap_data)),
3144 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3145 .call = parse_vc_action_l2_decap,
3147 [ACTION_MPLSOGRE_ENCAP] = {
3148 .name = "mplsogre_encap",
3149 .help = "mplsogre encapsulation, uses configuration set by"
3150 " \"set mplsogre_encap\"",
3151 .priv = PRIV_ACTION(RAW_ENCAP,
3152 sizeof(struct action_raw_encap_data)),
3153 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3154 .call = parse_vc_action_mplsogre_encap,
3156 [ACTION_MPLSOGRE_DECAP] = {
3157 .name = "mplsogre_decap",
3158 .help = "mplsogre decapsulation, uses configuration set by"
3159 " \"set mplsogre_decap\"",
3160 .priv = PRIV_ACTION(RAW_DECAP,
3161 sizeof(struct action_raw_decap_data)),
3162 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3163 .call = parse_vc_action_mplsogre_decap,
3165 [ACTION_MPLSOUDP_ENCAP] = {
3166 .name = "mplsoudp_encap",
3167 .help = "mplsoudp encapsulation, uses configuration set by"
3168 " \"set mplsoudp_encap\"",
3169 .priv = PRIV_ACTION(RAW_ENCAP,
3170 sizeof(struct action_raw_encap_data)),
3171 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3172 .call = parse_vc_action_mplsoudp_encap,
3174 [ACTION_MPLSOUDP_DECAP] = {
3175 .name = "mplsoudp_decap",
3176 .help = "mplsoudp decapsulation, uses configuration set by"
3177 " \"set mplsoudp_decap\"",
3178 .priv = PRIV_ACTION(RAW_DECAP,
3179 sizeof(struct action_raw_decap_data)),
3180 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3181 .call = parse_vc_action_mplsoudp_decap,
3183 [ACTION_SET_IPV4_SRC] = {
3184 .name = "set_ipv4_src",
3185 .help = "Set a new IPv4 source address in the outermost"
3187 .priv = PRIV_ACTION(SET_IPV4_SRC,
3188 sizeof(struct rte_flow_action_set_ipv4)),
3189 .next = NEXT(action_set_ipv4_src),
3192 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3193 .name = "ipv4_addr",
3194 .help = "new IPv4 source address to set",
3195 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3196 .args = ARGS(ARGS_ENTRY_HTON
3197 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3198 .call = parse_vc_conf,
3200 [ACTION_SET_IPV4_DST] = {
3201 .name = "set_ipv4_dst",
3202 .help = "Set a new IPv4 destination address in the outermost"
3204 .priv = PRIV_ACTION(SET_IPV4_DST,
3205 sizeof(struct rte_flow_action_set_ipv4)),
3206 .next = NEXT(action_set_ipv4_dst),
3209 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3210 .name = "ipv4_addr",
3211 .help = "new IPv4 destination address to set",
3212 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3213 .args = ARGS(ARGS_ENTRY_HTON
3214 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3215 .call = parse_vc_conf,
3217 [ACTION_SET_IPV6_SRC] = {
3218 .name = "set_ipv6_src",
3219 .help = "Set a new IPv6 source address in the outermost"
3221 .priv = PRIV_ACTION(SET_IPV6_SRC,
3222 sizeof(struct rte_flow_action_set_ipv6)),
3223 .next = NEXT(action_set_ipv6_src),
3226 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3227 .name = "ipv6_addr",
3228 .help = "new IPv6 source address to set",
3229 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3230 .args = ARGS(ARGS_ENTRY_HTON
3231 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3232 .call = parse_vc_conf,
3234 [ACTION_SET_IPV6_DST] = {
3235 .name = "set_ipv6_dst",
3236 .help = "Set a new IPv6 destination address in the outermost"
3238 .priv = PRIV_ACTION(SET_IPV6_DST,
3239 sizeof(struct rte_flow_action_set_ipv6)),
3240 .next = NEXT(action_set_ipv6_dst),
3243 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3244 .name = "ipv6_addr",
3245 .help = "new IPv6 destination address to set",
3246 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3247 .args = ARGS(ARGS_ENTRY_HTON
3248 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3249 .call = parse_vc_conf,
3251 [ACTION_SET_TP_SRC] = {
3252 .name = "set_tp_src",
3253 .help = "set a new source port number in the outermost"
3255 .priv = PRIV_ACTION(SET_TP_SRC,
3256 sizeof(struct rte_flow_action_set_tp)),
3257 .next = NEXT(action_set_tp_src),
3260 [ACTION_SET_TP_SRC_TP_SRC] = {
3262 .help = "new source port number to set",
3263 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3264 .args = ARGS(ARGS_ENTRY_HTON
3265 (struct rte_flow_action_set_tp, port)),
3266 .call = parse_vc_conf,
3268 [ACTION_SET_TP_DST] = {
3269 .name = "set_tp_dst",
3270 .help = "set a new destination port number in the outermost"
3272 .priv = PRIV_ACTION(SET_TP_DST,
3273 sizeof(struct rte_flow_action_set_tp)),
3274 .next = NEXT(action_set_tp_dst),
3277 [ACTION_SET_TP_DST_TP_DST] = {
3279 .help = "new destination port number to set",
3280 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3281 .args = ARGS(ARGS_ENTRY_HTON
3282 (struct rte_flow_action_set_tp, port)),
3283 .call = parse_vc_conf,
3285 [ACTION_MAC_SWAP] = {
3287 .help = "Swap the source and destination MAC addresses"
3288 " in the outermost Ethernet header",
3289 .priv = PRIV_ACTION(MAC_SWAP, 0),
3290 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3293 [ACTION_DEC_TTL] = {
3295 .help = "decrease network TTL if available",
3296 .priv = PRIV_ACTION(DEC_TTL, 0),
3297 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3300 [ACTION_SET_TTL] = {
3302 .help = "set ttl value",
3303 .priv = PRIV_ACTION(SET_TTL,
3304 sizeof(struct rte_flow_action_set_ttl)),
3305 .next = NEXT(action_set_ttl),
3308 [ACTION_SET_TTL_TTL] = {
3309 .name = "ttl_value",
3310 .help = "new ttl value to set",
3311 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3312 .args = ARGS(ARGS_ENTRY_HTON
3313 (struct rte_flow_action_set_ttl, ttl_value)),
3314 .call = parse_vc_conf,
3316 [ACTION_SET_MAC_SRC] = {
3317 .name = "set_mac_src",
3318 .help = "set source mac address",
3319 .priv = PRIV_ACTION(SET_MAC_SRC,
3320 sizeof(struct rte_flow_action_set_mac)),
3321 .next = NEXT(action_set_mac_src),
3324 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3326 .help = "new source mac address",
3327 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3328 .args = ARGS(ARGS_ENTRY_HTON
3329 (struct rte_flow_action_set_mac, mac_addr)),
3330 .call = parse_vc_conf,
3332 [ACTION_SET_MAC_DST] = {
3333 .name = "set_mac_dst",
3334 .help = "set destination mac address",
3335 .priv = PRIV_ACTION(SET_MAC_DST,
3336 sizeof(struct rte_flow_action_set_mac)),
3337 .next = NEXT(action_set_mac_dst),
3340 [ACTION_SET_MAC_DST_MAC_DST] = {
3342 .help = "new destination mac address to set",
3343 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3344 .args = ARGS(ARGS_ENTRY_HTON
3345 (struct rte_flow_action_set_mac, mac_addr)),
3346 .call = parse_vc_conf,
3348 [ACTION_INC_TCP_SEQ] = {
3349 .name = "inc_tcp_seq",
3350 .help = "increase TCP sequence number",
3351 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3352 .next = NEXT(action_inc_tcp_seq),
3355 [ACTION_INC_TCP_SEQ_VALUE] = {
3357 .help = "the value to increase TCP sequence number by",
3358 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3359 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3360 .call = parse_vc_conf,
3362 [ACTION_DEC_TCP_SEQ] = {
3363 .name = "dec_tcp_seq",
3364 .help = "decrease TCP sequence number",
3365 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3366 .next = NEXT(action_dec_tcp_seq),
3369 [ACTION_DEC_TCP_SEQ_VALUE] = {
3371 .help = "the value to decrease TCP sequence number by",
3372 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3373 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3374 .call = parse_vc_conf,
3376 [ACTION_INC_TCP_ACK] = {
3377 .name = "inc_tcp_ack",
3378 .help = "increase TCP acknowledgment number",
3379 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3380 .next = NEXT(action_inc_tcp_ack),
3383 [ACTION_INC_TCP_ACK_VALUE] = {
3385 .help = "the value to increase TCP acknowledgment number by",
3386 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3387 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3388 .call = parse_vc_conf,
3390 [ACTION_DEC_TCP_ACK] = {
3391 .name = "dec_tcp_ack",
3392 .help = "decrease TCP acknowledgment number",
3393 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3394 .next = NEXT(action_dec_tcp_ack),
3397 [ACTION_DEC_TCP_ACK_VALUE] = {
3399 .help = "the value to decrease TCP acknowledgment number by",
3400 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3401 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3402 .call = parse_vc_conf,
3404 [ACTION_RAW_ENCAP] = {
3405 .name = "raw_encap",
3406 .help = "encapsulation data, defined by set raw_encap",
3407 .priv = PRIV_ACTION(RAW_ENCAP,
3408 sizeof(struct action_raw_encap_data)),
3409 .next = NEXT(action_raw_encap),
3410 .call = parse_vc_action_raw_encap,
3412 [ACTION_RAW_ENCAP_INDEX] = {
3414 .help = "the index of raw_encap_confs",
3415 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
3417 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
3420 .help = "unsigned integer value",
3421 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3422 .call = parse_vc_action_raw_encap_index,
3423 .comp = comp_set_raw_index,
3425 [ACTION_RAW_DECAP] = {
3426 .name = "raw_decap",
3427 .help = "decapsulation data, defined by set raw_encap",
3428 .priv = PRIV_ACTION(RAW_DECAP,
3429 sizeof(struct action_raw_decap_data)),
3430 .next = NEXT(action_raw_decap),
3431 .call = parse_vc_action_raw_decap,
3433 [ACTION_RAW_DECAP_INDEX] = {
3435 .help = "the index of raw_encap_confs",
3436 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
3438 [ACTION_RAW_DECAP_INDEX_VALUE] = {
3441 .help = "unsigned integer value",
3442 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3443 .call = parse_vc_action_raw_decap_index,
3444 .comp = comp_set_raw_index,
3446 /* Top level command. */
3449 .help = "set raw encap/decap data",
3450 .type = "set raw_encap|raw_decap <index> <pattern>",
3451 .next = NEXT(NEXT_ENTRY
3454 .call = parse_set_init,
3456 /* Sub-level commands. */
3458 .name = "raw_encap",
3459 .help = "set raw encap data",
3460 .next = NEXT(next_set_raw),
3461 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3462 (offsetof(struct buffer, port),
3463 sizeof(((struct buffer *)0)->port),
3464 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3465 .call = parse_set_raw_encap_decap,
3468 .name = "raw_decap",
3469 .help = "set raw decap data",
3470 .next = NEXT(next_set_raw),
3471 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3472 (offsetof(struct buffer, port),
3473 sizeof(((struct buffer *)0)->port),
3474 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3475 .call = parse_set_raw_encap_decap,
3480 .help = "index of raw_encap/raw_decap data",
3481 .next = NEXT(next_item),
3484 [ACTION_SET_TAG] = {
3487 .priv = PRIV_ACTION(SET_TAG,
3488 sizeof(struct rte_flow_action_set_tag)),
3489 .next = NEXT(action_set_tag),
3492 [ACTION_SET_TAG_INDEX] = {
3494 .help = "index of tag array",
3495 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3496 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
3497 .call = parse_vc_conf,
3499 [ACTION_SET_TAG_DATA] = {
3501 .help = "tag value",
3502 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3503 .args = ARGS(ARGS_ENTRY
3504 (struct rte_flow_action_set_tag, data)),
3505 .call = parse_vc_conf,
3507 [ACTION_SET_TAG_MASK] = {
3509 .help = "mask for tag value",
3510 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3511 .args = ARGS(ARGS_ENTRY
3512 (struct rte_flow_action_set_tag, mask)),
3513 .call = parse_vc_conf,
3515 [ACTION_SET_META] = {
3517 .help = "set metadata",
3518 .priv = PRIV_ACTION(SET_META,
3519 sizeof(struct rte_flow_action_set_meta)),
3520 .next = NEXT(action_set_meta),
3521 .call = parse_vc_action_set_meta,
3523 [ACTION_SET_META_DATA] = {
3525 .help = "metadata value",
3526 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3527 .args = ARGS(ARGS_ENTRY
3528 (struct rte_flow_action_set_meta, data)),
3529 .call = parse_vc_conf,
3531 [ACTION_SET_META_MASK] = {
3533 .help = "mask for metadata value",
3534 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3535 .args = ARGS(ARGS_ENTRY
3536 (struct rte_flow_action_set_meta, mask)),
3537 .call = parse_vc_conf,
3539 [ACTION_SET_IPV4_DSCP] = {
3540 .name = "set_ipv4_dscp",
3541 .help = "set DSCP value",
3542 .priv = PRIV_ACTION(SET_IPV4_DSCP,
3543 sizeof(struct rte_flow_action_set_dscp)),
3544 .next = NEXT(action_set_ipv4_dscp),
3547 [ACTION_SET_IPV4_DSCP_VALUE] = {
3548 .name = "dscp_value",
3549 .help = "new IPv4 DSCP value to set",
3550 .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)),
3551 .args = ARGS(ARGS_ENTRY
3552 (struct rte_flow_action_set_dscp, dscp)),
3553 .call = parse_vc_conf,
3555 [ACTION_SET_IPV6_DSCP] = {
3556 .name = "set_ipv6_dscp",
3557 .help = "set DSCP value",
3558 .priv = PRIV_ACTION(SET_IPV6_DSCP,
3559 sizeof(struct rte_flow_action_set_dscp)),
3560 .next = NEXT(action_set_ipv6_dscp),
3563 [ACTION_SET_IPV6_DSCP_VALUE] = {
3564 .name = "dscp_value",
3565 .help = "new IPv6 DSCP value to set",
3566 .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)),
3567 .args = ARGS(ARGS_ENTRY
3568 (struct rte_flow_action_set_dscp, dscp)),
3569 .call = parse_vc_conf,
3573 /** Remove and return last entry from argument stack. */
/*
 * Returns the most recently pushed arg descriptor, or NULL when the
 * stack is empty so callers can detect a missing expected argument.
 */
3574 static const struct arg *
3575 pop_args(struct context *ctx)
3577 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3580 /** Add entry on top of the argument stack. */
/*
 * Rejects the push when the stack already holds CTX_STACK_SIZE entries
 * (overflow guard); otherwise stores @arg at the next free slot.
 */
3582 push_args(struct context *ctx, const struct arg *arg)
3584 	if (ctx->args_num == CTX_STACK_SIZE)
3586 	ctx->args[ctx->args_num++] = arg;
3590 /** Spread value into buffer according to bit-mask. */
/*
 * Scatters the low-order bits of @val into the destination buffer,
 * placing one bit of @val at each position where arg->mask has a set
 * bit. When @dst is NULL the walk still runs, so callers can use it
 * as a dry run (e.g. to count mask bits) without writing anything —
 * presumably; confirm against the elided return path.
 */
3592 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3594 	uint32_t i = arg->size;
/* Little-endian hosts walk the mask from the tail so bit order matches
 * the (network byte order) mask layout. */
3602 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3611 		unsigned int shift = 0;
3612 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Iterate over every set bit of the current mask byte. */
3614 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
3615 			if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the next bit of val. */
3620 			*buf &= ~(1 << shift);
3621 			*buf |= (val & 1) << shift;
3629 /** Compare a string with a partial one of a given length. */
/*
 * Like strncmp(full, partial, partial_len), but additionally treats
 * @full as a non-match when it is longer than @partial (the leftover
 * character full[partial_len] is returned as the nonzero difference).
 * Returns 0 only when @partial is a complete match of @full.
 */
3631 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3633 	int r = strncmp(full, partial, partial_len);
3637 	if (strlen(full) <= partial_len)
3639 	return full[partial_len];
3643  * Parse a prefix length and generate a bit-mask.
3645  * Last argument (ctx->args) is retrieved to determine mask size, storage
3646  * location and whether the result must use network byte ordering.
3649 parse_prefix(struct context *ctx, const struct token *token,
3650 	     const char *str, unsigned int len,
3651 	     void *buf, unsigned int size)
/* Consume the arg descriptor pushed by the previous token. */
3653 	const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with the top n bits set — used for the partial
 * (non-byte-aligned) tail of the generated mask. */
3654 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3661 	/* Argument is expected. */
/* Parse the numeric prefix length; reject trailing garbage or ERANGE. */
3665 	u = strtoumax(str, &end, 0);
3666 	if (errno || (size_t)(end - str) != len)
/* Bit-field destination: NULL dst acts as a probe of the mask. */
3671 		extra = arg_entry_bf_fill(NULL, 0, arg);
3680 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3681 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain storage: prefix must fit within the field size. */
3688 	if (bytes > size || bytes + !!extra > size)
3692 	buf = (uint8_t *)ctx->object + arg->offset;
/* On little-endian hosts the 0xff run is written at the high end of
 * the field so the mask is correct after byte swapping. */
3693 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3695 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3696 		memset(buf, 0x00, size - bytes);
3698 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian / network order: leading 0xff bytes, then partial byte. */
3702 		memset(buf, 0xff, bytes);
3703 		memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3705 			((uint8_t *)buf)[bytes] = conv[extra];
/* Object mask (if any) is fully significant for this field. */
3708 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the arg so the caller's state is unchanged. */
3711 	push_args(ctx, arg);
3715 /** Default parsing function for token name matching. */
/*
 * Accepts the input only when @str (of length @len) is a complete
 * match for token->name, using strcmp_partial() semantics.
 */
3717 parse_default(struct context *ctx, const struct token *token,
3718 	      const char *str, unsigned int len,
3719 	      void *buf, unsigned int size)
3724 	if (strcmp_partial(token->name, str, len))
3729 /** Parse flow command, initialize output buffer for subsequent tokens. */
3731 parse_init(struct context *ctx, const struct token *token,
3732 	   const char *str, unsigned int len,
3733 	   void *buf, unsigned int size)
3735 	struct buffer *out = buf;
3737 	/* Token name must match. */
3738 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3740 	/* Nothing else to do if there is no buffer. */
3743 	/* Make sure buffer is large enough. */
3744 	if (size < sizeof(*out))
3746 	/* Initialize buffer. */
/* Zero the structured header; poison the remainder with 0x22 so
 * use of uninitialized tail bytes is easier to spot when debugging. */
3747 	memset(out, 0x00, sizeof(*out));
3748 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3751 	ctx->objmask = NULL;
3755 /** Parse tokens for validate/create commands. */
/*
 * Shared parser callback for the whole "flow validate|create" grammar:
 * it initializes the command buffer on the first token, handles
 * attribute keywords, and appends pattern items / actions (with their
 * private configuration space carved from the end of the buffer).
 */
3757 parse_vc(struct context *ctx, const struct token *token,
3758 	 const char *str, unsigned int len,
3759 	 void *buf, unsigned int size)
3761 	struct buffer *out = buf;
3765 	/* Token name must match. */
3766 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3768 	/* Nothing else to do if there is no buffer. */
/* First token of the command: record it and point the per-token data
 * cursor at the end of the buffer (it grows downward from there). */
3771 	if (!out->command) {
3772 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3774 		if (sizeof(*out) > size)
3776 		out->command = ctx->curr;
3779 		ctx->objmask = NULL;
3780 		out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the flow attr structure. */
3784 	ctx->object = &out->args.vc.attr;
3785 	ctx->objmask = NULL;
3786 	switch (ctx->curr) {
3791 		out->args.vc.attr.ingress = 1;
3794 		out->args.vc.attr.egress = 1;
3797 		out->args.vc.attr.transfer = 1;
/* "pattern": place the item array just after the buffer header. */
3800 		out->args.vc.pattern =
3801 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3803 		ctx->object = out->args.vc.pattern;
3804 		ctx->objmask = NULL;
/* "actions": the action array starts right after the pattern items. */
3807 		out->args.vc.actions =
3808 			(void *)RTE_ALIGN_CEIL((uintptr_t)
3809 					       (out->args.vc.pattern +
3810 						out->args.vc.pattern_n),
3812 		ctx->object = out->args.vc.actions;
3813 		ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action; reserve its
 * spec/conf storage from the downward-growing data area and fail if it
 * would collide with the upward-growing item/action arrays. */
3820 	if (!out->args.vc.actions) {
3821 		const struct parse_item_priv *priv = token->priv;
3822 		struct rte_flow_item *item =
3823 			out->args.vc.pattern + out->args.vc.pattern_n;
3825 		data_size = priv->size * 3; /* spec, last, mask */
3826 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3827 					       (out->args.vc.data - data_size),
3829 		if ((uint8_t *)item + sizeof(*item) > data)
3831 		*item = (struct rte_flow_item){
3834 		++out->args.vc.pattern_n;
3836 		ctx->objmask = NULL;
3838 		const struct parse_action_priv *priv = token->priv;
3839 		struct rte_flow_action *action =
3840 			out->args.vc.actions + out->args.vc.actions_n;
3842 		data_size = priv->size; /* configuration */
3843 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3844 					       (out->args.vc.data - data_size),
3846 		if ((uint8_t *)action + sizeof(*action) > data)
3848 		*action = (struct rte_flow_action){
3850 			.conf = data_size ? data : NULL,
3852 		++out->args.vc.actions_n;
3853 		ctx->object = action;
3854 		ctx->objmask = NULL;
/* Commit the freshly reserved (zeroed) data area. */
3856 	memset(data, 0, data_size);
3857 	out->args.vc.data = data;
3858 	ctx->objdata = data_size;
3862 /** Parse pattern item parameter type. */
/*
 * Handles the "is"/"spec"/"last"/"mask"/"prefix" keywords following a
 * pattern item and points ctx->object at the matching third of the
 * item's reserved data area (spec, last or mask).
 */
3864 parse_vc_spec(struct context *ctx, const struct token *token,
3865 	      const char *str, unsigned int len,
3866 	      void *buf, unsigned int size)
3868 	struct buffer *out = buf;
3869 	struct rte_flow_item *item;
3875 	/* Token name must match. */
3876 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3878 	/* Parse parameter types. */
3879 	switch (ctx->curr) {
3880 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3886 	case ITEM_PARAM_SPEC:
3889 	case ITEM_PARAM_LAST:
3892 	case ITEM_PARAM_PREFIX:
3893 		/* Modify next token to expect a prefix. */
3894 		if (ctx->next_num < 2)
3896 		ctx->next[ctx->next_num - 2] = prefix;
3898 	case ITEM_PARAM_MASK:
3904 	/* Nothing else to do if there is no buffer. */
/* A parameter keyword is only valid after at least one pattern item. */
3907 	if (!out->args.vc.pattern_n)
3909 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3910 	data_size = ctx->objdata / 3; /* spec, last, mask */
3911 	/* Point to selected object. */
3912 	ctx->object = out->args.vc.data + (data_size * index);
3914 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3915 		item->mask = ctx->objmask;
3917 		ctx->objmask = NULL;
3918 	/* Update relevant item pointer. */
3919 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3924 /** Parse action configuration field. */
/*
 * Generic callback for action sub-tokens: after matching the token
 * name it simply redirects ctx->object to the action's configuration
 * area so the following value token writes into it. Actions take no
 * mask, hence objmask is cleared.
 */
3926 parse_vc_conf(struct context *ctx, const struct token *token,
3927 	      const char *str, unsigned int len,
3928 	      void *buf, unsigned int size)
3930 	struct buffer *out = buf;
3933 	/* Token name must match. */
3934 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3936 	/* Nothing else to do if there is no buffer. */
3939 	/* Point to selected object. */
3940 	ctx->object = out->args.vc.data;
3941 	ctx->objmask = NULL;
3945 /** Parse RSS action. */
/*
 * Creates the RSS action entry (via parse_vc()) and fills its private
 * action_rss_data with defaults: default hash function, testpmd's
 * default key, and an identity queue list covering min(nb_rxq,
 * ACTION_RSS_QUEUE_NUM) queues. The key length is clamped to the
 * device's reported hash_key_size when the port is valid.
 */
3947 parse_vc_action_rss(struct context *ctx, const struct token *token,
3948 		    const char *str, unsigned int len,
3949 		    void *buf, unsigned int size)
3951 	struct buffer *out = buf;
3952 	struct rte_flow_action *action;
3953 	struct action_rss_data *action_rss_data;
/* Let parse_vc() append the action and reserve its conf storage. */
3957 	ret = parse_vc(ctx, token, str, len, buf, size);
3960 	/* Nothing else to do if there is no buffer. */
3963 	if (!out->args.vc.actions_n)
3965 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3966 	/* Point to selected object. */
3967 	ctx->object = out->args.vc.data;
3968 	ctx->objmask = NULL;
3969 	/* Set up default configuration. */
3970 	action_rss_data = ctx->object;
3971 	*action_rss_data = (struct action_rss_data){
3972 		.conf = (struct rte_flow_action_rss){
3973 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3976 			.key_len = sizeof(action_rss_data->key),
3977 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3978 			.key = action_rss_data->key,
3979 			.queue = action_rss_data->queue,
3981 		.key = "testpmd's default RSS hash key, "
3982 			"override it for better balancing",
/* Default queue list is the identity mapping 0..queue_num-1. */
3985 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3986 		action_rss_data->queue[i] = i;
3987 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3988 	    ctx->port != (portid_t)RTE_PORT_ALL) {
3989 		struct rte_eth_dev_info info;
3992 		ret2 = rte_eth_dev_info_get(ctx->port, &info);
/* Clamp key length to what the device actually supports. */
3996 		action_rss_data->conf.key_len =
3997 			RTE_MIN(sizeof(action_rss_data->key),
3998 				info.hash_key_size);
4000 	action->conf = &action_rss_data->conf;
4005  * Parse func field for RSS action.
4007  * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
4008  * ACTION_RSS_FUNC_* index that called this function.
4011 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
4012 			 const char *str, unsigned int len,
4013 			 void *buf, unsigned int size)
4015 	struct action_rss_data *action_rss_data;
4016 	enum rte_eth_hash_function func;
4020 	/* Token name must match. */
4021 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the grammar token that invoked us to the hash function enum. */
4023 	switch (ctx->curr) {
4024 	case ACTION_RSS_FUNC_DEFAULT:
4025 		func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4027 	case ACTION_RSS_FUNC_TOEPLITZ:
4028 		func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
4030 	case ACTION_RSS_FUNC_SIMPLE_XOR:
4031 		func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
4033 	case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
4034 		func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
/* ctx->object was set to the RSS conf by the parent RSS token. */
4041 	action_rss_data = ctx->object;
4042 	action_rss_data->conf.func = func;
4047  * Parse type field for RSS action.
4049  * Valid tokens are type field names and the "end" token.
/*
 * Accumulates rss_type_table[] flags into conf.types. Bit 16 of
 * ctx->objdata tracks whether at least one type token has been seen;
 * on the first one, conf.types is reset so defaults don't leak in.
 */
4052 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
4053 			 const char *str, unsigned int len,
4054 			 void *buf, unsigned int size)
4056 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
4057 	struct action_rss_data *action_rss_data;
4063 	if (ctx->curr != ACTION_RSS_TYPE)
/* First type token: clear any previously set default types. */
4065 	if (!(ctx->objdata >> 16) && ctx->object) {
4066 		action_rss_data = ctx->object;
4067 		action_rss_data->conf.types = 0;
/* "end" terminates the list; clear the bookkeeping flag. */
4069 	if (!strcmp_partial("end", str, len)) {
4070 		ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
4073 	for (i = 0; rss_type_table[i].str; ++i)
4074 		if (!strcmp_partial(rss_type_table[i].str, str, len))
4076 	if (!rss_type_table[i].str)
4078 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat: expect another ACTION_RSS_TYPE token next. */
4080 	if (ctx->next_num == RTE_DIM(ctx->next))
4082 	ctx->next[ctx->next_num++] = next;
4085 	action_rss_data = ctx->object;
4086 	action_rss_data->conf.types |= rss_type_table[i].rss_type;
4091  * Parse queue field for RSS action.
4093  * Valid tokens are queue indices and the "end" token.
/*
 * The running queue count is kept in the high 16 bits of ctx->objdata;
 * each queue index is parsed by parse_int() directly into
 * action_rss_data->queue[i] through a temporary arg descriptor.
 */
4096 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4097 			  const char *str, unsigned int len,
4098 			  void *buf, unsigned int size)
4100 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4101 	struct action_rss_data *action_rss_data;
4102 	const struct arg *arg;
4109 	if (ctx->curr != ACTION_RSS_QUEUE)
4111 	i = ctx->objdata >> 16;
4112 	if (!strcmp_partial("end", str, len)) {
4113 		ctx->objdata &= 0xffff;
/* Bound check against the fixed-size queue array. */
4116 	if (i >= ACTION_RSS_QUEUE_NUM)
4118 	arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4119 			     i * sizeof(action_rss_data->queue[i]),
4120 			     sizeof(action_rss_data->queue[i]));
4121 	if (push_args(ctx, arg))
4123 	ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the high bits. */
4129 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Expect another queue index (or "end") next. */
4131 	if (ctx->next_num == RTE_DIM(ctx->next))
4133 	ctx->next[ctx->next_num++] = next;
4137 	action_rss_data = ctx->object;
4138 	action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer. */
4139 	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
4143 /** Parse VXLAN encap action. */
/*
 * Builds the item list for RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP from the
 * global vxlan_encap_conf set by "set vxlan ..." commands:
 * ETH / [VLAN] / IPv4-or-IPv6 / UDP / VXLAN / END. IPv6 replaces the
 * IPv4 slot (items[2]) and VLAN becomes VOID when not selected.
 */
4145 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
4146 			    const char *str, unsigned int len,
4147 			    void *buf, unsigned int size)
4149 	struct buffer *out = buf;
4150 	struct rte_flow_action *action;
4151 	struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Create the action entry and reserve its conf storage. */
4154 	ret = parse_vc(ctx, token, str, len, buf, size);
4157 	/* Nothing else to do if there is no buffer. */
4160 	if (!out->args.vc.actions_n)
4162 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4163 	/* Point to selected object. */
4164 	ctx->object = out->args.vc.data;
4165 	ctx->objmask = NULL;
4166 	/* Set up default configuration. */
4167 	action_vxlan_encap_data = ctx->object;
4168 	*action_vxlan_encap_data = (struct action_vxlan_encap_data){
4169 		.conf = (struct rte_flow_action_vxlan_encap){
4170 			.definition = action_vxlan_encap_data->items,
4174 				.type = RTE_FLOW_ITEM_TYPE_ETH,
4175 				.spec = &action_vxlan_encap_data->item_eth,
4176 				.mask = &rte_flow_item_eth_mask,
4179 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
4180 				.spec = &action_vxlan_encap_data->item_vlan,
4181 				.mask = &rte_flow_item_vlan_mask,
4184 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
4185 				.spec = &action_vxlan_encap_data->item_ipv4,
4186 				.mask = &rte_flow_item_ipv4_mask,
4189 				.type = RTE_FLOW_ITEM_TYPE_UDP,
4190 				.spec = &action_vxlan_encap_data->item_udp,
4191 				.mask = &rte_flow_item_udp_mask,
4194 				.type = RTE_FLOW_ITEM_TYPE_VXLAN,
4195 				.spec = &action_vxlan_encap_data->item_vxlan,
4196 				.mask = &rte_flow_item_vxlan_mask,
4199 				.type = RTE_FLOW_ITEM_TYPE_END,
4204 			.tci = vxlan_encap_conf.vlan_tci,
4208 			.src_addr = vxlan_encap_conf.ipv4_src,
4209 			.dst_addr = vxlan_encap_conf.ipv4_dst,
4212 			.src_port = vxlan_encap_conf.udp_src,
4213 			.dst_port = vxlan_encap_conf.udp_dst,
4215 		.item_vxlan.flags = 0,
/* MAC addresses copied separately (array fields). */
4217 	memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
4218 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4219 	memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
4220 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 slot (items[2]) with an IPv6 item. */
4221 	if (!vxlan_encap_conf.select_ipv4) {
4222 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
4223 		       &vxlan_encap_conf.ipv6_src,
4224 		       sizeof(vxlan_encap_conf.ipv6_src));
4225 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
4226 		       &vxlan_encap_conf.ipv6_dst,
4227 		       sizeof(vxlan_encap_conf.ipv6_dst));
4228 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
4229 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4230 			.spec = &action_vxlan_encap_data->item_ipv6,
4231 			.mask = &rte_flow_item_ipv6_mask,
/* Drop the VLAN header when not requested. */
4234 	if (!vxlan_encap_conf.select_vlan)
4235 		action_vxlan_encap_data->items[1].type =
4236 			RTE_FLOW_ITEM_TYPE_VOID;
/* Optional TOS/TTL: widen the IP mask so these fields are matched.
 * NOTE(review): masks are function-local statics — shared across all
 * flows created this way; fine as long as values never diverge. */
4237 	if (vxlan_encap_conf.select_tos_ttl) {
4238 		if (vxlan_encap_conf.select_ipv4) {
4239 			static struct rte_flow_item_ipv4 ipv4_mask_tos;
4241 			memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
4242 			       sizeof(ipv4_mask_tos));
4243 			ipv4_mask_tos.hdr.type_of_service = 0xff;
4244 			ipv4_mask_tos.hdr.time_to_live = 0xff;
4245 			action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
4246 					vxlan_encap_conf.ip_tos;
4247 			action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
4248 					vxlan_encap_conf.ip_ttl;
4249 			action_vxlan_encap_data->items[2].mask =
4252 			static struct rte_flow_item_ipv6 ipv6_mask_tos;
4254 			memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
4255 			       sizeof(ipv6_mask_tos));
/* Traffic class lives inside the IPv6 vtc_flow word. */
4256 			ipv6_mask_tos.hdr.vtc_flow |=
4257 				RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
4258 			ipv6_mask_tos.hdr.hop_limits = 0xff;
4259 			action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
4261 					((uint32_t)vxlan_encap_conf.ip_tos <<
4262 					 RTE_IPV6_HDR_TC_SHIFT);
4263 			action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
4264 					vxlan_encap_conf.ip_ttl;
4265 			action_vxlan_encap_data->items[2].mask =
4269 	memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4270 	       RTE_DIM(vxlan_encap_conf.vni));
4271 	action->conf = &action_vxlan_encap_data->conf;
4275 /** Parse NVGRE encap action. */
/*
 * Builds the item list for RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP from the
 * global nvgre_encap_conf: ETH / [VLAN] / IPv4-or-IPv6 / NVGRE / END.
 * Same layout strategy as the VXLAN variant: IPv6 replaces items[2],
 * VLAN becomes VOID when not selected.
 */
4277 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4278 			    const char *str, unsigned int len,
4279 			    void *buf, unsigned int size)
4281 	struct buffer *out = buf;
4282 	struct rte_flow_action *action;
4283 	struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Create the action entry and reserve its conf storage. */
4286 	ret = parse_vc(ctx, token, str, len, buf, size);
4289 	/* Nothing else to do if there is no buffer. */
4292 	if (!out->args.vc.actions_n)
4294 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4295 	/* Point to selected object. */
4296 	ctx->object = out->args.vc.data;
4297 	ctx->objmask = NULL;
4298 	/* Set up default configuration. */
4299 	action_nvgre_encap_data = ctx->object;
4300 	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
4301 		.conf = (struct rte_flow_action_nvgre_encap){
4302 			.definition = action_nvgre_encap_data->items,
4306 				.type = RTE_FLOW_ITEM_TYPE_ETH,
4307 				.spec = &action_nvgre_encap_data->item_eth,
4308 				.mask = &rte_flow_item_eth_mask,
4311 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
4312 				.spec = &action_nvgre_encap_data->item_vlan,
4313 				.mask = &rte_flow_item_vlan_mask,
4316 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
4317 				.spec = &action_nvgre_encap_data->item_ipv4,
4318 				.mask = &rte_flow_item_ipv4_mask,
4321 				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
4322 				.spec = &action_nvgre_encap_data->item_nvgre,
4323 				.mask = &rte_flow_item_nvgre_mask,
4326 				.type = RTE_FLOW_ITEM_TYPE_END,
4331 			.tci = nvgre_encap_conf.vlan_tci,
4335 			.src_addr = nvgre_encap_conf.ipv4_src,
4336 			.dst_addr = nvgre_encap_conf.ipv4_dst,
4338 		.item_nvgre.flow_id = 0,
/* MAC addresses copied separately (array fields). */
4340 	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4341 	       nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4342 	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4343 	       nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 slot (items[2]) with an IPv6 item. */
4344 	if (!nvgre_encap_conf.select_ipv4) {
4345 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4346 		       &nvgre_encap_conf.ipv6_src,
4347 		       sizeof(nvgre_encap_conf.ipv6_src));
4348 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4349 		       &nvgre_encap_conf.ipv6_dst,
4350 		       sizeof(nvgre_encap_conf.ipv6_dst));
4351 		action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4352 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4353 			.spec = &action_nvgre_encap_data->item_ipv6,
4354 			.mask = &rte_flow_item_ipv6_mask,
/* Drop the VLAN header when not requested. */
4357 	if (!nvgre_encap_conf.select_vlan)
4358 		action_nvgre_encap_data->items[1].type =
4359 			RTE_FLOW_ITEM_TYPE_VOID;
4360 	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4361 	       RTE_DIM(nvgre_encap_conf.tni));
4362 	action->conf = &action_nvgre_encap_data->conf;
4366 /** Parse l2 encap action. */
/*
 * Implements "l2_encap" as a RAW_ENCAP action: serializes an Ethernet
 * header (optionally followed by a VLAN header) from the global
 * l2_encap_conf into the action's raw data buffer and sets conf.size
 * to the number of bytes written.
 */
4368 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4369 			 const char *str, unsigned int len,
4370 			 void *buf, unsigned int size)
4372 	struct buffer *out = buf;
4373 	struct rte_flow_action *action;
4374 	struct action_raw_encap_data *action_encap_data;
4375 	struct rte_flow_item_eth eth = { .type = 0, };
4376 	struct rte_flow_item_vlan vlan = {
/* NOTE(review): tci is read from mplsoudp_encap_conf while every
 * other field below comes from l2_encap_conf — looks like a
 * copy/paste slip; confirm whether l2_encap_conf.vlan_tci was
 * intended. */
4377 		.tci = mplsoudp_encap_conf.vlan_tci,
/* Create the action entry and reserve its conf storage. */
4383 	ret = parse_vc(ctx, token, str, len, buf, size);
4386 	/* Nothing else to do if there is no buffer. */
4389 	if (!out->args.vc.actions_n)
4391 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4392 	/* Point to selected object. */
4393 	ctx->object = out->args.vc.data;
4394 	ctx->objmask = NULL;
4395 	/* Copy the headers to the buffer. */
4396 	action_encap_data = ctx->object;
4397 	*action_encap_data = (struct action_raw_encap_data) {
4398 		.conf = (struct rte_flow_action_raw_encap){
4399 			.data = action_encap_data->data,
/* Pick the EtherType of the (outer) Ethernet header. */
4403 	header = action_encap_data->data;
4404 	if (l2_encap_conf.select_vlan)
4405 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4406 	else if (l2_encap_conf.select_ipv4)
4407 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4409 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4410 	memcpy(eth.dst.addr_bytes,
4411 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4412 	memcpy(eth.src.addr_bytes,
4413 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Append the Ethernet header, then the VLAN tag when selected. */
4414 	memcpy(header, &eth, sizeof(eth));
4415 	header += sizeof(eth);
4416 	if (l2_encap_conf.select_vlan) {
4417 		if (l2_encap_conf.select_ipv4)
4418 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4420 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4421 		memcpy(header, &vlan, sizeof(vlan));
4422 		header += sizeof(vlan);
/* Total encapsulation size = bytes actually serialized. */
4424 	action_encap_data->conf.size = header -
4425 		action_encap_data->data;
4426 	action->conf = &action_encap_data->conf;
/**
 * Parse l2 decap action.
 *
 * Builds a RAW_DECAP action whose data buffer describes the headers to
 * strip: Ethernet, optionally followed by VLAN (per l2_decap_conf).
 */
parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
			 const char *str, unsigned int len,
			 void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_decap_data *action_decap_data;
	struct rte_flow_item_eth eth = { .type = 0, };
	/*
	 * NOTE(review): TCI is borrowed from mplsoudp_encap_conf;
	 * l2_decap_conf has no vlan_tci field. Presumably harmless since
	 * decap only needs header sizes, not field values — TODO confirm.
	 */
	struct rte_flow_item_vlan vlan = {
		.tci = mplsoudp_encap_conf.vlan_tci,
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_decap_data = ctx->object;
	*action_decap_data = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap){
			.data = action_decap_data->data,
	header = action_decap_data->data;
	if (l2_decap_conf.select_vlan)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	if (l2_decap_conf.select_vlan) {
		memcpy(header, &vlan, sizeof(vlan));
		header += sizeof(vlan);
	/* conf.size = total number of header bytes to remove. */
	action_decap_data->conf.size = header -
		action_decap_data->data;
	action->conf = &action_decap_data->conf;
/* EtherType for MPLS unicast (IANA 0x8847), used as the GRE/VLAN proto. */
#define ETHER_TYPE_MPLS_UNICAST 0x8847

/**
 * Parse MPLSOGRE encap action.
 *
 * Builds a RAW_ENCAP data buffer: Ethernet [+ VLAN] + IPv4-or-IPv6 +
 * GRE + MPLS, populated from the global mplsogre_encap_conf.
 */
parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
			       const char *str, unsigned int len,
			       void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_encap_data *action_encap_data;
	struct rte_flow_item_eth eth = { .type = 0, };
	struct rte_flow_item_vlan vlan = {
		.tci = mplsogre_encap_conf.vlan_tci,
	struct rte_flow_item_ipv4 ipv4 = {
		.src_addr = mplsogre_encap_conf.ipv4_src,
		.dst_addr = mplsogre_encap_conf.ipv4_dst,
		.next_proto_id = IPPROTO_GRE,
		.version_ihl = RTE_IPV4_VHL_DEF,
		.time_to_live = IPDEFTTL,
	struct rte_flow_item_ipv6 ipv6 = {
		.proto = IPPROTO_GRE,
		.hop_limits = IPDEFTTL,
	struct rte_flow_item_gre gre = {
		.protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
	struct rte_flow_item_mpls mpls;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_encap_data = ctx->object;
	*action_encap_data = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap){
			.data = action_encap_data->data,
	header = action_encap_data->data;
	/* Outer EtherType reflects the next header actually emitted. */
	if (mplsogre_encap_conf.select_vlan)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	else if (mplsogre_encap_conf.select_ipv4)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	memcpy(eth.dst.addr_bytes,
	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
	memcpy(eth.src.addr_bytes,
	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	if (mplsogre_encap_conf.select_vlan) {
		if (mplsogre_encap_conf.select_ipv4)
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(header, &vlan, sizeof(vlan));
		header += sizeof(vlan);
	if (mplsogre_encap_conf.select_ipv4) {
		memcpy(header, &ipv4, sizeof(ipv4));
		header += sizeof(ipv4);
		memcpy(&ipv6.hdr.src_addr,
		       &mplsogre_encap_conf.ipv6_src,
		       sizeof(mplsogre_encap_conf.ipv6_src));
		memcpy(&ipv6.hdr.dst_addr,
		       &mplsogre_encap_conf.ipv6_dst,
		       sizeof(mplsogre_encap_conf.ipv6_dst));
		memcpy(header, &ipv6, sizeof(ipv6));
		header += sizeof(ipv6);
	memcpy(header, &gre, sizeof(gre));
	header += sizeof(gre);
	memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
	       RTE_DIM(mplsogre_encap_conf.label));
	/* Set the Bottom-of-Stack bit: single MPLS label. */
	mpls.label_tc_s[2] |= 0x1;
	memcpy(header, &mpls, sizeof(mpls));
	header += sizeof(mpls);
	action_encap_data->conf.size = header -
		action_encap_data->data;
	action->conf = &action_encap_data->conf;
/**
 * Parse MPLSOGRE decap action.
 *
 * Builds a RAW_DECAP data buffer describing the headers to strip:
 * Ethernet [+ VLAN] + IPv4-or-IPv6 + GRE + MPLS.
 *
 * NOTE(review): most selectors/fields are read from mplsogre_encap_conf
 * rather than mplsogre_decap_conf — presumably decap mirrors the encap
 * configuration; verify against the "set mplsogre_decap" command.
 */
parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
			       const char *str, unsigned int len,
			       void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_decap_data *action_decap_data;
	struct rte_flow_item_eth eth = { .type = 0, };
	struct rte_flow_item_vlan vlan = {.tci = 0};
	struct rte_flow_item_ipv4 ipv4 = {
		.next_proto_id = IPPROTO_GRE,
	struct rte_flow_item_ipv6 ipv6 = {
		.proto = IPPROTO_GRE,
	struct rte_flow_item_gre gre = {
		.protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
	struct rte_flow_item_mpls mpls;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_decap_data = ctx->object;
	*action_decap_data = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap){
			.data = action_decap_data->data,
	header = action_decap_data->data;
	if (mplsogre_decap_conf.select_vlan)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	else if (mplsogre_encap_conf.select_ipv4)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	memcpy(eth.dst.addr_bytes,
	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
	memcpy(eth.src.addr_bytes,
	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	if (mplsogre_encap_conf.select_vlan) {
		if (mplsogre_encap_conf.select_ipv4)
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(header, &vlan, sizeof(vlan));
		header += sizeof(vlan);
	if (mplsogre_encap_conf.select_ipv4) {
		memcpy(header, &ipv4, sizeof(ipv4));
		header += sizeof(ipv4);
		memcpy(header, &ipv6, sizeof(ipv6));
		header += sizeof(ipv6);
	memcpy(header, &gre, sizeof(gre));
	header += sizeof(gre);
	/* MPLS content is irrelevant for decap; only its size matters. */
	memset(&mpls, 0, sizeof(mpls));
	memcpy(header, &mpls, sizeof(mpls));
	header += sizeof(mpls);
	action_decap_data->conf.size = header -
		action_decap_data->data;
	action->conf = &action_decap_data->conf;
/**
 * Parse MPLSOUDP encap action.
 *
 * Builds a RAW_ENCAP data buffer: Ethernet [+ VLAN] + IPv4-or-IPv6 +
 * UDP + MPLS, populated from the global mplsoudp_encap_conf.
 */
parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
			       const char *str, unsigned int len,
			       void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_encap_data *action_encap_data;
	struct rte_flow_item_eth eth = { .type = 0, };
	struct rte_flow_item_vlan vlan = {
		.tci = mplsoudp_encap_conf.vlan_tci,
	struct rte_flow_item_ipv4 ipv4 = {
		.src_addr = mplsoudp_encap_conf.ipv4_src,
		.dst_addr = mplsoudp_encap_conf.ipv4_dst,
		.next_proto_id = IPPROTO_UDP,
		.version_ihl = RTE_IPV4_VHL_DEF,
		.time_to_live = IPDEFTTL,
	struct rte_flow_item_ipv6 ipv6 = {
		.proto = IPPROTO_UDP,
		.hop_limits = IPDEFTTL,
	struct rte_flow_item_udp udp = {
		.src_port = mplsoudp_encap_conf.udp_src,
		.dst_port = mplsoudp_encap_conf.udp_dst,
	struct rte_flow_item_mpls mpls;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_encap_data = ctx->object;
	*action_encap_data = (struct action_raw_encap_data) {
		.conf = (struct rte_flow_action_raw_encap){
			.data = action_encap_data->data,
	header = action_encap_data->data;
	/* Outer EtherType reflects the next header actually emitted. */
	if (mplsoudp_encap_conf.select_vlan)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	else if (mplsoudp_encap_conf.select_ipv4)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	memcpy(eth.dst.addr_bytes,
	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
	memcpy(eth.src.addr_bytes,
	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	if (mplsoudp_encap_conf.select_vlan) {
		if (mplsoudp_encap_conf.select_ipv4)
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(header, &vlan, sizeof(vlan));
		header += sizeof(vlan);
	if (mplsoudp_encap_conf.select_ipv4) {
		memcpy(header, &ipv4, sizeof(ipv4));
		header += sizeof(ipv4);
		memcpy(&ipv6.hdr.src_addr,
		       &mplsoudp_encap_conf.ipv6_src,
		       sizeof(mplsoudp_encap_conf.ipv6_src));
		memcpy(&ipv6.hdr.dst_addr,
		       &mplsoudp_encap_conf.ipv6_dst,
		       sizeof(mplsoudp_encap_conf.ipv6_dst));
		memcpy(header, &ipv6, sizeof(ipv6));
		header += sizeof(ipv6);
	memcpy(header, &udp, sizeof(udp));
	header += sizeof(udp);
	memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
	       RTE_DIM(mplsoudp_encap_conf.label));
	/* Set the Bottom-of-Stack bit: single MPLS label. */
	mpls.label_tc_s[2] |= 0x1;
	memcpy(header, &mpls, sizeof(mpls));
	header += sizeof(mpls);
	action_encap_data->conf.size = header -
		action_encap_data->data;
	action->conf = &action_encap_data->conf;
/**
 * Parse MPLSOUDP decap action.
 *
 * Builds a RAW_DECAP data buffer describing the headers to strip:
 * Ethernet [+ VLAN] + IPv4-or-IPv6 + UDP + MPLS.
 *
 * NOTE(review): most selectors/fields are read from mplsoudp_encap_conf
 * rather than mplsoudp_decap_conf — presumably decap mirrors the encap
 * configuration; verify against the "set mplsoudp_decap" command.
 */
parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
			       const char *str, unsigned int len,
			       void *buf, unsigned int size)
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_decap_data *action_decap_data;
	struct rte_flow_item_eth eth = { .type = 0, };
	struct rte_flow_item_vlan vlan = {.tci = 0};
	struct rte_flow_item_ipv4 ipv4 = {
		.next_proto_id = IPPROTO_UDP,
	struct rte_flow_item_ipv6 ipv6 = {
		.proto = IPPROTO_UDP,
	/* 6635 is the IANA-assigned MPLS-over-UDP destination port. */
	struct rte_flow_item_udp udp = {
		.dst_port = rte_cpu_to_be_16(6635),
	struct rte_flow_item_mpls mpls;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_decap_data = ctx->object;
	*action_decap_data = (struct action_raw_decap_data) {
		.conf = (struct rte_flow_action_raw_decap){
			.data = action_decap_data->data,
	header = action_decap_data->data;
	if (mplsoudp_decap_conf.select_vlan)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
	else if (mplsoudp_encap_conf.select_ipv4)
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	memcpy(eth.dst.addr_bytes,
	       mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
	memcpy(eth.src.addr_bytes,
	       mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
	memcpy(header, &eth, sizeof(eth));
	header += sizeof(eth);
	if (mplsoudp_encap_conf.select_vlan) {
		if (mplsoudp_encap_conf.select_ipv4)
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
		memcpy(header, &vlan, sizeof(vlan));
		header += sizeof(vlan);
	if (mplsoudp_encap_conf.select_ipv4) {
		memcpy(header, &ipv4, sizeof(ipv4));
		header += sizeof(ipv4);
		memcpy(header, &ipv6, sizeof(ipv6));
		header += sizeof(ipv6);
	memcpy(header, &udp, sizeof(udp));
	header += sizeof(udp);
	/* MPLS content is irrelevant for decap; only its size matters. */
	memset(&mpls, 0, sizeof(mpls));
	memcpy(header, &mpls, sizeof(mpls));
	header += sizeof(mpls);
	action_decap_data->conf.size = header -
		action_decap_data->data;
	action->conf = &action_decap_data->conf;
/**
 * Parse the index argument of a raw_decap action and bind the action's
 * conf to the pre-built raw_decap_confs[idx] buffer.
 */
parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
				const char *str, unsigned int len, void *buf,
	struct action_raw_decap_data *action_raw_decap_data;
	struct rte_flow_action *action;
	const struct arg *arg;
	struct buffer *out = buf;
	RTE_SET_USED(token);
	/* Parse the index into action_raw_decap_data->idx, bounded to
	 * [0, RAW_ENCAP_CONFS_MAX_NUM - 1]. */
	arg = ARGS_ENTRY_ARB_BOUNDED
		(offsetof(struct action_raw_decap_data, idx),
		 sizeof(((struct action_raw_decap_data *)0)->idx),
		 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
	if (push_args(ctx, arg))
	ret = parse_int(ctx, token, str, len, NULL, 0);
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	action_raw_decap_data = ctx->object;
	idx = action_raw_decap_data->idx;
	/* Point the action at the globally configured raw buffer. */
	action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
	action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
	action->conf = &action_raw_decap_data->conf;
/**
 * Parse the index argument of a raw_encap action and bind the action's
 * conf to the pre-built raw_encap_confs[idx] buffer.
 */
parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
				const char *str, unsigned int len, void *buf,
	struct action_raw_encap_data *action_raw_encap_data;
	struct rte_flow_action *action;
	const struct arg *arg;
	struct buffer *out = buf;
	RTE_SET_USED(token);
	/* Only valid while expecting the index value token. */
	if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
	arg = ARGS_ENTRY_ARB_BOUNDED
		(offsetof(struct action_raw_encap_data, idx),
		 sizeof(((struct action_raw_encap_data *)0)->idx),
		 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
	if (push_args(ctx, arg))
	ret = parse_int(ctx, token, str, len, NULL, 0);
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	action_raw_encap_data = ctx->object;
	idx = action_raw_encap_data->idx;
	/* Point the action at the globally configured raw buffer. */
	action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
	action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
	action_raw_encap_data->conf.preserve = NULL;
	action->conf = &action_raw_encap_data->conf;
/**
 * Parse a raw_encap action; defaults to raw_encap_confs[0] until an
 * explicit index token overrides it.
 */
parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len, void *buf,
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_encap_data *action_raw_encap_data = NULL;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_raw_encap_data = ctx->object;
	/* Default to configuration slot 0. */
	action_raw_encap_data->conf.data = raw_encap_confs[0].data;
	action_raw_encap_data->conf.preserve = NULL;
	action_raw_encap_data->conf.size = raw_encap_confs[0].size;
	action->conf = &action_raw_encap_data->conf;
/**
 * Parse a raw_decap action; defaults to raw_decap_confs[0] until an
 * explicit index token overrides it.
 */
parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len, void *buf,
	struct buffer *out = buf;
	struct rte_flow_action *action;
	struct action_raw_decap_data *action_raw_decap_data = NULL;
	ret = parse_vc(ctx, token, str, len, buf, size);
	/* Nothing else to do if there is no buffer. */
	if (!out->args.vc.actions_n)
	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
	/* Point to selected object. */
	ctx->object = out->args.vc.data;
	ctx->objmask = NULL;
	/* Copy the headers to the buffer. */
	action_raw_decap_data = ctx->object;
	/* Default to configuration slot 0. */
	action_raw_decap_data->conf.data = raw_decap_confs[0].data;
	action_raw_decap_data->conf.size = raw_decap_confs[0].size;
	action->conf = &action_raw_decap_data->conf;
/**
 * Parse a set_meta action; registers the dynamic metadata mbuf field so
 * the metadata can actually be carried with packets.
 */
parse_vc_action_set_meta(struct context *ctx, const struct token *token,
			 const char *str, unsigned int len, void *buf,
	ret = parse_vc(ctx, token, str, len, buf, size);
	ret = rte_flow_dynf_metadata_register();
/**
 * Parse tokens for destroy command.
 *
 * First invocation initializes the command and the rule-ID array right
 * after the buffer header; subsequent invocations append one rule ID
 * each, bounds-checked against the buffer size.
 */
parse_destroy(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	if (!out->command) {
		if (ctx->curr != DESTROY)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
	ctx->objmask = NULL;
	/* Rule IDs are stored aligned, immediately after the header. */
	out->args.destroy.rule =
		(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Refuse another ID if it would overflow the buffer. */
	if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
	     sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
	ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
	ctx->objmask = NULL;
/**
 * Parse tokens for flush command.
 *
 * Flush takes no per-rule arguments; only the command is recorded.
 */
parse_flush(struct context *ctx, const struct token *token,
	    const char *str, unsigned int len,
	    void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	if (!out->command) {
		if (ctx->curr != FLUSH)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
	ctx->objmask = NULL;
/**
 * Parse tokens for query command.
 *
 * Records the command on first invocation; rule ID and action name are
 * filled in by later tokens.
 */
parse_query(struct context *ctx, const struct token *token,
	    const char *str, unsigned int len,
	    void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	if (!out->command) {
		if (ctx->curr != QUERY)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
	ctx->objmask = NULL;
/**
 * Parse action names.
 *
 * Matches str against the names of all tokens listed in next_action[]
 * and stores the matching action's private data at the argument offset.
 */
parse_action(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	struct buffer *out = buf;
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Parse action name. */
	for (i = 0; next_action[i]; ++i) {
		const struct parse_action_priv *priv;
		token = &token_list[next_action[i]];
		/* Partial match is accepted (prefix of the token name). */
		if (strcmp_partial(token->name, str, len))
		memcpy((uint8_t *)ctx->object + arg->offset,
	/* No match: restore the argument for the caller. */
	push_args(ctx, arg);
/**
 * Parse tokens for list command.
 *
 * First invocation records the command and sets up the group-ID array
 * after the buffer header; subsequent invocations append one group ID,
 * bounds-checked against the buffer size.
 */
parse_list(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	if (!out->command) {
		if (ctx->curr != LIST)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
	ctx->objmask = NULL;
	/* Group IDs are stored aligned, immediately after the header. */
	out->args.list.group =
		(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
	/* Refuse another ID if it would overflow the buffer. */
	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
	ctx->object = out->args.list.group + out->args.list.group_n++;
	ctx->objmask = NULL;
/**
 * Parse tokens for isolate command.
 *
 * Records the command; the boolean on/off argument is handled by a
 * later token.
 */
parse_isolate(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	if (!out->command) {
		if (ctx->curr != ISOLATE)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
	ctx->objmask = NULL;
 * Parse signed/unsigned integers 8 to 64-bit long.
 *
 * Last argument (ctx->args) is retrieved to determine integer type and
parse_int(struct context *ctx, const struct token *token,
	  const char *str, unsigned int len,
	  void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* strtoimax/strtoumax accept 0x/0 prefixes via base 0. */
		(uintmax_t)strtoimax(str, &end, 0) :
		strtoumax(str, &end, 0);
	/* The whole token must be consumed for the parse to count. */
	if (errno || (size_t)(end - str) != len)
	/* Range check honors the argument's signedness. */
		((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
				(intmax_t)u > (intmax_t)arg->max)) ||
		 (!arg->sign && (u < arg->min || u > arg->max))))
	/* Bit-field arguments go through the dedicated filler; the mask
	 * (if any) gets all bits set. */
		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
	buf = (uint8_t *)ctx->object + arg->offset;
	/* Reject values that do not fit the destination width. */
	if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
	case sizeof(uint8_t):
		*(uint8_t *)buf = u;
	case sizeof(uint16_t):
		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
	/* 24-bit fields (e.g. VNI) are stored byte by byte. */
	case sizeof(uint8_t [3]):
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		((uint8_t *)buf)[0] = u;
		((uint8_t *)buf)[1] = u >> 8;
		((uint8_t *)buf)[2] = u >> 16;
		((uint8_t *)buf)[0] = u >> 16;
		((uint8_t *)buf)[1] = u >> 8;
		((uint8_t *)buf)[2] = u;
	case sizeof(uint32_t):
		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
	case sizeof(uint64_t):
		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
	/* Repeat the store into the mask object when one is selected. */
	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
		buf = (uint8_t *)ctx->objmask + arg->offset;
	/* Restore the argument on failure paths. */
	push_args(ctx, arg);
 * Three arguments (ctx->args) are retrieved from the stack to store data,
 * its actual length and address (in that order).
parse_string(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	/* Arguments are expected. */
	push_args(ctx, arg_data);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	if (arg_data->mask || size < len)
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", len);
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, str, len);
	/* Zero-pad the remainder of the destination field. */
	memset((uint8_t *)buf + len, 0x00, size - len);
	memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (uint8_t *)ctx->object + arg_data->offset
		memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
		       (uint8_t *)ctx->objmask + arg_data->offset
	/* Restore arguments in original order on the error path. */
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
/**
 * Convert a hexadecimal digit string into raw bytes.
 *
 * Consumes two characters per output byte; *size is the input length in
 * characters and is updated by the caller's convention.
 */
parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
	/* Check input parameters */
	if ((src == NULL) ||
	/* Convert chars to bytes */
	for (i = 0, len = 0; i < *size; i += 2) {
		/* Parse one byte (two hex digits) at a time. */
		snprintf(tmp, 3, "%s", src + i);
		dst[len++] = strtoul(tmp, &c, 16);
/**
 * Parse a hex-encoded byte string (with optional 0x/0X prefix) into the
 * data/length/address argument triple, mirroring parse_string().
 */
parse_hex(struct context *ctx, const struct token *token,
	  const char *str, unsigned int len,
	  void *buf, unsigned int size)
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	unsigned int hexlen = len;
	/* Temporary decode buffer; caps input at 256 hex chars. */
	unsigned int length = 256;
	uint8_t hex_tmp[length];
	/* Arguments are expected. */
	push_args(ctx, arg_data);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	/* translate bytes string to array. */
	if (str[0] == '0' && ((str[1] == 'x') ||
	if (hexlen > length)
	ret = parse_hex_string(str, hex_tmp, &hexlen);
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, hex_tmp, hexlen);
	memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
	memset((uint8_t *)ctx->objmask + arg_data->offset,
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (uint8_t *)ctx->object + arg_data->offset
		memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
		       (uint8_t *)ctx->objmask + arg_data->offset
	/* Restore arguments in original order on the error path. */
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
 * Parse a MAC address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_mac_addr(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	struct rte_ether_addr tmp;
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
	/* The whole token must be consumed for the parse to count. */
	if (ret < 0 || (unsigned int)ret != len)
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore the argument on failure paths. */
	push_args(ctx, arg);
 * Parse an IPv4 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_ipv4_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	/* NUL-terminate a local copy: inet_pton() needs a C string. */
	memcpy(str2, str, len);
	ret = inet_pton(AF_INET, str2, &tmp);
	/* Attempt integer parsing. */
	push_args(ctx, arg);
	return parse_int(ctx, token, str, len, buf, size);
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore the argument on failure paths. */
	push_args(ctx, arg);
 * Parse an IPv6 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_ipv6_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	struct in6_addr tmp;
	/* Argument is expected. */
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
	/* Only network endian is supported. */
	/* NUL-terminate a local copy: inet_pton() needs a C string. */
	memcpy(str2, str, len);
	ret = inet_pton(AF_INET6, str2, &tmp);
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Restore the argument on failure paths. */
	push_args(ctx, arg);
/** Boolean values (even indices stand for false). */
static const char *const boolean_name[] = {
 * Parse a boolean value.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
parse_boolean(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
	const struct arg *arg = pop_args(ctx);
	/* Argument is expected. */
	for (i = 0; boolean_name[i]; ++i)
		if (!strcmp_partial(boolean_name[i], str, len))
	/* Process token as integer. */
	if (boolean_name[i])
		/* Even indices are "false" names, odd are "true". */
		str = i & 1 ? "1" : "0";
	push_args(ctx, arg);
	ret = parse_int(ctx, token, str, strlen(str), buf, size);
	/* Report the original token length as consumed. */
	return ret > 0 ? (int)len : ret;
/**
 * Parse port and update context.
 *
 * Uses a scratch buffer when the caller supplied none, then records the
 * parsed port ID in ctx->port.
 */
parse_port(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = &(struct buffer){ .port = 0 };
	ctx->objmask = NULL;
	size = sizeof(*out);
	ret = parse_int(ctx, token, str, len, out, size);
	ctx->port = out->port;
/** Parse set command, initialize output buffer for subsequent tokens. */
parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len,
			  void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
	ctx->objmask = NULL;
	/* Record which of raw_encap/raw_decap was selected. */
	out->command = ctx->curr;
 * Parse set raw_encap/raw_decap command,
 * initialize output buffer for subsequent tokens.
parse_set_init(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
	struct buffer *out = buf;
	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	/* 0x22 poison helps spot reads of never-written pattern space. */
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objmask = NULL;
	if (!out->command) {
		if (ctx->curr != SET)
		if (sizeof(*out) > size)
		out->command = ctx->curr;
		/* Spec data grows down from the end of the buffer. */
		out->args.vc.data = (uint8_t *)out + size;
		/* All we need is pattern */
		out->args.vc.pattern =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
		ctx->object = out->args.vc.pattern;
/** No completion. */
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
/** Complete boolean values. */
comp_boolean(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
	/* Return the ent-th candidate when buf is set, else count them. */
	for (i = 0; boolean_name[i]; ++i)
		if (buf && i == ent)
			return strlcpy(buf, boolean_name[i], size);
5780 /** Complete action names. */
/* Returns the name of the ent-th token in the next_action[] list. */
5782 comp_action(struct context *ctx, const struct token *token,
5783 unsigned int ent, char *buf, unsigned int size)
5789 for (i = 0; next_action[i]; ++i)
5790 if (buf && i == ent)
5791 return strlcpy(buf, token_list[next_action[i]].name,
5798 /** Complete available ports. */
/* Enumerates valid ethdev port IDs; writes the ent-th one into buf. */
5800 comp_port(struct context *ctx, const struct token *token,
5801 unsigned int ent, char *buf, unsigned int size)
5808 RTE_ETH_FOREACH_DEV(p) {
5809 if (buf && i == ent)
5810 return snprintf(buf, size, "%u", p);
5818 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port currently selected in the parser
 * context and returns the ent-th rule ID. Bails out when the port is
 * invalid or RTE_PORT_ALL is selected.
 */
5820 comp_rule_id(struct context *ctx, const struct token *token,
5821 unsigned int ent, char *buf, unsigned int size)
5824 struct rte_port *port;
5825 struct port_flow *pf;
5828 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5829 ctx->port == (portid_t)RTE_PORT_ALL)
5831 port = &ports[ctx->port];
5832 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5833 if (buf && i == ent)
5834 return snprintf(buf, size, "%u", pf->id);
5842 /** Complete type field for RSS action. */
/* Offers each rss_type_table[] entry, then the literal "end" terminator. */
5844 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5845 unsigned int ent, char *buf, unsigned int size)
5851 for (i = 0; rss_type_table[i].str; ++i)
5856 return strlcpy(buf, rss_type_table[ent].str, size);
5858 return snprintf(buf, size, "end");
5862 /** Complete queue field for RSS action. */
/* Offers queue indices as plain numbers, then the "end" terminator. */
5864 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5865 unsigned int ent, char *buf, unsigned int size)
5872 return snprintf(buf, size, "%u", ent);
5874 return snprintf(buf, size, "end");
5878 /** Complete index number for set raw_encap/raw_decap commands. */
/* Offers every valid global raw config slot: 0..RAW_ENCAP_CONFS_MAX_NUM-1. */
5880 comp_set_raw_index(struct context *ctx, const struct token *token,
5881 unsigned int ent, char *buf, unsigned int size)
5887 RTE_SET_USED(token);
5888 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
5889 if (buf && idx == ent)
5890 return snprintf(buf, size, "%u", idx);
5896 /** Internal context. */
/* Single shared parser state; both "flow" and "set raw_*" commands use it. */
5897 static struct context cmd_flow_context;
5899 /** Global parser instance (cmdline API). */
/* Forward declarations; definitions appear later in this file. */
5900 cmdline_parse_inst_t cmd_flow;
5901 cmdline_parse_inst_t cmd_set_raw;
5903 /** Initialize context. */
/* Resets the shared parser state field by field before a new command. */
5905 cmd_flow_context_init(struct context *ctx)
5907 /* A full memset() is not necessary. */
5917 ctx->objmask = NULL;
5920 /** Parse a token (cmdline API). */
/*
 * Consumes one whitespace-delimited argument from src, tries each candidate
 * token from the current expectation list, and on success pushes that
 * token's follow-up token/argument lists onto the context stacks.
 * '#' starts a comment and terminates the argument, like whitespace.
 */
5922 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5925 struct context *ctx = &cmd_flow_context;
5926 const struct token *token;
5927 const enum index *list;
5932 token = &token_list[ctx->curr];
5933 /* Check argument length. */
5936 for (len = 0; src[len]; ++len)
5937 if (src[len] == '#' || isspace(src[len]))
5941 /* Last argument and EOL detection. */
5942 for (i = len; src[i]; ++i)
5943 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5945 else if (!isspace(src[i])) {
5950 if (src[i] == '\r' || src[i] == '\n') {
5954 /* Initialize context if necessary. */
5955 if (!ctx->next_num) {
5958 ctx->next[ctx->next_num++] = token->next[0];
5960 /* Process argument through candidates. */
5961 ctx->prev = ctx->curr;
5962 list = ctx->next[ctx->next_num - 1];
5963 for (i = 0; list[i]; ++i) {
5964 const struct token *next = &token_list[list[i]];
5967 ctx->curr = list[i];
/* Tokens with a call() override take priority over parse_default(). */
5969 tmp = next->call(ctx, next, src, len, result, size);
5971 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate matches only if it consumed the whole argument. */
5972 if (tmp == -1 || tmp != len)
5980 /* Push subsequent tokens if any. */
5982 for (i = 0; token->next[i]; ++i) {
/* Guard against overflowing the fixed-size next[] stack. */
5983 if (ctx->next_num == RTE_DIM(ctx->next))
5985 ctx->next[ctx->next_num++] = token->next[i];
5987 /* Push arguments if any. */
5989 for (i = 0; token->args[i]; ++i) {
5990 if (ctx->args_num == RTE_DIM(ctx->args))
5992 ctx->args[ctx->args_num++] = token->args[i];
5997 /** Return number of completion entries (cmdline API). */
5999 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
6001 struct context *ctx = &cmd_flow_context;
6002 const struct token *token = &token_list[ctx->curr];
6003 const enum index *list;
6007 /* Count number of tokens in current list. */
/* Prefer the pending expectation list; fall back to the token's own. */
6009 list = ctx->next[ctx->next_num - 1];
6011 list = token->next[0];
6012 for (i = 0; list[i]; ++i)
6017 * If there is a single token, use its completion callback, otherwise
6018 * return the number of entries.
6020 token = &token_list[list[0]];
6021 if (i == 1 && token->comp) {
6022 /* Save index for cmd_flow_get_help(). */
6023 ctx->prev = list[0];
/* Delegate: comp() with NULL buf returns the entry count. */
6024 return token->comp(ctx, token, 0, NULL, 0);
6029 /** Return a completion entry (cmdline API). */
/*
 * Writes the index-th completion candidate into dst. Mirrors the counting
 * logic of cmd_flow_complete_get_nb() above.
 */
6031 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
6032 char *dst, unsigned int size)
6034 struct context *ctx = &cmd_flow_context;
6035 const struct token *token = &token_list[ctx->curr];
6036 const enum index *list;
6040 /* Count number of tokens in current list. */
6042 list = ctx->next[ctx->next_num - 1];
6044 list = token->next[0];
6045 for (i = 0; list[i]; ++i)
6049 /* If there is a single token, use its completion callback. */
6050 token = &token_list[list[0]];
6051 if (i == 1 && token->comp) {
6052 /* Save index for cmd_flow_get_help(). */
6053 ctx->prev = list[0];
/* Normalize the callback's result to the cmdline API's -1/0 contract. */
6054 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
6056 /* Otherwise make sure the index is valid and use defaults. */
6059 token = &token_list[list[index]];
6060 strlcpy(dst, token->name, size);
6061 /* Save index for cmd_flow_get_help(). */
6062 ctx->prev = list[index];
6066 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) to pick the token. */
6068 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
6070 struct context *ctx = &cmd_flow_context;
6071 const struct token *token = &token_list[ctx->prev];
6076 /* Set token type and update global help with details. */
6077 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
/* Prefer the detailed help string; fall back to the token name. */
6079 cmd_flow.help_str = token->help;
6081 cmd_flow.help_str = token->name;
6085 /** Token definition template (cmdline API). */
/* Single shared token header; all dynamic flow tokens reuse these ops. */
6086 static struct cmdline_token_hdr cmd_flow_token_hdr = {
6087 .ops = &(struct cmdline_token_ops){
6088 .parse = cmd_flow_parse,
6089 .complete_get_nb = cmd_flow_complete_get_nb,
6090 .complete_get_elt = cmd_flow_complete_get_elt,
6091 .get_help = cmd_flow_get_help,
6096 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library for each successive token slot; returns
 * the shared token header while more input is expected.
 */
6098 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
6099 cmdline_parse_token_hdr_t **hdr_inst)
6101 struct context *ctx = &cmd_flow_context;
6103 /* Always reinitialize context before requesting the first token. */
6104 if (!(hdr_inst - cmd_flow.tokens))
6105 cmd_flow_context_init(ctx);
6106 /* Return NULL when no more tokens are expected. */
6107 if (!ctx->next_num && ctx->curr) {
6111 /* Determine if command should end here. */
6112 if (ctx->eol && ctx->last && ctx->next_num) {
6113 const enum index *list = ctx->next[ctx->next_num - 1];
6116 for (i = 0; list[i]; ++i) {
6123 *hdr = &cmd_flow_token_hdr;
6126 /** Dispatch parsed buffer to function calls. */
/* Maps each parsed flow sub-command to its port_flow_*() implementation. */
6128 cmd_flow_parsed(const struct buffer *in)
6130 switch (in->command) {
6132 port_flow_validate(in->port, &in->args.vc.attr,
6133 in->args.vc.pattern, in->args.vc.actions);
6136 port_flow_create(in->port, &in->args.vc.attr,
6137 in->args.vc.pattern, in->args.vc.actions);
6140 port_flow_destroy(in->port, in->args.destroy.rule_n,
6141 in->args.destroy.rule);
6144 port_flow_flush(in->port);
6147 port_flow_query(in->port, in->args.query.rule,
6148 &in->args.query.action);
6151 port_flow_list(in->port, in->args.list.group_n,
6152 in->args.list.group);
6155 port_flow_isolate(in->port, in->args.isolate.set);
6162 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as token slot pointer (generation) and parsed buffer. */
6164 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
6167 cmd_flow_tok(arg0, arg2);
6169 cmd_flow_parsed(arg0);
6172 /** Global parser instance (cmdline API). */
6173 cmdline_parse_inst_t cmd_flow = {
6175 .data = NULL, /**< Unused. */
6176 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6179 }, /**< Tokens are returned by cmd_flow_tok(). */
6182 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Patch protocol header fields that the command line cannot express:
 * ethertype/next-protocol chaining from next_proto, IP version bits, and
 * fixed flag values for tunnel headers.
 */
6185 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
6187 struct rte_flow_item_ipv4 *ipv4;
6188 struct rte_flow_item_eth *eth;
6189 struct rte_flow_item_ipv6 *ipv6;
6190 struct rte_flow_item_vxlan *vxlan;
6191 struct rte_flow_item_vxlan_gpe *gpe;
6192 struct rte_flow_item_nvgre *nvgre;
6193 uint32_t ipv6_vtc_flow;
6195 switch (item->type) {
6196 case RTE_FLOW_ITEM_TYPE_ETH:
6197 eth = (struct rte_flow_item_eth *)buf;
6199 eth->type = rte_cpu_to_be_16(next_proto);
6201 case RTE_FLOW_ITEM_TYPE_IPV4:
6202 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* version=4, IHL=5 (20-byte header). */
6203 ipv4->hdr.version_ihl = 0x45;
/* Only fill next_proto_id if the user left it unset. */
6204 if (next_proto && ipv4->hdr.next_proto_id == 0)
6205 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
6207 case RTE_FLOW_ITEM_TYPE_IPV6:
6208 ipv6 = (struct rte_flow_item_ipv6 *)buf;
6209 if (next_proto && ipv6->hdr.proto == 0)
6210 ipv6->hdr.proto = (uint8_t)next_proto;
6211 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
6212 ipv6_vtc_flow &= 0x0FFFFFFF; /* reset version bits. */
6213 ipv6_vtc_flow |= 0x60000000; /* set ipv6 version. */
6214 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
6216 case RTE_FLOW_ITEM_TYPE_VXLAN:
6217 vxlan = (struct rte_flow_item_vxlan *)buf;
/* I flag: VNI field is valid. */
6218 vxlan->flags = 0x08;
6220 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6221 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
6224 case RTE_FLOW_ITEM_TYPE_NVGRE:
6225 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558: Transparent Ethernet Bridging; 0x2000: key-present bit. */
6226 nvgre->protocol = rte_cpu_to_be_16(0x6558);
6227 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
6234 /** Helper of get item's default mask. */
/*
 * Returns a pointer to the rte_flow default mask for the given item type,
 * or NULL for types without one. The GRE key mask is a local static since
 * rte_flow does not export one for that item.
 */
6236 flow_item_default_mask(const struct rte_flow_item *item)
6238 const void *mask = NULL;
6239 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6241 switch (item->type) {
6242 case RTE_FLOW_ITEM_TYPE_ANY:
6243 mask = &rte_flow_item_any_mask;
6245 case RTE_FLOW_ITEM_TYPE_VF:
6246 mask = &rte_flow_item_vf_mask;
6248 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6249 mask = &rte_flow_item_port_id_mask;
6251 case RTE_FLOW_ITEM_TYPE_RAW:
6252 mask = &rte_flow_item_raw_mask;
6254 case RTE_FLOW_ITEM_TYPE_ETH:
6255 mask = &rte_flow_item_eth_mask;
6257 case RTE_FLOW_ITEM_TYPE_VLAN:
6258 mask = &rte_flow_item_vlan_mask;
6260 case RTE_FLOW_ITEM_TYPE_IPV4:
6261 mask = &rte_flow_item_ipv4_mask;
6263 case RTE_FLOW_ITEM_TYPE_IPV6:
6264 mask = &rte_flow_item_ipv6_mask;
6266 case RTE_FLOW_ITEM_TYPE_ICMP:
6267 mask = &rte_flow_item_icmp_mask;
6269 case RTE_FLOW_ITEM_TYPE_UDP:
6270 mask = &rte_flow_item_udp_mask;
6272 case RTE_FLOW_ITEM_TYPE_TCP:
6273 mask = &rte_flow_item_tcp_mask;
6275 case RTE_FLOW_ITEM_TYPE_SCTP:
6276 mask = &rte_flow_item_sctp_mask;
6278 case RTE_FLOW_ITEM_TYPE_VXLAN:
6279 mask = &rte_flow_item_vxlan_mask;
6281 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6282 mask = &rte_flow_item_vxlan_gpe_mask;
6284 case RTE_FLOW_ITEM_TYPE_E_TAG:
6285 mask = &rte_flow_item_e_tag_mask;
6287 case RTE_FLOW_ITEM_TYPE_NVGRE:
6288 mask = &rte_flow_item_nvgre_mask;
6290 case RTE_FLOW_ITEM_TYPE_MPLS:
6291 mask = &rte_flow_item_mpls_mask;
6293 case RTE_FLOW_ITEM_TYPE_GRE:
6294 mask = &rte_flow_item_gre_mask;
6296 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6297 mask = &gre_key_default_mask;
6299 case RTE_FLOW_ITEM_TYPE_META:
6300 mask = &rte_flow_item_meta_mask;
6302 case RTE_FLOW_ITEM_TYPE_FUZZY:
6303 mask = &rte_flow_item_fuzzy_mask;
6305 case RTE_FLOW_ITEM_TYPE_GTP:
6306 mask = &rte_flow_item_gtp_mask;
6308 case RTE_FLOW_ITEM_TYPE_ESP:
6309 mask = &rte_flow_item_esp_mask;
6311 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6312 mask = &rte_flow_item_gtp_psc_mask;
6314 case RTE_FLOW_ITEM_TYPE_GENEVE:
6315 mask = &rte_flow_item_geneve_mask;
6317 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
6318 mask = &rte_flow_item_pppoe_proto_id_mask;
6320 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
6321 mask = &rte_flow_item_l2tpv3oip_mask;
6331 /** Dispatch parsed buffer to function calls. */
/*
 * Build the raw encap/decap byte buffer from the parsed pattern items.
 * Items are given outermost-first (L2 -> L4), so they are walked in
 * reverse and packed from the end of the buffer (data_tail) downward,
 * then moved to the start of the buffer once the total size is known.
 * NOTE(review): listing is elided; `i`/`size`/`proto` declarations and
 * some break statements are not visible here.
 */
6333 cmd_set_raw_parsed(const struct buffer *in)
6335 uint32_t n = in->args.vc.pattern_n;
6337 struct rte_flow_item *item = NULL;
6339 uint8_t *data = NULL;
6340 uint8_t *data_tail = NULL;
6341 size_t *total_size = NULL;
6342 uint16_t upper_layer = 0;
6344 uint16_t idx = in->port; /* We borrow port field as index */
6346 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
6347 in->command == SET_RAW_DECAP);
/* Select the global slot: encap and decap use separate tables. */
6348 if (in->command == SET_RAW_ENCAP) {
6349 total_size = &raw_encap_confs[idx].size;
6350 data = (uint8_t *)&raw_encap_confs[idx].data;
6352 total_size = &raw_decap_confs[idx].size;
6353 data = (uint8_t *)&raw_decap_confs[idx].data;
6356 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6357 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
6358 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
6359 for (i = n - 1 ; i >= 0; --i) {
6360 item = in->args.vc.pattern + i;
/* Fall back to the item type's default mask when no spec was given. */
6361 if (item->spec == NULL)
6362 item->spec = flow_item_default_mask(item);
/* size = bytes to copy; proto = ethertype/protocol for the next
 * (outer) header's chaining field. */
6363 switch (item->type) {
6364 case RTE_FLOW_ITEM_TYPE_ETH:
6365 size = sizeof(struct rte_flow_item_eth);
6367 case RTE_FLOW_ITEM_TYPE_VLAN:
6368 size = sizeof(struct rte_flow_item_vlan);
6369 proto = RTE_ETHER_TYPE_VLAN;
6371 case RTE_FLOW_ITEM_TYPE_IPV4:
6372 size = sizeof(struct rte_flow_item_ipv4);
6373 proto = RTE_ETHER_TYPE_IPV4;
6375 case RTE_FLOW_ITEM_TYPE_IPV6:
6376 size = sizeof(struct rte_flow_item_ipv6);
6377 proto = RTE_ETHER_TYPE_IPV6;
6379 case RTE_FLOW_ITEM_TYPE_UDP:
6380 size = sizeof(struct rte_flow_item_udp);
6383 case RTE_FLOW_ITEM_TYPE_TCP:
6384 size = sizeof(struct rte_flow_item_tcp);
6387 case RTE_FLOW_ITEM_TYPE_VXLAN:
6388 size = sizeof(struct rte_flow_item_vxlan);
6390 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6391 size = sizeof(struct rte_flow_item_vxlan_gpe);
6393 case RTE_FLOW_ITEM_TYPE_GRE:
6394 size = sizeof(struct rte_flow_item_gre);
6397 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6398 size = sizeof(rte_be32_t);
6401 case RTE_FLOW_ITEM_TYPE_MPLS:
6402 size = sizeof(struct rte_flow_item_mpls);
6405 case RTE_FLOW_ITEM_TYPE_NVGRE:
6406 size = sizeof(struct rte_flow_item_nvgre);
6409 case RTE_FLOW_ITEM_TYPE_GENEVE:
6410 size = sizeof(struct rte_flow_item_geneve);
6412 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
6413 size = sizeof(struct rte_flow_item_l2tpv3oip);
6417 printf("Error - Not supported item\n");
/* Unsupported item: discard the partially built buffer. */
6419 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6422 *total_size += size;
/* Copy the item's spec just below everything copied so far. */
6423 rte_memcpy(data_tail - (*total_size), item->spec, size);
6424 /* update some fields which cannot be set by cmdline */
6425 update_fields((data_tail - (*total_size)), item,
/* Remember this layer's protocol for the next (outer) item. */
6427 upper_layer = proto;
6429 if (verbose_level & 0x1)
6430 printf("total data size is %zu\n", (*total_size));
6431 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
/* Shift the packed headers to the buffer start (regions may overlap). */
6432 memmove(data, (data_tail - (*total_size)), *total_size);
6435 /** Populate help strings for current token (cmdline API). */
/* Same logic as cmd_flow_get_help() but updates cmd_set_raw.help_str. */
6437 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
6440 struct context *ctx = &cmd_flow_context;
6441 const struct token *token = &token_list[ctx->prev];
6446 /* Set token type and update global help with details. */
6447 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
6449 cmd_set_raw.help_str = token->help;
6451 cmd_set_raw.help_str = token->name;
6455 /** Token definition template (cmdline API). */
/* Reuses cmd_flow's parse/complete ops; only the help callback differs. */
6456 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
6457 .ops = &(struct cmdline_token_ops){
6458 .parse = cmd_flow_parse,
6459 .complete_get_nb = cmd_flow_complete_get_nb,
6460 .complete_get_elt = cmd_flow_complete_get_elt,
6461 .get_help = cmd_set_raw_get_help,
6466 /** Populate the next dynamic token. */
/*
 * Token generator for "set raw_*"; like cmd_flow_tok() but the grammar
 * entry point is START_SET instead of the default flow start token.
 */
6468 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
6469 cmdline_parse_token_hdr_t **hdr_inst)
6471 struct context *ctx = &cmd_flow_context;
6473 /* Always reinitialize context before requesting the first token. */
6474 if (!(hdr_inst - cmd_set_raw.tokens)) {
6475 cmd_flow_context_init(ctx);
6476 ctx->curr = START_SET;
6478 /* Return NULL when no more tokens are expected. */
6479 if (!ctx->next_num && (ctx->curr != START_SET)) {
6483 /* Determine if command should end here. */
6484 if (ctx->eol && ctx->last && ctx->next_num) {
6485 const enum index *list = ctx->next[ctx->next_num - 1];
6488 for (i = 0; list[i]; ++i) {
6495 *hdr = &cmd_set_raw_token_hdr;
6498 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as token slot pointer (generation) and parsed buffer. */
6500 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
6503 cmd_set_raw_tok(arg0, arg2);
6505 cmd_set_raw_parsed(arg0);
6508 /** Global parser instance (cmdline API). */
6509 cmdline_parse_inst_t cmd_set_raw = {
6510 .f = cmd_set_raw_cb,
6511 .data = NULL, /**< Unused. */
6512 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6515 }, /**< Tokens are returned by cmd_flow_tok(). */
6518 /* *** display raw_encap/raw_decap buf */
/* Result layout for "show raw_encap|raw_decap <index>|all". */
6519 struct cmd_show_set_raw_result {
6520 cmdline_fixed_string_t cmd_show;
6521 cmdline_fixed_string_t cmd_what;
6522 cmdline_fixed_string_t cmd_all;
/*
 * Hex-dump one or all stored raw_encap/raw_decap buffers.
 * "all" iterates over every slot; otherwise the index is range-checked.
 */
6527 cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
6529 struct cmd_show_set_raw_result *res = parsed_result;
6530 uint16_t index = res->cmd_index;
6532 uint8_t *raw_data = NULL;
6533 size_t raw_size = 0;
6534 char title[16] = {0};
6538 if (!strcmp(res->cmd_all, "all")) {
6541 } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
6542 printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
/* cmd_what selects the encap or decap table. */
6546 if (!strcmp(res->cmd_what, "raw_encap")) {
6547 raw_data = (uint8_t *)&raw_encap_confs[index].data;
6548 raw_size = raw_encap_confs[index].size;
6549 snprintf(title, 16, "\nindex: %u", index);
6550 rte_hexdump(stdout, title, raw_data, raw_size);
6552 raw_data = (uint8_t *)&raw_decap_confs[index].data;
6553 raw_size = raw_decap_confs[index].size;
6554 snprintf(title, 16, "\nindex: %u", index);
6555 rte_hexdump(stdout, title, raw_data, raw_size);
6557 } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
/* Token definitions for the "show raw_encap|raw_decap" commands. */
6560 cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
6561 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6563 cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
6564 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6565 cmd_what, "raw_encap#raw_decap");
6566 cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
6567 TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
6569 cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
6570 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
/* Variant taking an explicit numeric index. */
6572 cmdline_parse_inst_t cmd_show_set_raw = {
6573 .f = cmd_show_set_raw_parsed,
6575 .help_str = "show <raw_encap|raw_decap> <index>",
6577 (void *)&cmd_show_set_raw_cmd_show,
6578 (void *)&cmd_show_set_raw_cmd_what,
6579 (void *)&cmd_show_set_raw_cmd_index,
/* Variant dumping all configured slots ("all" keyword). */
6583 cmdline_parse_inst_t cmd_show_set_raw_all = {
6584 .f = cmd_show_set_raw_parsed,
6586 .help_str = "show <raw_encap|raw_decap> all",
6588 (void *)&cmd_show_set_raw_cmd_show,
6589 (void *)&cmd_show_set_raw_cmd_what,
6590 (void *)&cmd_show_set_raw_cmd_all,