1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
26 #include <rte_vxlan.h>
30 /** Parser token indices. */
55 /* Top-level command. */
57 /* Sub-level commands. */
64 /* Top-level command. */
66 /* Sub-level commands. */
79 /* Tunnel arguments. */
86 /* Destroy arguments. */
89 /* Query arguments. */
95 /* Destroy aged flow arguments. */
98 /* Validate/create arguments. */
107 /* Shared action arguments. */
108 SHARED_ACTION_CREATE,
109 SHARED_ACTION_UPDATE,
110 SHARED_ACTION_DESTROY,
113 /* Shared action create arguments. */
114 SHARED_ACTION_CREATE_ID,
115 SHARED_ACTION_INGRESS,
116 SHARED_ACTION_EGRESS,
119 /* Shared action destroy arguments. */
120 SHARED_ACTION_DESTROY_ID,
122 /* Validate/create pattern. */
160 ITEM_VLAN_INNER_TYPE,
161 ITEM_VLAN_HAS_MORE_VLAN,
164 ITEM_IPV4_FRAGMENT_OFFSET,
176 ITEM_IPV6_HAS_FRAG_EXT,
197 ITEM_E_TAG_GRP_ECID_B,
206 ITEM_GRE_C_RSVD0_VER,
224 ITEM_ARP_ETH_IPV4_SHA,
225 ITEM_ARP_ETH_IPV4_SPA,
226 ITEM_ARP_ETH_IPV4_THA,
227 ITEM_ARP_ETH_IPV4_TPA,
229 ITEM_IPV6_EXT_NEXT_HDR,
231 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
232 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
237 ITEM_ICMP6_ND_NS_TARGET_ADDR,
239 ITEM_ICMP6_ND_NA_TARGET_ADDR,
241 ITEM_ICMP6_ND_OPT_TYPE,
242 ITEM_ICMP6_ND_OPT_SLA_ETH,
243 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
244 ITEM_ICMP6_ND_OPT_TLA_ETH,
245 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
258 ITEM_HIGIG2_CLASSIFICATION,
264 ITEM_L2TPV3OIP_SESSION_ID,
274 ITEM_ECPRI_COMMON_TYPE,
275 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
276 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
277 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
278 ITEM_ECPRI_MSG_IQ_DATA_PCID,
279 ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
280 ITEM_ECPRI_MSG_DLY_MSR_MSRID,
282 /* Validate/create actions. */
302 ACTION_RSS_FUNC_DEFAULT,
303 ACTION_RSS_FUNC_TOEPLITZ,
304 ACTION_RSS_FUNC_SIMPLE_XOR,
305 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
317 ACTION_PHY_PORT_ORIGINAL,
318 ACTION_PHY_PORT_INDEX,
320 ACTION_PORT_ID_ORIGINAL,
324 ACTION_OF_SET_MPLS_TTL,
325 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
326 ACTION_OF_DEC_MPLS_TTL,
327 ACTION_OF_SET_NW_TTL,
328 ACTION_OF_SET_NW_TTL_NW_TTL,
329 ACTION_OF_DEC_NW_TTL,
330 ACTION_OF_COPY_TTL_OUT,
331 ACTION_OF_COPY_TTL_IN,
334 ACTION_OF_PUSH_VLAN_ETHERTYPE,
335 ACTION_OF_SET_VLAN_VID,
336 ACTION_OF_SET_VLAN_VID_VLAN_VID,
337 ACTION_OF_SET_VLAN_PCP,
338 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
340 ACTION_OF_POP_MPLS_ETHERTYPE,
342 ACTION_OF_PUSH_MPLS_ETHERTYPE,
349 ACTION_MPLSOGRE_ENCAP,
350 ACTION_MPLSOGRE_DECAP,
351 ACTION_MPLSOUDP_ENCAP,
352 ACTION_MPLSOUDP_DECAP,
354 ACTION_SET_IPV4_SRC_IPV4_SRC,
356 ACTION_SET_IPV4_DST_IPV4_DST,
358 ACTION_SET_IPV6_SRC_IPV6_SRC,
360 ACTION_SET_IPV6_DST_IPV6_DST,
362 ACTION_SET_TP_SRC_TP_SRC,
364 ACTION_SET_TP_DST_TP_DST,
370 ACTION_SET_MAC_SRC_MAC_SRC,
372 ACTION_SET_MAC_DST_MAC_DST,
374 ACTION_INC_TCP_SEQ_VALUE,
376 ACTION_DEC_TCP_SEQ_VALUE,
378 ACTION_INC_TCP_ACK_VALUE,
380 ACTION_DEC_TCP_ACK_VALUE,
383 ACTION_RAW_ENCAP_INDEX,
384 ACTION_RAW_ENCAP_INDEX_VALUE,
385 ACTION_RAW_DECAP_INDEX,
386 ACTION_RAW_DECAP_INDEX_VALUE,
389 ACTION_SET_TAG_INDEX,
392 ACTION_SET_META_DATA,
393 ACTION_SET_META_MASK,
394 ACTION_SET_IPV4_DSCP,
395 ACTION_SET_IPV4_DSCP_VALUE,
396 ACTION_SET_IPV6_DSCP,
397 ACTION_SET_IPV6_DSCP_VALUE,
403 ACTION_SAMPLE_INDEX_VALUE,
405 SHARED_ACTION_ID2PTR,
408 /** Maximum size for pattern in struct rte_flow_item_raw. */
409 #define ITEM_RAW_PATTERN_SIZE 40
411 /** Storage size for struct rte_flow_item_raw including pattern. */
412 #define ITEM_RAW_SIZE \
413 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
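/*
 * Note: the extra ITEM_RAW_PATTERN_SIZE bytes are reserved immediately
 * after the rte_flow_item_raw structure, presumably so the item's
 * pattern bytes can live in the same buffer (see the ARGS_ENTRY_ARB()
 * entry used by ITEM_RAW_PATTERN further below).
 */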
415 /** Maximum number of queue indices in struct rte_flow_action_rss. */
416 #define ACTION_RSS_QUEUE_NUM 128
418 /** Storage for struct rte_flow_action_rss including external data. */
419 struct action_rss_data {
420 struct rte_flow_action_rss conf;
421 uint8_t key[RSS_HASH_KEY_LENGTH];
422 uint16_t queue[ACTION_RSS_QUEUE_NUM];
425 /** Maximum data size in struct rte_flow_action_raw_encap. */
426 #define ACTION_RAW_ENCAP_MAX_DATA 128
427 #define RAW_ENCAP_CONFS_MAX_NUM 8
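/*
 * Sketch of the intended usage (assuming the usual testpmd syntax): up
 * to RAW_ENCAP_CONFS_MAX_NUM encapsulation buffers can be prepared in
 * advance and referenced by index from the raw_encap/raw_decap
 * actions, e.g.:
 *
 *   testpmd> set raw_encap 1 eth / ipv4 / udp / vxlan vni is 2 / end_set
 *   testpmd> flow create 0 ingress pattern eth / end
 *            actions raw_encap index 1 / end
 */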
429 /** Storage for struct rte_flow_action_raw_encap. */
430 struct raw_encap_conf {
431 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
432 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
436 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
438 /** Storage for struct rte_flow_action_raw_encap including external data. */
439 struct action_raw_encap_data {
440 struct rte_flow_action_raw_encap conf;
441 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
442 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
446 /** Storage for struct rte_flow_action_raw_decap. */
447 struct raw_decap_conf {
448 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
452 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
454 /** Storage for struct rte_flow_action_raw_decap including external data. */
455 struct action_raw_decap_data {
456 struct rte_flow_action_raw_decap conf;
457 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
461 struct vxlan_encap_conf vxlan_encap_conf = {
465 .vni = "\x00\x00\x00",
467 .udp_dst = RTE_BE16(RTE_VXLAN_DEFAULT_PORT),
468 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
469 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
470 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
471 "\x00\x00\x00\x00\x00\x00\x00\x01",
472 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
473 "\x00\x00\x00\x00\x00\x00\x11\x11",
477 .eth_src = "\x00\x00\x00\x00\x00\x00",
478 .eth_dst = "\xff\xff\xff\xff\xff\xff",
481 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
482 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
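/*
 * The six slots are expected to hold the encapsulation headers built
 * from vxlan_encap_conf above: ETH, VLAN, IPv4 or IPv6, UDP, VXLAN and
 * the terminating END item. The configuration itself is typically
 * filled beforehand, e.g. (illustrative syntax):
 *
 *   testpmd> set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789
 *            ip-src 127.0.0.1 ip-dst 128.0.0.1
 *            eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 */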
484 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
485 struct action_vxlan_encap_data {
486 struct rte_flow_action_vxlan_encap conf;
487 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
488 struct rte_flow_item_eth item_eth;
489 struct rte_flow_item_vlan item_vlan;
491 struct rte_flow_item_ipv4 item_ipv4;
492 struct rte_flow_item_ipv6 item_ipv6;
494 struct rte_flow_item_udp item_udp;
495 struct rte_flow_item_vxlan item_vxlan;
498 struct nvgre_encap_conf nvgre_encap_conf = {
501 .tni = "\x00\x00\x00",
502 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
503 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
504 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
505 "\x00\x00\x00\x00\x00\x00\x00\x01",
506 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
507 "\x00\x00\x00\x00\x00\x00\x11\x11",
509 .eth_src = "\x00\x00\x00\x00\x00\x00",
510 .eth_dst = "\xff\xff\xff\xff\xff\xff",
513 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
514 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
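/*
 * As with VXLAN above, the five slots are expected to hold the ETH,
 * VLAN, IPv4 or IPv6 and NVGRE headers built from nvgre_encap_conf,
 * plus the terminating END item.
 */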
516 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
517 struct action_nvgre_encap_data {
518 struct rte_flow_action_nvgre_encap conf;
519 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
520 struct rte_flow_item_eth item_eth;
521 struct rte_flow_item_vlan item_vlan;
523 struct rte_flow_item_ipv4 item_ipv4;
524 struct rte_flow_item_ipv6 item_ipv6;
526 struct rte_flow_item_nvgre item_nvgre;
529 struct l2_encap_conf l2_encap_conf;
531 struct l2_decap_conf l2_decap_conf;
533 struct mplsogre_encap_conf mplsogre_encap_conf;
535 struct mplsogre_decap_conf mplsogre_decap_conf;
537 struct mplsoudp_encap_conf mplsoudp_encap_conf;
539 struct mplsoudp_decap_conf mplsoudp_decap_conf;
541 #define ACTION_SAMPLE_ACTIONS_NUM 10
542 #define RAW_SAMPLE_CONFS_MAX_NUM 8
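/*
 * Sketch of the intended usage (assuming the usual testpmd syntax):
 * sample action lists are prepared first and referenced by index from
 * the sample action, e.g.:
 *
 *   testpmd> set sample_actions 0 mark id 0x8 / queue index 2 / end
 *   testpmd> flow create 0 ingress pattern eth / end
 *            actions sample ratio 2 index 0 / port_id id 1 / end
 */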
543 /** Storage for struct rte_flow_action_sample including external data. */
544 struct action_sample_data {
545 struct rte_flow_action_sample conf;
548 /** Storage for struct rte_flow_action_sample. */
549 struct raw_sample_conf {
550 struct rte_flow_action data[ACTION_SAMPLE_ACTIONS_NUM];
552 struct raw_sample_conf raw_sample_confs[RAW_SAMPLE_CONFS_MAX_NUM];
553 struct rte_flow_action_mark sample_mark[RAW_SAMPLE_CONFS_MAX_NUM];
554 struct rte_flow_action_queue sample_queue[RAW_SAMPLE_CONFS_MAX_NUM];
555 struct rte_flow_action_count sample_count[RAW_SAMPLE_CONFS_MAX_NUM];
556 struct rte_flow_action_port_id sample_port_id[RAW_SAMPLE_CONFS_MAX_NUM];
557 struct rte_flow_action_raw_encap sample_encap[RAW_SAMPLE_CONFS_MAX_NUM];
559 /** Maximum number of subsequent tokens and arguments on the stack. */
560 #define CTX_STACK_SIZE 16
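/*
 * Parsing is driven by a stack of token lists (see struct context
 * below): each accepted token pushes the lists from its .next field,
 * and the parser always resumes from the most recently pushed list.
 * CTX_STACK_SIZE bounds how deeply such lists can be nested.
 */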
562 /** Parser context. */
564 /** Stack of subsequent token lists to process. */
565 const enum index *next[CTX_STACK_SIZE];
566 /** Arguments for stacked tokens. */
567 const void *args[CTX_STACK_SIZE];
568 enum index curr; /**< Current token index. */
569 enum index prev; /**< Index of the last token seen. */
570 int next_num; /**< Number of entries in next[]. */
571 int args_num; /**< Number of entries in args[]. */
572 uint32_t eol:1; /**< EOL has been detected. */
573 uint32_t last:1; /**< No more arguments. */
574 portid_t port; /**< Current port ID (for completions). */
575 uint32_t objdata; /**< Object-specific data. */
576 void *object; /**< Address of current object for relative offsets. */
577 void *objmask; /**< Object a full mask must be written to. */
580 /** Token argument. */
582 uint32_t hton:1; /**< Use network byte ordering. */
583 uint32_t sign:1; /**< Value is signed. */
584 uint32_t bounded:1; /**< Value is bounded. */
585 uintmax_t min; /**< Minimum value if bounded. */
586 uintmax_t max; /**< Maximum value if bounded. */
587 uint32_t offset; /**< Relative offset from ctx->object. */
588 uint32_t size; /**< Field size. */
589 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
592 /** Parser token definition. */
594 /** Type displayed during completion (defaults to "TOKEN"). */
596 /** Help displayed during completion (defaults to token name). */
598 /** Private data used by parser functions. */
601 * Lists of subsequent tokens to push on the stack. Each call to the
602 * parser consumes the last entry of that stack.
604 const enum index *const *next;
605 /** Arguments stack for subsequent tokens that need them. */
606 const struct arg *const *args;
608 * Token-processing callback, returns -1 in case of error, the
609 * length of the matched string otherwise. If NULL, attempts to
610 * match the token name.
612 * If buf is not NULL, the result should be stored in it according
613 * to context. An error is returned if not large enough.
615 int (*call)(struct context *ctx, const struct token *token,
616 const char *str, unsigned int len,
617 void *buf, unsigned int size);
619 * Callback that provides possible values for this token, used for
620 * completion. Returns -1 in case of error, the number of possible
621 * values otherwise. If NULL, the token name is used.
623 * If buf is not NULL, entry index ent is written to buf and the
624 * full length of the entry is returned (same behavior as
627 int (*comp)(struct context *ctx, const struct token *token,
628 unsigned int ent, char *buf, unsigned int size);
629 /** Mandatory token name, no default value. */
633 /** Static initializer for the next field. */
634 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
636 /** Static initializer for a NEXT() entry. */
637 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
639 /** Static initializer for the args field. */
640 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
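/*
 * Purely illustrative (hypothetical) token entry showing how these
 * helpers combine in token_list[] below; EXAMPLE_TOKEN is not a real
 * index:
 *
 *   [EXAMPLE_TOKEN] = {
 *           .name = "example",
 *           .help = "example token taking one unsigned argument",
 *           .next = NEXT(NEXT_ENTRY(UNSIGNED)),
 *           .args = ARGS(ARGS_ENTRY(struct buffer, port)),
 *   },
 */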
642 /** Static initializer for ARGS() to target a field. */
643 #define ARGS_ENTRY(s, f) \
644 (&(const struct arg){ \
645 .offset = offsetof(s, f), \
646 .size = sizeof(((s *)0)->f), \
649 /** Static initializer for ARGS() to target a bit-field. */
650 #define ARGS_ENTRY_BF(s, f, b) \
651 (&(const struct arg){ \
653 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
656 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
657 #define ARGS_ENTRY_MASK(s, f, m) \
658 (&(const struct arg){ \
659 .offset = offsetof(s, f), \
660 .size = sizeof(((s *)0)->f), \
661 .mask = (const void *)(m), \
664 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
665 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
666 (&(const struct arg){ \
668 .offset = offsetof(s, f), \
669 .size = sizeof(((s *)0)->f), \
670 .mask = (const void *)(m), \
673 /** Static initializer for ARGS() to target a pointer. */
674 #define ARGS_ENTRY_PTR(s, f) \
675 (&(const struct arg){ \
676 .size = sizeof(*((s *)0)->f), \
679 /** Static initializer for ARGS() with arbitrary offset and size. */
680 #define ARGS_ENTRY_ARB(o, s) \
681 (&(const struct arg){ \
686 /** Same as ARGS_ENTRY_ARB() with bounded values. */
687 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
688 (&(const struct arg){ \
696 /** Same as ARGS_ENTRY() using network byte ordering. */
697 #define ARGS_ENTRY_HTON(s, f) \
698 (&(const struct arg){ \
700 .offset = offsetof(s, f), \
701 .size = sizeof(((s *)0)->f), \
704 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
705 #define ARG_ENTRY_HTON(s) \
706 (&(const struct arg){ \
712 /** Parser output buffer layout expected by cmd_flow_parsed(). */
714 enum index command; /**< Flow command. */
715 portid_t port; /**< Affected port ID. */
719 uint32_t action_id_n;
720 } sa_destroy; /**< Shared action destroy arguments. */
723 } sa; /**< Shared action query arguments. */
725 struct rte_flow_attr attr;
726 struct tunnel_ops tunnel_ops;
727 struct rte_flow_item *pattern;
728 struct rte_flow_action *actions;
732 } vc; /**< Validate/create arguments. */
736 } destroy; /**< Destroy arguments. */
739 } dump; /**< Dump arguments. */
742 struct rte_flow_action action;
743 } query; /**< Query arguments. */
747 } list; /**< List arguments. */
750 } isolate; /**< Isolated mode arguments. */
753 } aged; /**< Aged arguments. */
754 } args; /**< Command arguments. */
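/*
 * For example (assuming the documented testpmd flow syntax), a command
 * such as "flow destroy 0 rule 5" is parsed into command = DESTROY,
 * port = 0 and args.destroy listing rule identifier 5, which
 * cmd_flow_parsed() then acts on.
 */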
757 /** Private data for pattern items. */
758 struct parse_item_priv {
759 enum rte_flow_item_type type; /**< Item type. */
760 uint32_t size; /**< Size of item specification structure. */
763 #define PRIV_ITEM(t, s) \
764 (&(const struct parse_item_priv){ \
765 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
769 /** Private data for actions. */
770 struct parse_action_priv {
771 enum rte_flow_action_type type; /**< Action type. */
772 uint32_t size; /**< Size of action configuration structure. */
775 #define PRIV_ACTION(t, s) \
776 (&(const struct parse_action_priv){ \
777 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
781 static const enum index next_sa_create_attr[] = {
782 SHARED_ACTION_CREATE_ID,
783 SHARED_ACTION_INGRESS,
784 SHARED_ACTION_EGRESS,
789 static const enum index next_sa_subcmd[] = {
790 SHARED_ACTION_CREATE,
791 SHARED_ACTION_UPDATE,
792 SHARED_ACTION_DESTROY,
797 static const enum index next_vc_attr[] = {
809 static const enum index tunnel_create_attr[] = {
816 static const enum index tunnel_destroy_attr[] = {
823 static const enum index tunnel_list_attr[] = {
829 static const enum index next_destroy_attr[] = {
835 static const enum index next_dump_attr[] = {
841 static const enum index next_list_attr[] = {
847 static const enum index next_aged_attr[] = {
853 static const enum index next_sa_destroy_attr[] = {
854 SHARED_ACTION_DESTROY_ID,
859 static const enum index item_param[] = {
868 static const enum index next_item[] = {
905 ITEM_ICMP6_ND_OPT_SLA_ETH,
906 ITEM_ICMP6_ND_OPT_TLA_ETH,
924 static const enum index item_fuzzy[] = {
930 static const enum index item_any[] = {
936 static const enum index item_vf[] = {
942 static const enum index item_phy_port[] = {
948 static const enum index item_port_id[] = {
954 static const enum index item_mark[] = {
960 static const enum index item_raw[] = {
970 static const enum index item_eth[] = {
979 static const enum index item_vlan[] = {
984 ITEM_VLAN_INNER_TYPE,
985 ITEM_VLAN_HAS_MORE_VLAN,
990 static const enum index item_ipv4[] = {
992 ITEM_IPV4_FRAGMENT_OFFSET,
1001 static const enum index item_ipv6[] = {
1008 ITEM_IPV6_HAS_FRAG_EXT,
1013 static const enum index item_icmp[] = {
1022 static const enum index item_udp[] = {
1029 static const enum index item_tcp[] = {
1037 static const enum index item_sctp[] = {
1046 static const enum index item_vxlan[] = {
1052 static const enum index item_e_tag[] = {
1053 ITEM_E_TAG_GRP_ECID_B,
1058 static const enum index item_nvgre[] = {
1064 static const enum index item_mpls[] = {
1072 static const enum index item_gre[] = {
1074 ITEM_GRE_C_RSVD0_VER,
1082 static const enum index item_gre_key[] = {
1088 static const enum index item_gtp[] = {
1096 static const enum index item_geneve[] = {
1103 static const enum index item_vxlan_gpe[] = {
1109 static const enum index item_arp_eth_ipv4[] = {
1110 ITEM_ARP_ETH_IPV4_SHA,
1111 ITEM_ARP_ETH_IPV4_SPA,
1112 ITEM_ARP_ETH_IPV4_THA,
1113 ITEM_ARP_ETH_IPV4_TPA,
1118 static const enum index item_ipv6_ext[] = {
1119 ITEM_IPV6_EXT_NEXT_HDR,
1124 static const enum index item_ipv6_frag_ext[] = {
1125 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
1126 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
1131 static const enum index item_icmp6[] = {
1138 static const enum index item_icmp6_nd_ns[] = {
1139 ITEM_ICMP6_ND_NS_TARGET_ADDR,
1144 static const enum index item_icmp6_nd_na[] = {
1145 ITEM_ICMP6_ND_NA_TARGET_ADDR,
1150 static const enum index item_icmp6_nd_opt[] = {
1151 ITEM_ICMP6_ND_OPT_TYPE,
1156 static const enum index item_icmp6_nd_opt_sla_eth[] = {
1157 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
1162 static const enum index item_icmp6_nd_opt_tla_eth[] = {
1163 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
1168 static const enum index item_meta[] = {
1174 static const enum index item_gtp_psc[] = {
1181 static const enum index item_pppoed[] = {
1187 static const enum index item_pppoes[] = {
1193 static const enum index item_pppoe_proto_id[] = {
1198 static const enum index item_higig2[] = {
1199 ITEM_HIGIG2_CLASSIFICATION,
1205 static const enum index item_esp[] = {
1211 static const enum index item_ah[] = {
1217 static const enum index item_pfcp[] = {
1224 static const enum index next_set_raw[] = {
1230 static const enum index item_tag[] = {
1237 static const enum index item_l2tpv3oip[] = {
1238 ITEM_L2TPV3OIP_SESSION_ID,
1243 static const enum index item_ecpri[] = {
1249 static const enum index item_ecpri_common[] = {
1250 ITEM_ECPRI_COMMON_TYPE,
1254 static const enum index item_ecpri_common_type[] = {
1255 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
1256 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
1257 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
1261 static const enum index next_action[] = {
1277 ACTION_OF_SET_MPLS_TTL,
1278 ACTION_OF_DEC_MPLS_TTL,
1279 ACTION_OF_SET_NW_TTL,
1280 ACTION_OF_DEC_NW_TTL,
1281 ACTION_OF_COPY_TTL_OUT,
1282 ACTION_OF_COPY_TTL_IN,
1284 ACTION_OF_PUSH_VLAN,
1285 ACTION_OF_SET_VLAN_VID,
1286 ACTION_OF_SET_VLAN_PCP,
1288 ACTION_OF_PUSH_MPLS,
1295 ACTION_MPLSOGRE_ENCAP,
1296 ACTION_MPLSOGRE_DECAP,
1297 ACTION_MPLSOUDP_ENCAP,
1298 ACTION_MPLSOUDP_DECAP,
1299 ACTION_SET_IPV4_SRC,
1300 ACTION_SET_IPV4_DST,
1301 ACTION_SET_IPV6_SRC,
1302 ACTION_SET_IPV6_DST,
1318 ACTION_SET_IPV4_DSCP,
1319 ACTION_SET_IPV6_DSCP,
1326 static const enum index action_mark[] = {
1332 static const enum index action_queue[] = {
1338 static const enum index action_count[] = {
1340 ACTION_COUNT_SHARED,
1345 static const enum index action_rss[] = {
1356 static const enum index action_vf[] = {
1363 static const enum index action_phy_port[] = {
1364 ACTION_PHY_PORT_ORIGINAL,
1365 ACTION_PHY_PORT_INDEX,
1370 static const enum index action_port_id[] = {
1371 ACTION_PORT_ID_ORIGINAL,
1377 static const enum index action_meter[] = {
1383 static const enum index action_of_set_mpls_ttl[] = {
1384 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1389 static const enum index action_of_set_nw_ttl[] = {
1390 ACTION_OF_SET_NW_TTL_NW_TTL,
1395 static const enum index action_of_push_vlan[] = {
1396 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1401 static const enum index action_of_set_vlan_vid[] = {
1402 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1407 static const enum index action_of_set_vlan_pcp[] = {
1408 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1413 static const enum index action_of_pop_mpls[] = {
1414 ACTION_OF_POP_MPLS_ETHERTYPE,
1419 static const enum index action_of_push_mpls[] = {
1420 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1425 static const enum index action_set_ipv4_src[] = {
1426 ACTION_SET_IPV4_SRC_IPV4_SRC,
1431 static const enum index action_set_mac_src[] = {
1432 ACTION_SET_MAC_SRC_MAC_SRC,
1437 static const enum index action_set_ipv4_dst[] = {
1438 ACTION_SET_IPV4_DST_IPV4_DST,
1443 static const enum index action_set_ipv6_src[] = {
1444 ACTION_SET_IPV6_SRC_IPV6_SRC,
1449 static const enum index action_set_ipv6_dst[] = {
1450 ACTION_SET_IPV6_DST_IPV6_DST,
1455 static const enum index action_set_tp_src[] = {
1456 ACTION_SET_TP_SRC_TP_SRC,
1461 static const enum index action_set_tp_dst[] = {
1462 ACTION_SET_TP_DST_TP_DST,
1467 static const enum index action_set_ttl[] = {
1473 static const enum index action_jump[] = {
1479 static const enum index action_set_mac_dst[] = {
1480 ACTION_SET_MAC_DST_MAC_DST,
1485 static const enum index action_inc_tcp_seq[] = {
1486 ACTION_INC_TCP_SEQ_VALUE,
1491 static const enum index action_dec_tcp_seq[] = {
1492 ACTION_DEC_TCP_SEQ_VALUE,
1497 static const enum index action_inc_tcp_ack[] = {
1498 ACTION_INC_TCP_ACK_VALUE,
1503 static const enum index action_dec_tcp_ack[] = {
1504 ACTION_DEC_TCP_ACK_VALUE,
1509 static const enum index action_raw_encap[] = {
1510 ACTION_RAW_ENCAP_INDEX,
1515 static const enum index action_raw_decap[] = {
1516 ACTION_RAW_DECAP_INDEX,
1521 static const enum index action_set_tag[] = {
1522 ACTION_SET_TAG_DATA,
1523 ACTION_SET_TAG_INDEX,
1524 ACTION_SET_TAG_MASK,
1529 static const enum index action_set_meta[] = {
1530 ACTION_SET_META_DATA,
1531 ACTION_SET_META_MASK,
1536 static const enum index action_set_ipv4_dscp[] = {
1537 ACTION_SET_IPV4_DSCP_VALUE,
1542 static const enum index action_set_ipv6_dscp[] = {
1543 ACTION_SET_IPV6_DSCP_VALUE,
1548 static const enum index action_age[] = {
1555 static const enum index action_sample[] = {
1557 ACTION_SAMPLE_RATIO,
1558 ACTION_SAMPLE_INDEX,
1563 static const enum index next_action_sample[] = {
1573 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1574 const char *, unsigned int,
1575 void *, unsigned int);
1576 static int parse_set_sample_action(struct context *, const struct token *,
1577 const char *, unsigned int,
1578 void *, unsigned int);
1579 static int parse_set_init(struct context *, const struct token *,
1580 const char *, unsigned int,
1581 void *, unsigned int);
1582 static int parse_init(struct context *, const struct token *,
1583 const char *, unsigned int,
1584 void *, unsigned int);
1585 static int parse_vc(struct context *, const struct token *,
1586 const char *, unsigned int,
1587 void *, unsigned int);
1588 static int parse_vc_spec(struct context *, const struct token *,
1589 const char *, unsigned int, void *, unsigned int);
1590 static int parse_vc_conf(struct context *, const struct token *,
1591 const char *, unsigned int, void *, unsigned int);
1592 static int parse_vc_item_ecpri_type(struct context *, const struct token *,
1593 const char *, unsigned int,
1594 void *, unsigned int);
1595 static int parse_vc_action_rss(struct context *, const struct token *,
1596 const char *, unsigned int, void *,
1598 static int parse_vc_action_rss_func(struct context *, const struct token *,
1599 const char *, unsigned int, void *,
1601 static int parse_vc_action_rss_type(struct context *, const struct token *,
1602 const char *, unsigned int, void *,
1604 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1605 const char *, unsigned int, void *,
1607 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1608 const char *, unsigned int, void *,
1610 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1611 const char *, unsigned int, void *,
1613 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1614 const char *, unsigned int, void *,
1616 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1617 const char *, unsigned int, void *,
1619 static int parse_vc_action_mplsogre_encap(struct context *,
1620 const struct token *, const char *,
1621 unsigned int, void *, unsigned int);
1622 static int parse_vc_action_mplsogre_decap(struct context *,
1623 const struct token *, const char *,
1624 unsigned int, void *, unsigned int);
1625 static int parse_vc_action_mplsoudp_encap(struct context *,
1626 const struct token *, const char *,
1627 unsigned int, void *, unsigned int);
1628 static int parse_vc_action_mplsoudp_decap(struct context *,
1629 const struct token *, const char *,
1630 unsigned int, void *, unsigned int);
1631 static int parse_vc_action_raw_encap(struct context *,
1632 const struct token *, const char *,
1633 unsigned int, void *, unsigned int);
1634 static int parse_vc_action_raw_decap(struct context *,
1635 const struct token *, const char *,
1636 unsigned int, void *, unsigned int);
1637 static int parse_vc_action_raw_encap_index(struct context *,
1638 const struct token *, const char *,
1639 unsigned int, void *, unsigned int);
1640 static int parse_vc_action_raw_decap_index(struct context *,
1641 const struct token *, const char *,
1642 unsigned int, void *, unsigned int);
1643 static int parse_vc_action_set_meta(struct context *ctx,
1644 const struct token *token, const char *str,
1645 unsigned int len, void *buf,
1647 static int parse_vc_action_sample(struct context *ctx,
1648 const struct token *token, const char *str,
1649 unsigned int len, void *buf,
1652 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
1653 const char *str, unsigned int len, void *buf,
1655 static int parse_destroy(struct context *, const struct token *,
1656 const char *, unsigned int,
1657 void *, unsigned int);
1658 static int parse_flush(struct context *, const struct token *,
1659 const char *, unsigned int,
1660 void *, unsigned int);
1661 static int parse_dump(struct context *, const struct token *,
1662 const char *, unsigned int,
1663 void *, unsigned int);
1664 static int parse_query(struct context *, const struct token *,
1665 const char *, unsigned int,
1666 void *, unsigned int);
1667 static int parse_action(struct context *, const struct token *,
1668 const char *, unsigned int,
1669 void *, unsigned int);
1670 static int parse_list(struct context *, const struct token *,
1671 const char *, unsigned int,
1672 void *, unsigned int);
1673 static int parse_aged(struct context *, const struct token *,
1674 const char *, unsigned int,
1675 void *, unsigned int);
1676 static int parse_isolate(struct context *, const struct token *,
1677 const char *, unsigned int,
1678 void *, unsigned int);
1679 static int parse_tunnel(struct context *, const struct token *,
1680 const char *, unsigned int,
1681 void *, unsigned int);
1682 static int parse_int(struct context *, const struct token *,
1683 const char *, unsigned int,
1684 void *, unsigned int);
1685 static int parse_prefix(struct context *, const struct token *,
1686 const char *, unsigned int,
1687 void *, unsigned int);
1688 static int parse_boolean(struct context *, const struct token *,
1689 const char *, unsigned int,
1690 void *, unsigned int);
1691 static int parse_string(struct context *, const struct token *,
1692 const char *, unsigned int,
1693 void *, unsigned int);
1694 static int parse_hex(struct context *ctx, const struct token *token,
1695 const char *str, unsigned int len,
1696 void *buf, unsigned int size);
1697 static int parse_string0(struct context *, const struct token *,
1698 const char *, unsigned int,
1699 void *, unsigned int);
1700 static int parse_mac_addr(struct context *, const struct token *,
1701 const char *, unsigned int,
1702 void *, unsigned int);
1703 static int parse_ipv4_addr(struct context *, const struct token *,
1704 const char *, unsigned int,
1705 void *, unsigned int);
1706 static int parse_ipv6_addr(struct context *, const struct token *,
1707 const char *, unsigned int,
1708 void *, unsigned int);
1709 static int parse_port(struct context *, const struct token *,
1710 const char *, unsigned int,
1711 void *, unsigned int);
1712 static int parse_sa(struct context *, const struct token *,
1713 const char *, unsigned int,
1714 void *, unsigned int);
1715 static int parse_sa_destroy(struct context *ctx, const struct token *token,
1716 const char *str, unsigned int len,
1717 void *buf, unsigned int size);
1718 static int parse_sa_id2ptr(struct context *ctx, const struct token *token,
1719 const char *str, unsigned int len, void *buf,
1721 static int comp_none(struct context *, const struct token *,
1722 unsigned int, char *, unsigned int);
1723 static int comp_boolean(struct context *, const struct token *,
1724 unsigned int, char *, unsigned int);
1725 static int comp_action(struct context *, const struct token *,
1726 unsigned int, char *, unsigned int);
1727 static int comp_port(struct context *, const struct token *,
1728 unsigned int, char *, unsigned int);
1729 static int comp_rule_id(struct context *, const struct token *,
1730 unsigned int, char *, unsigned int);
1731 static int comp_vc_action_rss_type(struct context *, const struct token *,
1732 unsigned int, char *, unsigned int);
1733 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1734 unsigned int, char *, unsigned int);
1735 static int comp_set_raw_index(struct context *, const struct token *,
1736 unsigned int, char *, unsigned int);
1737 static int comp_set_sample_index(struct context *, const struct token *,
1738 unsigned int, char *, unsigned int);
1740 /** Token definitions. */
1741 static const struct token token_list[] = {
1742 /* Special tokens. */
1745 .help = "null entry, abused as the entry point",
1746 .next = NEXT(NEXT_ENTRY(FLOW)),
1751 .help = "command may end here",
1754 .name = "START_SET",
1755 .help = "null entry, abused as the entry point for set",
1756 .next = NEXT(NEXT_ENTRY(SET)),
1761 .help = "set command may end here",
1763 /* Common tokens. */
1767 .help = "integer value",
1772 .name = "{unsigned}",
1774 .help = "unsigned integer value",
1781 .help = "prefix length for bit-mask",
1782 .call = parse_prefix,
1786 .name = "{boolean}",
1788 .help = "any boolean value",
1789 .call = parse_boolean,
1790 .comp = comp_boolean,
1795 .help = "fixed string",
1796 .call = parse_string,
1802 .help = "fixed string",
1806 .name = "{file path}",
1808 .help = "file path",
1809 .call = parse_string0,
1813 .name = "{MAC address}",
1815 .help = "standard MAC address notation",
1816 .call = parse_mac_addr,
1820 .name = "{IPv4 address}",
1821 .type = "IPV4 ADDRESS",
1822 .help = "standard IPv4 address notation",
1823 .call = parse_ipv4_addr,
1827 .name = "{IPv6 address}",
1828 .type = "IPV6 ADDRESS",
1829 .help = "standard IPv6 address notation",
1830 .call = parse_ipv6_addr,
1834 .name = "{rule id}",
1836 .help = "rule identifier",
1838 .comp = comp_rule_id,
1841 .name = "{port_id}",
1843 .help = "port identifier",
1848 .name = "{group_id}",
1850 .help = "group identifier",
1854 [PRIORITY_LEVEL] = {
1857 .help = "priority level",
1861 [SHARED_ACTION_ID] = {
1862 .name = "{shared_action_id}",
1863 .type = "SHARED_ACTION_ID",
1864 .help = "shared action id",
1868 /* Top-level command. */
1871 .type = "{command} {port_id} [{arg} [...]]",
1872 .help = "manage ingress/egress flow rules",
1873 .next = NEXT(NEXT_ENTRY
1887 /* Top-level command. */
1889 .name = "shared_action",
1890 .type = "{command} {port_id} [{arg} [...]]",
1891 .help = "manage shared actions",
1892 .next = NEXT(next_sa_subcmd, NEXT_ENTRY(PORT_ID)),
1893 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1896 /* Sub-level commands. */
1897 [SHARED_ACTION_CREATE] = {
1899 .help = "create shared action",
1900 .next = NEXT(next_sa_create_attr),
1903 [SHARED_ACTION_UPDATE] = {
1905 .help = "update shared action",
1906 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_SPEC),
1907 NEXT_ENTRY(SHARED_ACTION_ID)),
1908 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
1911 [SHARED_ACTION_DESTROY] = {
1913 .help = "destroy shared action",
1914 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_DESTROY_ID)),
1915 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1916 .call = parse_sa_destroy,
1918 [SHARED_ACTION_QUERY] = {
1920 .help = "query shared action",
1921 .next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(SHARED_ACTION_ID)),
1922 .args = ARGS(ARGS_ENTRY(struct buffer, args.sa.action_id)),
1927 .help = "check whether a flow rule can be created",
1928 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1929 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1934 .help = "create a flow rule",
1935 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1936 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1941 .help = "destroy specific flow rules",
1942 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1943 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1944 .call = parse_destroy,
1948 .help = "destroy all flow rules",
1949 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1950 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1951 .call = parse_flush,
1955 .help = "dump all flow rules to file",
1956 .next = NEXT(next_dump_attr, NEXT_ENTRY(PORT_ID)),
1957 .args = ARGS(ARGS_ENTRY(struct buffer, args.dump.file),
1958 ARGS_ENTRY(struct buffer, port)),
1963 .help = "query an existing flow rule",
1964 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1965 NEXT_ENTRY(RULE_ID),
1966 NEXT_ENTRY(PORT_ID)),
1967 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1968 ARGS_ENTRY(struct buffer, args.query.rule),
1969 ARGS_ENTRY(struct buffer, port)),
1970 .call = parse_query,
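/*
 * Note that the NEXT() lists above are consumed last-first, so the
 * resulting command reads "flow query {port} {rule} {action}", e.g.
 * "flow query 0 0 count" (illustrative) to read a COUNT action of
 * rule 0 on port 0.
 */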
1974 .help = "list existing flow rules",
1975 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1976 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1981 .help = "list and destroy aged flows",
1982 .next = NEXT(next_aged_attr, NEXT_ENTRY(PORT_ID)),
1983 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1988 .help = "restrict ingress traffic to the defined flow rules",
1989 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1990 NEXT_ENTRY(PORT_ID)),
1991 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1992 ARGS_ENTRY(struct buffer, port)),
1993 .call = parse_isolate,
1997 .help = "new tunnel API",
1998 .next = NEXT(NEXT_ENTRY
1999 (TUNNEL_CREATE, TUNNEL_LIST, TUNNEL_DESTROY)),
2000 .call = parse_tunnel,
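/*
 * Illustrative tunnel offload commands (assuming the documented
 * testpmd syntax):
 *
 *   testpmd> flow tunnel create 0 type vxlan
 *   testpmd> flow tunnel list 0
 *   testpmd> flow tunnel destroy 0 id 1
 */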
2002 /* Tunnel arguments. */
2005 .help = "create new tunnel object",
2006 .next = NEXT(tunnel_create_attr, NEXT_ENTRY(PORT_ID)),
2007 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2008 .call = parse_tunnel,
2010 [TUNNEL_CREATE_TYPE] = {
2012 .help = "create new tunnel",
2013 .next = NEXT(tunnel_create_attr, NEXT_ENTRY(FILE_PATH)),
2014 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, type)),
2015 .call = parse_tunnel,
2017 [TUNNEL_DESTROY] = {
2019 .help = "destroy tunel",
2020 .next = NEXT(tunnel_destroy_attr, NEXT_ENTRY(PORT_ID)),
2021 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2022 .call = parse_tunnel,
2024 [TUNNEL_DESTROY_ID] = {
2026 .help = "tunnel identifier to testroy",
2027 .next = NEXT(tunnel_destroy_attr, NEXT_ENTRY(UNSIGNED)),
2028 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2029 .call = parse_tunnel,
2033 .help = "list existing tunnels",
2034 .next = NEXT(tunnel_list_attr, NEXT_ENTRY(PORT_ID)),
2035 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2036 .call = parse_tunnel,
2038 /* Destroy arguments. */
2041 .help = "specify a rule identifier",
2042 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
2043 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
2044 .call = parse_destroy,
2046 /* Query arguments. */
2050 .help = "action to query, must be part of the rule",
2051 .call = parse_action,
2052 .comp = comp_action,
2054 /* List arguments. */
2057 .help = "specify a group",
2058 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
2059 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
2064 .help = "specify aged flows need be destroyed",
2068 /* Validate/create attributes. */
2071 .help = "specify a group",
2072 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
2073 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
2078 .help = "specify a priority level",
2079 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
2080 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
2085 .help = "affect rule to ingress",
2086 .next = NEXT(next_vc_attr),
2091 .help = "affect rule to egress",
2092 .next = NEXT(next_vc_attr),
2097 .help = "apply rule directly to endpoints found in pattern",
2098 .next = NEXT(next_vc_attr),
2102 .name = "tunnel_set",
2103 .help = "tunnel steer rule",
2104 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2105 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2109 .name = "tunnel_match",
2110 .help = "tunnel match rule",
2111 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2112 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2115 /* Validate/create pattern. */
2118 .help = "submit a list of pattern items",
2119 .next = NEXT(next_item),
2124 .help = "match value perfectly (with full bit-mask)",
2125 .call = parse_vc_spec,
2127 [ITEM_PARAM_SPEC] = {
2129 .help = "match value according to configured bit-mask",
2130 .call = parse_vc_spec,
2132 [ITEM_PARAM_LAST] = {
2134 .help = "specify upper bound to establish a range",
2135 .call = parse_vc_spec,
2137 [ITEM_PARAM_MASK] = {
2139 .help = "specify bit-mask with relevant bits set to one",
2140 .call = parse_vc_spec,
2142 [ITEM_PARAM_PREFIX] = {
2144 .help = "generate bit-mask from a prefix length",
2145 .call = parse_vc_spec,
2149 .help = "specify next pattern item",
2150 .next = NEXT(next_item),
2154 .help = "end list of pattern items",
2155 .priv = PRIV_ITEM(END, 0),
2156 .next = NEXT(NEXT_ENTRY(ACTIONS)),
2161 .help = "no-op pattern item",
2162 .priv = PRIV_ITEM(VOID, 0),
2163 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2168 .help = "perform actions when pattern does not match",
2169 .priv = PRIV_ITEM(INVERT, 0),
2170 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2175 .help = "match any protocol for the current layer",
2176 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
2177 .next = NEXT(item_any),
2182 .help = "number of layers covered",
2183 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
2184 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
2188 .help = "match traffic from/to the physical function",
2189 .priv = PRIV_ITEM(PF, 0),
2190 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2195 .help = "match traffic from/to a virtual function ID",
2196 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
2197 .next = NEXT(item_vf),
2203 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
2204 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
2208 .help = "match traffic from/to a specific physical port",
2209 .priv = PRIV_ITEM(PHY_PORT,
2210 sizeof(struct rte_flow_item_phy_port)),
2211 .next = NEXT(item_phy_port),
2214 [ITEM_PHY_PORT_INDEX] = {
2216 .help = "physical port index",
2217 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
2218 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
2222 .help = "match traffic from/to a given DPDK port ID",
2223 .priv = PRIV_ITEM(PORT_ID,
2224 sizeof(struct rte_flow_item_port_id)),
2225 .next = NEXT(item_port_id),
2228 [ITEM_PORT_ID_ID] = {
2230 .help = "DPDK port ID",
2231 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
2232 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
2236 .help = "match traffic against value set in previously matched rule",
2237 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
2238 .next = NEXT(item_mark),
2243 .help = "Integer value to match against",
2244 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
2245 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
2249 .help = "match an arbitrary byte string",
2250 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
2251 .next = NEXT(item_raw),
2254 [ITEM_RAW_RELATIVE] = {
2256 .help = "look for pattern after the previous item",
2257 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2258 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2261 [ITEM_RAW_SEARCH] = {
2263 .help = "search pattern from offset (see also limit)",
2264 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2265 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2268 [ITEM_RAW_OFFSET] = {
2270 .help = "absolute or relative offset for pattern",
2271 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
2272 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
2274 [ITEM_RAW_LIMIT] = {
2276 .help = "search area limit for start of pattern",
2277 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
2278 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
2280 [ITEM_RAW_PATTERN] = {
2282 .help = "byte string to look for",
2283 .next = NEXT(item_raw,
2285 NEXT_ENTRY(ITEM_PARAM_IS,
2288 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
2289 ARGS_ENTRY(struct rte_flow_item_raw, length),
2290 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
2291 ITEM_RAW_PATTERN_SIZE)),
2295 .help = "match Ethernet header",
2296 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
2297 .next = NEXT(item_eth),
2302 .help = "destination MAC",
2303 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2304 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
2308 .help = "source MAC",
2309 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2310 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
2314 .help = "EtherType",
2315 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2316 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
2318 [ITEM_ETH_HAS_VLAN] = {
2320 .help = "packet header contains VLAN",
2321 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2322 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_eth,
2327 .help = "match 802.1Q/ad VLAN tag",
2328 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
2329 .next = NEXT(item_vlan),
2334 .help = "tag control information",
2335 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2336 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
2340 .help = "priority code point",
2341 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2342 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2347 .help = "drop eligible indicator",
2348 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2349 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2354 .help = "VLAN identifier",
2355 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2356 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2359 [ITEM_VLAN_INNER_TYPE] = {
2360 .name = "inner_type",
2361 .help = "inner EtherType",
2362 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2363 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
2366 [ITEM_VLAN_HAS_MORE_VLAN] = {
2367 .name = "has_more_vlan",
2368 .help = "packet header contains another VLAN",
2369 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2370 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_vlan,
2375 .help = "match IPv4 header",
2376 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
2377 .next = NEXT(item_ipv4),
2382 .help = "type of service",
2383 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2384 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2385 hdr.type_of_service)),
2387 [ITEM_IPV4_FRAGMENT_OFFSET] = {
2388 .name = "fragment_offset",
2389 .help = "fragmentation flags and fragment offset",
2390 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2391 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2392 hdr.fragment_offset)),
2396 .help = "time to live",
2397 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2398 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2401 [ITEM_IPV4_PROTO] = {
2403 .help = "next protocol ID",
2404 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2405 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2406 hdr.next_proto_id)),
2410 .help = "source address",
2411 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2412 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2417 .help = "destination address",
2418 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2419 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2424 .help = "match IPv6 header",
2425 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
2426 .next = NEXT(item_ipv6),
2431 .help = "traffic class",
2432 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2433 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2435 "\x0f\xf0\x00\x00")),
2437 [ITEM_IPV6_FLOW] = {
2439 .help = "flow label",
2440 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2441 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2443 "\x00\x0f\xff\xff")),
2445 [ITEM_IPV6_PROTO] = {
2447 .help = "protocol (next header)",
2448 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2449 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2454 .help = "hop limit",
2455 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2456 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2461 .help = "source address",
2462 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2463 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2468 .help = "destination address",
2469 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2470 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2473 [ITEM_IPV6_HAS_FRAG_EXT] = {
2474 .name = "has_frag_ext",
2475 .help = "fragment packet attribute",
2476 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2477 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_ipv6,
2482 .help = "match ICMP header",
2483 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2484 .next = NEXT(item_icmp),
2487 [ITEM_ICMP_TYPE] = {
2489 .help = "ICMP packet type",
2490 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2491 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2494 [ITEM_ICMP_CODE] = {
2496 .help = "ICMP packet code",
2497 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2498 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2501 [ITEM_ICMP_IDENT] = {
2503 .help = "ICMP packet identifier",
2504 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2505 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2510 .help = "ICMP packet sequence number",
2511 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2512 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2517 .help = "match UDP header",
2518 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2519 .next = NEXT(item_udp),
2524 .help = "UDP source port",
2525 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2526 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2531 .help = "UDP destination port",
2532 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2533 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2538 .help = "match TCP header",
2539 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2540 .next = NEXT(item_tcp),
2545 .help = "TCP source port",
2546 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2547 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2552 .help = "TCP destination port",
2553 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2554 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2557 [ITEM_TCP_FLAGS] = {
2559 .help = "TCP flags",
2560 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2561 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2566 .help = "match SCTP header",
2567 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2568 .next = NEXT(item_sctp),
2573 .help = "SCTP source port",
2574 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2575 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2580 .help = "SCTP destination port",
2581 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2582 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2587 .help = "validation tag",
2588 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2589 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2592 [ITEM_SCTP_CKSUM] = {
2595 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2596 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2601 .help = "match VXLAN header",
2602 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2603 .next = NEXT(item_vxlan),
2606 [ITEM_VXLAN_VNI] = {
2608 .help = "VXLAN identifier",
2609 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2610 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2614 .help = "match E-Tag header",
2615 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2616 .next = NEXT(item_e_tag),
2619 [ITEM_E_TAG_GRP_ECID_B] = {
2620 .name = "grp_ecid_b",
2621 .help = "GRP and E-CID base",
2622 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2623 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2629 .help = "match NVGRE header",
2630 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2631 .next = NEXT(item_nvgre),
2634 [ITEM_NVGRE_TNI] = {
2636 .help = "virtual subnet ID",
2637 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2638 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2642 .help = "match MPLS header",
2643 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2644 .next = NEXT(item_mpls),
2647 [ITEM_MPLS_LABEL] = {
2649 .help = "MPLS label",
2650 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2651 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2657 .help = "MPLS Traffic Class",
2658 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2659 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2665 .help = "MPLS Bottom-of-Stack",
2666 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2667 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2673 .help = "match GRE header",
2674 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2675 .next = NEXT(item_gre),
2678 [ITEM_GRE_PROTO] = {
2680 .help = "GRE protocol type",
2681 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2682 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2685 [ITEM_GRE_C_RSVD0_VER] = {
2686 .name = "c_rsvd0_ver",
2688 "checksum (1b), undefined (1b), key bit (1b),"
2689 " sequence number (1b), reserved 0 (9b),"
2691 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2692 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2695 [ITEM_GRE_C_BIT] = {
2697 .help = "checksum bit (C)",
2698 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2699 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2701 "\x80\x00\x00\x00")),
2703 [ITEM_GRE_S_BIT] = {
2705 .help = "sequence number bit (S)",
2706 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2707 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2709 "\x10\x00\x00\x00")),
2711 [ITEM_GRE_K_BIT] = {
2713 .help = "key bit (K)",
2714 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2715 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2717 "\x20\x00\x00\x00")),
2721 .help = "fuzzy pattern match, expect faster than default",
2722 .priv = PRIV_ITEM(FUZZY,
2723 sizeof(struct rte_flow_item_fuzzy)),
2724 .next = NEXT(item_fuzzy),
2727 [ITEM_FUZZY_THRESH] = {
2729 .help = "match accuracy threshold",
2730 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2731 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2736 .help = "match GTP header",
2737 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2738 .next = NEXT(item_gtp),
2741 [ITEM_GTP_FLAGS] = {
2742 .name = "v_pt_rsv_flags",
2743 .help = "GTP flags",
2744 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2745 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp,
2748 [ITEM_GTP_MSG_TYPE] = {
2750 .help = "GTP message type",
2751 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2752 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, msg_type)),
2756 .help = "tunnel endpoint identifier",
2757 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2758 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2762 .help = "match GTP header",
2763 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2764 .next = NEXT(item_gtp),
2769 .help = "match GTP header",
2770 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2771 .next = NEXT(item_gtp),
2776 .help = "match GENEVE header",
2777 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2778 .next = NEXT(item_geneve),
2781 [ITEM_GENEVE_VNI] = {
2783 .help = "virtual network identifier",
2784 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2785 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2787 [ITEM_GENEVE_PROTO] = {
2789 .help = "GENEVE protocol type",
2790 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2791 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2794 [ITEM_VXLAN_GPE] = {
2795 .name = "vxlan-gpe",
2796 .help = "match VXLAN-GPE header",
2797 .priv = PRIV_ITEM(VXLAN_GPE,
2798 sizeof(struct rte_flow_item_vxlan_gpe)),
2799 .next = NEXT(item_vxlan_gpe),
2802 [ITEM_VXLAN_GPE_VNI] = {
2804 .help = "VXLAN-GPE identifier",
2805 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2806 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2809 [ITEM_ARP_ETH_IPV4] = {
2810 .name = "arp_eth_ipv4",
2811 .help = "match ARP header for Ethernet/IPv4",
2812 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2813 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2814 .next = NEXT(item_arp_eth_ipv4),
2817 [ITEM_ARP_ETH_IPV4_SHA] = {
2819 .help = "sender hardware address",
2820 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2822 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2825 [ITEM_ARP_ETH_IPV4_SPA] = {
2827 .help = "sender IPv4 address",
2828 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2830 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2833 [ITEM_ARP_ETH_IPV4_THA] = {
2835 .help = "target hardware address",
2836 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2838 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2841 [ITEM_ARP_ETH_IPV4_TPA] = {
2843 .help = "target IPv4 address",
2844 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2846 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2851 .help = "match presence of any IPv6 extension header",
2852 .priv = PRIV_ITEM(IPV6_EXT,
2853 sizeof(struct rte_flow_item_ipv6_ext)),
2854 .next = NEXT(item_ipv6_ext),
2857 [ITEM_IPV6_EXT_NEXT_HDR] = {
2859 .help = "next header",
2860 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2861 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2864 [ITEM_IPV6_FRAG_EXT] = {
2865 .name = "ipv6_frag_ext",
2866 .help = "match presence of IPv6 fragment extension header",
2867 .priv = PRIV_ITEM(IPV6_FRAG_EXT,
2868 sizeof(struct rte_flow_item_ipv6_frag_ext)),
2869 .next = NEXT(item_ipv6_frag_ext),
2872 [ITEM_IPV6_FRAG_EXT_NEXT_HDR] = {
2874 .help = "next header",
2875 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2877 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_ipv6_frag_ext,
2880 [ITEM_IPV6_FRAG_EXT_FRAG_DATA] = {
2881 .name = "frag_data",
2882 .help = "Fragment flags and offset",
2883 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2885 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_frag_ext,
2890 .help = "match any ICMPv6 header",
2891 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2892 .next = NEXT(item_icmp6),
2895 [ITEM_ICMP6_TYPE] = {
2897 .help = "ICMPv6 type",
2898 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2899 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2902 [ITEM_ICMP6_CODE] = {
2904 .help = "ICMPv6 code",
2905 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2906 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2909 [ITEM_ICMP6_ND_NS] = {
2910 .name = "icmp6_nd_ns",
2911 .help = "match ICMPv6 neighbor discovery solicitation",
2912 .priv = PRIV_ITEM(ICMP6_ND_NS,
2913 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2914 .next = NEXT(item_icmp6_nd_ns),
2917 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2918 .name = "target_addr",
2919 .help = "target address",
2920 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2922 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2925 [ITEM_ICMP6_ND_NA] = {
2926 .name = "icmp6_nd_na",
2927 .help = "match ICMPv6 neighbor discovery advertisement",
2928 .priv = PRIV_ITEM(ICMP6_ND_NA,
2929 sizeof(struct rte_flow_item_icmp6_nd_na)),
2930 .next = NEXT(item_icmp6_nd_na),
2933 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2934 .name = "target_addr",
2935 .help = "target address",
2936 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2938 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2941 [ITEM_ICMP6_ND_OPT] = {
2942 .name = "icmp6_nd_opt",
2943 .help = "match presence of any ICMPv6 neighbor discovery"
2945 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2946 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2947 .next = NEXT(item_icmp6_nd_opt),
2950 [ITEM_ICMP6_ND_OPT_TYPE] = {
2952 .help = "ND option type",
2953 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2955 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2958 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2959 .name = "icmp6_nd_opt_sla_eth",
2960 .help = "match ICMPv6 neighbor discovery source Ethernet"
2961 " link-layer address option",
2963 (ICMP6_ND_OPT_SLA_ETH,
2964 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2965 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2968 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2970 .help = "source Ethernet LLA",
2971 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2973 .args = ARGS(ARGS_ENTRY_HTON
2974 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2976 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2977 .name = "icmp6_nd_opt_tla_eth",
2978 .help = "match ICMPv6 neighbor discovery target Ethernet"
2979 " link-layer address option",
2981 (ICMP6_ND_OPT_TLA_ETH,
2982 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2983 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2986 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2988 .help = "target Ethernet LLA",
2989 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2991 .args = ARGS(ARGS_ENTRY_HTON
2992 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2996 .help = "match metadata header",
2997 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2998 .next = NEXT(item_meta),
3001 [ITEM_META_DATA] = {
3003 .help = "metadata value",
3004 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
3005 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
3006 data, "\xff\xff\xff\xff")),
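/*
 * Illustrative usage (a sketch, not part of this file): the metadata item
 * matches the packet metadata value with the default 32-bit full mask set
 * above, e.g. in testpmd (item name assumed to be "meta"):
 *
 *   flow create 0 ingress pattern meta data is 0x1234 / end
 *        actions queue index 0 / end
 */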
3010 .help = "match GRE key",
3011 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
3012 .next = NEXT(item_gre_key),
3015 [ITEM_GRE_KEY_VALUE] = {
3017 .help = "key value",
3018 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
3019 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3023 .help = "match GTP extension header with type 0x85",
3024 .priv = PRIV_ITEM(GTP_PSC,
3025 sizeof(struct rte_flow_item_gtp_psc)),
3026 .next = NEXT(item_gtp_psc),
3029 [ITEM_GTP_PSC_QFI] = {
3031 .help = "QoS flow identifier",
3032 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3033 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3036 [ITEM_GTP_PSC_PDU_T] = {
3039 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3040 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3045 .help = "match PPPoE session header",
3046 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
3047 .next = NEXT(item_pppoes),
3052 .help = "match PPPoE discovery header",
3053 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
3054 .next = NEXT(item_pppoed),
3057 [ITEM_PPPOE_SEID] = {
3059 .help = "session identifier",
3060 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
3061 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
3064 [ITEM_PPPOE_PROTO_ID] = {
3065 .name = "pppoe_proto_id",
3066 .help = "match PPPoE session protocol identifier",
3067 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
3068 sizeof(struct rte_flow_item_pppoe_proto_id)),
3069 .next = NEXT(item_pppoe_proto_id, NEXT_ENTRY(UNSIGNED),
3071 .args = ARGS(ARGS_ENTRY_HTON
3072 (struct rte_flow_item_pppoe_proto_id, proto_id)),
3077 .help = "matches higig2 header",
3078 .priv = PRIV_ITEM(HIGIG2,
3079 sizeof(struct rte_flow_item_higig2_hdr)),
3080 .next = NEXT(item_higig2),
3083 [ITEM_HIGIG2_CLASSIFICATION] = {
3084 .name = "classification",
3085 .help = "matches classification of higig2 header",
3086 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3087 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3088 hdr.ppt1.classification)),
3090 [ITEM_HIGIG2_VID] = {
3092 .help = "matches vid of higig2 header",
3093 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3094 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3099 .help = "match tag value",
3100 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
3101 .next = NEXT(item_tag),
3106 .help = "tag value to match",
3107 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
3108 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
3110 [ITEM_TAG_INDEX] = {
3112 .help = "index of tag array to match",
3113 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
3114 NEXT_ENTRY(ITEM_PARAM_IS)),
3115 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
3117 [ITEM_L2TPV3OIP] = {
3118 .name = "l2tpv3oip",
3119 .help = "match L2TPv3 over IP header",
3120 .priv = PRIV_ITEM(L2TPV3OIP,
3121 sizeof(struct rte_flow_item_l2tpv3oip)),
3122 .next = NEXT(item_l2tpv3oip),
3125 [ITEM_L2TPV3OIP_SESSION_ID] = {
3126 .name = "session_id",
3127 .help = "session identifier",
3128 .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param),
3129 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip,
3134 .help = "match ESP header",
3135 .priv = PRIV_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
3136 .next = NEXT(item_esp),
3141 .help = "security policy index",
3142 .next = NEXT(item_esp, NEXT_ENTRY(UNSIGNED), item_param),
3143 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_esp,
3148 .help = "match AH header",
3149 .priv = PRIV_ITEM(AH, sizeof(struct rte_flow_item_ah)),
3150 .next = NEXT(item_ah),
3155 .help = "security parameters index",
3156 .next = NEXT(item_ah, NEXT_ENTRY(UNSIGNED), item_param),
3157 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ah, spi)),
3161 .help = "match pfcp header",
3162 .priv = PRIV_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
3163 .next = NEXT(item_pfcp),
3166 [ITEM_PFCP_S_FIELD] = {
3169 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3170 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp,
3173 [ITEM_PFCP_SEID] = {
3175 .help = "session endpoint identifier",
3176 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3177 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, seid)),
3181 .help = "match eCPRI header",
3182 .priv = PRIV_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
3183 .next = NEXT(item_ecpri),
3186 [ITEM_ECPRI_COMMON] = {
3188 .help = "eCPRI common header",
3189 .next = NEXT(item_ecpri_common),
3191 [ITEM_ECPRI_COMMON_TYPE] = {
3193 .help = "type of common header",
3194 .next = NEXT(item_ecpri_common_type),
3195 .args = ARGS(ARG_ENTRY_HTON(struct rte_flow_item_ecpri)),
3197 [ITEM_ECPRI_COMMON_TYPE_IQ_DATA] = {
3199 .help = "Type #0: IQ Data",
3200 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
3202 .call = parse_vc_item_ecpri_type,
3204 [ITEM_ECPRI_MSG_IQ_DATA_PCID] = {
3206 .help = "Physical Channel ID",
3207 .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
3208 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3211 [ITEM_ECPRI_COMMON_TYPE_RTC_CTRL] = {
3213 .help = "Type #2: Real-Time Control Data",
3214 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
3216 .call = parse_vc_item_ecpri_type,
3218 [ITEM_ECPRI_MSG_RTC_CTRL_RTCID] = {
3220 .help = "Real-Time Control Data ID",
3221 .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
3222 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3225 [ITEM_ECPRI_COMMON_TYPE_DLY_MSR] = {
3226 .name = "delay_measure",
3227 .help = "Type #5: One-Way Delay Measurement",
3228 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
3230 .call = parse_vc_item_ecpri_type,
3232 [ITEM_ECPRI_MSG_DLY_MSR_MSRID] = {
3234 .help = "Measurement ID",
3235 .next = NEXT(item_ecpri, NEXT_ENTRY(UNSIGNED), item_param),
3236 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
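/*
 * Illustrative usage (a sketch, token names assumed from the entries above):
 * eCPRI matching first selects the common header type, then the
 * type-specific identifier, e.g. in testpmd:
 *
 *   flow create 0 ingress pattern ecpri common type rtc_ctrl rtc_id is 100 / end
 *        actions drop / end
 */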
3239 /* Validate/create actions. */
3242 .help = "submit a list of associated actions",
3243 .next = NEXT(next_action),
3248 .help = "specify next action",
3249 .next = NEXT(next_action),
3253 .help = "end list of actions",
3254 .priv = PRIV_ACTION(END, 0),
3259 .help = "no-op action",
3260 .priv = PRIV_ACTION(VOID, 0),
3261 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3264 [ACTION_PASSTHRU] = {
3266 .help = "let subsequent rule process matched packets",
3267 .priv = PRIV_ACTION(PASSTHRU, 0),
3268 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3273 .help = "redirect traffic to a given group",
3274 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
3275 .next = NEXT(action_jump),
3278 [ACTION_JUMP_GROUP] = {
3280 .help = "group to redirect traffic to",
3281 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
3282 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
3283 .call = parse_vc_conf,
3287 .help = "attach 32 bit value to packets",
3288 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
3289 .next = NEXT(action_mark),
3292 [ACTION_MARK_ID] = {
3294 .help = "32 bit value to return with packets",
3295 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
3296 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
3297 .call = parse_vc_conf,
3301 .help = "flag packets",
3302 .priv = PRIV_ACTION(FLAG, 0),
3303 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3308 .help = "assign packets to a given queue index",
3309 .priv = PRIV_ACTION(QUEUE,
3310 sizeof(struct rte_flow_action_queue)),
3311 .next = NEXT(action_queue),
3314 [ACTION_QUEUE_INDEX] = {
3316 .help = "queue index to use",
3317 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
3318 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
3319 .call = parse_vc_conf,
3323 .help = "drop packets (note: passthru has priority)",
3324 .priv = PRIV_ACTION(DROP, 0),
3325 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3330 .help = "enable counters for this rule",
3331 .priv = PRIV_ACTION(COUNT,
3332 sizeof(struct rte_flow_action_count)),
3333 .next = NEXT(action_count),
3336 [ACTION_COUNT_ID] = {
3337 .name = "identifier",
3338 .help = "counter identifier to use",
3339 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
3340 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
3341 .call = parse_vc_conf,
3343 [ACTION_COUNT_SHARED] = {
3345 .help = "shared counter",
3346 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
3347 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
3349 .call = parse_vc_conf,
3353 .help = "spread packets among several queues",
3354 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
3355 .next = NEXT(action_rss),
3356 .call = parse_vc_action_rss,
3358 [ACTION_RSS_FUNC] = {
3360 .help = "RSS hash function to apply",
3361 .next = NEXT(action_rss,
3362 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
3363 ACTION_RSS_FUNC_TOEPLITZ,
3364 ACTION_RSS_FUNC_SIMPLE_XOR,
3365 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
3367 [ACTION_RSS_FUNC_DEFAULT] = {
3369 .help = "default hash function",
3370 .call = parse_vc_action_rss_func,
3372 [ACTION_RSS_FUNC_TOEPLITZ] = {
3374 .help = "Toeplitz hash function",
3375 .call = parse_vc_action_rss_func,
3377 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
3378 .name = "simple_xor",
3379 .help = "simple XOR hash function",
3380 .call = parse_vc_action_rss_func,
3382 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
3383 .name = "symmetric_toeplitz",
3384 .help = "Symmetric Toeplitz hash function",
3385 .call = parse_vc_action_rss_func,
3387 [ACTION_RSS_LEVEL] = {
3389 .help = "encapsulation level for \"types\"",
3390 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3391 .args = ARGS(ARGS_ENTRY_ARB
3392 (offsetof(struct action_rss_data, conf) +
3393 offsetof(struct rte_flow_action_rss, level),
3394 sizeof(((struct rte_flow_action_rss *)0)->
3397 [ACTION_RSS_TYPES] = {
3399 .help = "specific RSS hash types",
3400 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
3402 [ACTION_RSS_TYPE] = {
3404 .help = "RSS hash type",
3405 .call = parse_vc_action_rss_type,
3406 .comp = comp_vc_action_rss_type,
3408 [ACTION_RSS_KEY] = {
3410 .help = "RSS hash key",
3411 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
3412 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
3414 (offsetof(struct action_rss_data, conf) +
3415 offsetof(struct rte_flow_action_rss, key_len),
3416 sizeof(((struct rte_flow_action_rss *)0)->
3418 ARGS_ENTRY(struct action_rss_data, key)),
3420 [ACTION_RSS_KEY_LEN] = {
3422 .help = "RSS hash key length in bytes",
3423 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3424 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3425 (offsetof(struct action_rss_data, conf) +
3426 offsetof(struct rte_flow_action_rss, key_len),
3427 sizeof(((struct rte_flow_action_rss *)0)->
3430 RSS_HASH_KEY_LENGTH)),
3432 [ACTION_RSS_QUEUES] = {
3434 .help = "queue indices to use",
3435 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
3436 .call = parse_vc_conf,
3438 [ACTION_RSS_QUEUE] = {
3440 .help = "queue index",
3441 .call = parse_vc_action_rss_queue,
3442 .comp = comp_vc_action_rss_queue,
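/*
 * Illustrative usage (a sketch, not part of this file): the rss action
 * tokens above accept an optional hash function, a types list and a queue
 * list, each list terminated by "end", e.g. in testpmd:
 *
 *   flow create 0 ingress pattern eth / ipv4 / tcp / end
 *        actions rss func toeplitz types ipv4-tcp end queues 0 1 2 3 end / end
 */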
3446 .help = "direct traffic to physical function",
3447 .priv = PRIV_ACTION(PF, 0),
3448 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3453 .help = "direct traffic to a virtual function ID",
3454 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
3455 .next = NEXT(action_vf),
3458 [ACTION_VF_ORIGINAL] = {
3460 .help = "use original VF ID if possible",
3461 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
3462 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
3464 .call = parse_vc_conf,
3469 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
3470 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
3471 .call = parse_vc_conf,
3473 [ACTION_PHY_PORT] = {
3475 .help = "direct packets to physical port index",
3476 .priv = PRIV_ACTION(PHY_PORT,
3477 sizeof(struct rte_flow_action_phy_port)),
3478 .next = NEXT(action_phy_port),
3481 [ACTION_PHY_PORT_ORIGINAL] = {
3483 .help = "use original port index if possible",
3484 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
3485 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
3487 .call = parse_vc_conf,
3489 [ACTION_PHY_PORT_INDEX] = {
3491 .help = "physical port index",
3492 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
3493 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
3495 .call = parse_vc_conf,
3497 [ACTION_PORT_ID] = {
3499 .help = "direct matching traffic to a given DPDK port ID",
3500 .priv = PRIV_ACTION(PORT_ID,
3501 sizeof(struct rte_flow_action_port_id)),
3502 .next = NEXT(action_port_id),
3505 [ACTION_PORT_ID_ORIGINAL] = {
3507 .help = "use original DPDK port ID if possible",
3508 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
3509 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
3511 .call = parse_vc_conf,
3513 [ACTION_PORT_ID_ID] = {
3515 .help = "DPDK port ID",
3516 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
3517 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
3518 .call = parse_vc_conf,
3522 .help = "meter the directed packets at given id",
3523 .priv = PRIV_ACTION(METER,
3524 sizeof(struct rte_flow_action_meter)),
3525 .next = NEXT(action_meter),
3528 [ACTION_METER_ID] = {
3530 .help = "meter id to use",
3531 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
3532 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
3533 .call = parse_vc_conf,
3535 [ACTION_OF_SET_MPLS_TTL] = {
3536 .name = "of_set_mpls_ttl",
3537 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
3540 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
3541 .next = NEXT(action_of_set_mpls_ttl),
3544 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
3547 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
3548 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
3550 .call = parse_vc_conf,
3552 [ACTION_OF_DEC_MPLS_TTL] = {
3553 .name = "of_dec_mpls_ttl",
3554 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
3555 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
3556 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3559 [ACTION_OF_SET_NW_TTL] = {
3560 .name = "of_set_nw_ttl",
3561 .help = "OpenFlow's OFPAT_SET_NW_TTL",
3564 sizeof(struct rte_flow_action_of_set_nw_ttl)),
3565 .next = NEXT(action_of_set_nw_ttl),
3568 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
3571 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
3572 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
3574 .call = parse_vc_conf,
3576 [ACTION_OF_DEC_NW_TTL] = {
3577 .name = "of_dec_nw_ttl",
3578 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
3579 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
3580 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3583 [ACTION_OF_COPY_TTL_OUT] = {
3584 .name = "of_copy_ttl_out",
3585 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
3586 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
3587 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3590 [ACTION_OF_COPY_TTL_IN] = {
3591 .name = "of_copy_ttl_in",
3592 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
3593 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
3594 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3597 [ACTION_OF_POP_VLAN] = {
3598 .name = "of_pop_vlan",
3599 .help = "OpenFlow's OFPAT_POP_VLAN",
3600 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
3601 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3604 [ACTION_OF_PUSH_VLAN] = {
3605 .name = "of_push_vlan",
3606 .help = "OpenFlow's OFPAT_PUSH_VLAN",
3609 sizeof(struct rte_flow_action_of_push_vlan)),
3610 .next = NEXT(action_of_push_vlan),
3613 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
3614 .name = "ethertype",
3615 .help = "EtherType",
3616 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
3617 .args = ARGS(ARGS_ENTRY_HTON
3618 (struct rte_flow_action_of_push_vlan,
3620 .call = parse_vc_conf,
3622 [ACTION_OF_SET_VLAN_VID] = {
3623 .name = "of_set_vlan_vid",
3624 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
3627 sizeof(struct rte_flow_action_of_set_vlan_vid)),
3628 .next = NEXT(action_of_set_vlan_vid),
3631 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
3634 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
3635 .args = ARGS(ARGS_ENTRY_HTON
3636 (struct rte_flow_action_of_set_vlan_vid,
3638 .call = parse_vc_conf,
3640 [ACTION_OF_SET_VLAN_PCP] = {
3641 .name = "of_set_vlan_pcp",
3642 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3645 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3646 .next = NEXT(action_of_set_vlan_pcp),
3649 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3651 .help = "VLAN priority",
3652 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3653 .args = ARGS(ARGS_ENTRY_HTON
3654 (struct rte_flow_action_of_set_vlan_pcp,
3656 .call = parse_vc_conf,
3658 [ACTION_OF_POP_MPLS] = {
3659 .name = "of_pop_mpls",
3660 .help = "OpenFlow's OFPAT_POP_MPLS",
3661 .priv = PRIV_ACTION(OF_POP_MPLS,
3662 sizeof(struct rte_flow_action_of_pop_mpls)),
3663 .next = NEXT(action_of_pop_mpls),
3666 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3667 .name = "ethertype",
3668 .help = "EtherType",
3669 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3670 .args = ARGS(ARGS_ENTRY_HTON
3671 (struct rte_flow_action_of_pop_mpls,
3673 .call = parse_vc_conf,
3675 [ACTION_OF_PUSH_MPLS] = {
3676 .name = "of_push_mpls",
3677 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3680 sizeof(struct rte_flow_action_of_push_mpls)),
3681 .next = NEXT(action_of_push_mpls),
3684 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3685 .name = "ethertype",
3686 .help = "EtherType",
3687 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3688 .args = ARGS(ARGS_ENTRY_HTON
3689 (struct rte_flow_action_of_push_mpls,
3691 .call = parse_vc_conf,
3693 [ACTION_VXLAN_ENCAP] = {
3694 .name = "vxlan_encap",
3695 .help = "VXLAN encapsulation, uses configuration set by \"set"
3697 .priv = PRIV_ACTION(VXLAN_ENCAP,
3698 sizeof(struct action_vxlan_encap_data)),
3699 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3700 .call = parse_vc_action_vxlan_encap,
3702 [ACTION_VXLAN_DECAP] = {
3703 .name = "vxlan_decap",
3704 .help = "Performs a decapsulation action by stripping all"
3705 " headers of the VXLAN tunnel network overlay from the"
3707 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3708 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3711 [ACTION_NVGRE_ENCAP] = {
3712 .name = "nvgre_encap",
3713 .help = "NVGRE encapsulation, uses configuration set by \"set"
3715 .priv = PRIV_ACTION(NVGRE_ENCAP,
3716 sizeof(struct action_nvgre_encap_data)),
3717 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3718 .call = parse_vc_action_nvgre_encap,
3720 [ACTION_NVGRE_DECAP] = {
3721 .name = "nvgre_decap",
3722 .help = "Performs a decapsulation action by stripping all"
3723 " headers of the NVGRE tunnel network overlay from the"
3725 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3726 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3729 [ACTION_L2_ENCAP] = {
3731 .help = "l2 encap, uses configuration set by"
3732 " \"set l2_encap\"",
3733 .priv = PRIV_ACTION(RAW_ENCAP,
3734 sizeof(struct action_raw_encap_data)),
3735 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3736 .call = parse_vc_action_l2_encap,
3738 [ACTION_L2_DECAP] = {
3740 .help = "l2 decap, uses configuration set by"
3741 " \"set l2_decap\"",
3742 .priv = PRIV_ACTION(RAW_DECAP,
3743 sizeof(struct action_raw_decap_data)),
3744 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3745 .call = parse_vc_action_l2_decap,
3747 [ACTION_MPLSOGRE_ENCAP] = {
3748 .name = "mplsogre_encap",
3749 .help = "mplsogre encapsulation, uses configuration set by"
3750 " \"set mplsogre_encap\"",
3751 .priv = PRIV_ACTION(RAW_ENCAP,
3752 sizeof(struct action_raw_encap_data)),
3753 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3754 .call = parse_vc_action_mplsogre_encap,
3756 [ACTION_MPLSOGRE_DECAP] = {
3757 .name = "mplsogre_decap",
3758 .help = "mplsogre decapsulation, uses configuration set by"
3759 " \"set mplsogre_decap\"",
3760 .priv = PRIV_ACTION(RAW_DECAP,
3761 sizeof(struct action_raw_decap_data)),
3762 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3763 .call = parse_vc_action_mplsogre_decap,
3765 [ACTION_MPLSOUDP_ENCAP] = {
3766 .name = "mplsoudp_encap",
3767 .help = "mplsoudp encapsulation, uses configuration set by"
3768 " \"set mplsoudp_encap\"",
3769 .priv = PRIV_ACTION(RAW_ENCAP,
3770 sizeof(struct action_raw_encap_data)),
3771 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3772 .call = parse_vc_action_mplsoudp_encap,
3774 [ACTION_MPLSOUDP_DECAP] = {
3775 .name = "mplsoudp_decap",
3776 .help = "mplsoudp decapsulation, uses configuration set by"
3777 " \"set mplsoudp_decap\"",
3778 .priv = PRIV_ACTION(RAW_DECAP,
3779 sizeof(struct action_raw_decap_data)),
3780 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3781 .call = parse_vc_action_mplsoudp_decap,
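/*
 * Note: the l2/mplsogre/mplsoudp encap and decap entries above all reuse the
 * generic RAW_ENCAP/RAW_DECAP action types; their parse callbacks build the
 * raw header data from global configuration previously filled in by the
 * corresponding "set l2_encap"/"set mplsoudp_encap"/... commands, so the flow
 * rule itself takes no arguments, e.g. (illustrative):
 *
 *   flow create 0 egress pattern eth / end actions mplsoudp_encap / end
 */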
3783 [ACTION_SET_IPV4_SRC] = {
3784 .name = "set_ipv4_src",
3785 .help = "Set a new IPv4 source address in the outermost"
3787 .priv = PRIV_ACTION(SET_IPV4_SRC,
3788 sizeof(struct rte_flow_action_set_ipv4)),
3789 .next = NEXT(action_set_ipv4_src),
3792 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3793 .name = "ipv4_addr",
3794 .help = "new IPv4 source address to set",
3795 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3796 .args = ARGS(ARGS_ENTRY_HTON
3797 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3798 .call = parse_vc_conf,
3800 [ACTION_SET_IPV4_DST] = {
3801 .name = "set_ipv4_dst",
3802 .help = "Set a new IPv4 destination address in the outermost"
3804 .priv = PRIV_ACTION(SET_IPV4_DST,
3805 sizeof(struct rte_flow_action_set_ipv4)),
3806 .next = NEXT(action_set_ipv4_dst),
3809 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3810 .name = "ipv4_addr",
3811 .help = "new IPv4 destination address to set",
3812 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3813 .args = ARGS(ARGS_ENTRY_HTON
3814 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3815 .call = parse_vc_conf,
3817 [ACTION_SET_IPV6_SRC] = {
3818 .name = "set_ipv6_src",
3819 .help = "Set a new IPv6 source address in the outermost"
3821 .priv = PRIV_ACTION(SET_IPV6_SRC,
3822 sizeof(struct rte_flow_action_set_ipv6)),
3823 .next = NEXT(action_set_ipv6_src),
3826 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3827 .name = "ipv6_addr",
3828 .help = "new IPv6 source address to set",
3829 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3830 .args = ARGS(ARGS_ENTRY_HTON
3831 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3832 .call = parse_vc_conf,
3834 [ACTION_SET_IPV6_DST] = {
3835 .name = "set_ipv6_dst",
3836 .help = "Set a new IPv6 destination address in the outermost"
3838 .priv = PRIV_ACTION(SET_IPV6_DST,
3839 sizeof(struct rte_flow_action_set_ipv6)),
3840 .next = NEXT(action_set_ipv6_dst),
3843 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3844 .name = "ipv6_addr",
3845 .help = "new IPv6 destination address to set",
3846 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3847 .args = ARGS(ARGS_ENTRY_HTON
3848 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3849 .call = parse_vc_conf,
3851 [ACTION_SET_TP_SRC] = {
3852 .name = "set_tp_src",
3853 .help = "set a new source port number in the outermost"
3855 .priv = PRIV_ACTION(SET_TP_SRC,
3856 sizeof(struct rte_flow_action_set_tp)),
3857 .next = NEXT(action_set_tp_src),
3860 [ACTION_SET_TP_SRC_TP_SRC] = {
3862 .help = "new source port number to set",
3863 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3864 .args = ARGS(ARGS_ENTRY_HTON
3865 (struct rte_flow_action_set_tp, port)),
3866 .call = parse_vc_conf,
3868 [ACTION_SET_TP_DST] = {
3869 .name = "set_tp_dst",
3870 .help = "set a new destination port number in the outermost"
3872 .priv = PRIV_ACTION(SET_TP_DST,
3873 sizeof(struct rte_flow_action_set_tp)),
3874 .next = NEXT(action_set_tp_dst),
3877 [ACTION_SET_TP_DST_TP_DST] = {
3879 .help = "new destination port number to set",
3880 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3881 .args = ARGS(ARGS_ENTRY_HTON
3882 (struct rte_flow_action_set_tp, port)),
3883 .call = parse_vc_conf,
3885 [ACTION_MAC_SWAP] = {
3887 .help = "Swap the source and destination MAC addresses"
3888 " in the outermost Ethernet header",
3889 .priv = PRIV_ACTION(MAC_SWAP, 0),
3890 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3893 [ACTION_DEC_TTL] = {
3895 .help = "decrease network TTL if available",
3896 .priv = PRIV_ACTION(DEC_TTL, 0),
3897 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3900 [ACTION_SET_TTL] = {
3902 .help = "set ttl value",
3903 .priv = PRIV_ACTION(SET_TTL,
3904 sizeof(struct rte_flow_action_set_ttl)),
3905 .next = NEXT(action_set_ttl),
3908 [ACTION_SET_TTL_TTL] = {
3909 .name = "ttl_value",
3910 .help = "new ttl value to set",
3911 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3912 .args = ARGS(ARGS_ENTRY_HTON
3913 (struct rte_flow_action_set_ttl, ttl_value)),
3914 .call = parse_vc_conf,
3916 [ACTION_SET_MAC_SRC] = {
3917 .name = "set_mac_src",
3918 .help = "set source mac address",
3919 .priv = PRIV_ACTION(SET_MAC_SRC,
3920 sizeof(struct rte_flow_action_set_mac)),
3921 .next = NEXT(action_set_mac_src),
3924 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3926 .help = "new source mac address",
3927 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3928 .args = ARGS(ARGS_ENTRY_HTON
3929 (struct rte_flow_action_set_mac, mac_addr)),
3930 .call = parse_vc_conf,
3932 [ACTION_SET_MAC_DST] = {
3933 .name = "set_mac_dst",
3934 .help = "set destination mac address",
3935 .priv = PRIV_ACTION(SET_MAC_DST,
3936 sizeof(struct rte_flow_action_set_mac)),
3937 .next = NEXT(action_set_mac_dst),
3940 [ACTION_SET_MAC_DST_MAC_DST] = {
3942 .help = "new destination mac address to set",
3943 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3944 .args = ARGS(ARGS_ENTRY_HTON
3945 (struct rte_flow_action_set_mac, mac_addr)),
3946 .call = parse_vc_conf,
3948 [ACTION_INC_TCP_SEQ] = {
3949 .name = "inc_tcp_seq",
3950 .help = "increase TCP sequence number",
3951 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3952 .next = NEXT(action_inc_tcp_seq),
3955 [ACTION_INC_TCP_SEQ_VALUE] = {
3957 .help = "the value to increase TCP sequence number by",
3958 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3959 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3960 .call = parse_vc_conf,
3962 [ACTION_DEC_TCP_SEQ] = {
3963 .name = "dec_tcp_seq",
3964 .help = "decrease TCP sequence number",
3965 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3966 .next = NEXT(action_dec_tcp_seq),
3969 [ACTION_DEC_TCP_SEQ_VALUE] = {
3971 .help = "the value to decrease TCP sequence number by",
3972 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3973 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3974 .call = parse_vc_conf,
3976 [ACTION_INC_TCP_ACK] = {
3977 .name = "inc_tcp_ack",
3978 .help = "increase TCP acknowledgment number",
3979 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3980 .next = NEXT(action_inc_tcp_ack),
3983 [ACTION_INC_TCP_ACK_VALUE] = {
3985 .help = "the value to increase TCP acknowledgment number by",
3986 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3987 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3988 .call = parse_vc_conf,
3990 [ACTION_DEC_TCP_ACK] = {
3991 .name = "dec_tcp_ack",
3992 .help = "decrease TCP acknowledgment number",
3993 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3994 .next = NEXT(action_dec_tcp_ack),
3997 [ACTION_DEC_TCP_ACK_VALUE] = {
3999 .help = "the value to decrease TCP acknowledgment number by",
4000 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
4001 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
4002 .call = parse_vc_conf,
4004 [ACTION_RAW_ENCAP] = {
4005 .name = "raw_encap",
4006 .help = "encapsulation data, defined by set raw_encap",
4007 .priv = PRIV_ACTION(RAW_ENCAP,
4008 sizeof(struct action_raw_encap_data)),
4009 .next = NEXT(action_raw_encap),
4010 .call = parse_vc_action_raw_encap,
4012 [ACTION_RAW_ENCAP_INDEX] = {
4014 .help = "the index of raw_encap_confs",
4015 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
4017 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
4020 .help = "unsigned integer value",
4021 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4022 .call = parse_vc_action_raw_encap_index,
4023 .comp = comp_set_raw_index,
4025 [ACTION_RAW_DECAP] = {
4026 .name = "raw_decap",
4027 .help = "decapsulation data, defined by set raw_decap",
4028 .priv = PRIV_ACTION(RAW_DECAP,
4029 sizeof(struct action_raw_decap_data)),
4030 .next = NEXT(action_raw_decap),
4031 .call = parse_vc_action_raw_decap,
4033 [ACTION_RAW_DECAP_INDEX] = {
4035 .help = "the index of raw_decap_confs",
4036 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
4038 [ACTION_RAW_DECAP_INDEX_VALUE] = {
4041 .help = "unsigned integer value",
4042 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4043 .call = parse_vc_action_raw_decap_index,
4044 .comp = comp_set_raw_index,
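/*
 * Illustrative usage (a sketch, not part of this file): raw_encap/raw_decap
 * take an optional index into the configurations stored beforehand by the
 * "set raw_encap"/"set raw_decap" commands defined below, e.g. in testpmd:
 *
 *   set raw_encap 0 eth src is 00:11:22:33:44:55 / vlan tci is 1 / end_set
 *   flow create 0 egress pattern eth / end actions raw_encap index 0 / end
 */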
4046 /* Top-level command. */
4049 .help = "set raw encap/decap/sample data",
4050 .type = "set raw_encap|raw_decap <index> <pattern>"
4051 " or set sample_actions <index> <action>",
4052 .next = NEXT(NEXT_ENTRY
4055 SET_SAMPLE_ACTIONS)),
4056 .call = parse_set_init,
4058 /* Sub-level commands. */
4060 .name = "raw_encap",
4061 .help = "set raw encap data",
4062 .next = NEXT(next_set_raw),
4063 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4064 (offsetof(struct buffer, port),
4065 sizeof(((struct buffer *)0)->port),
4066 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4067 .call = parse_set_raw_encap_decap,
4070 .name = "raw_decap",
4071 .help = "set raw decap data",
4072 .next = NEXT(next_set_raw),
4073 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4074 (offsetof(struct buffer, port),
4075 sizeof(((struct buffer *)0)->port),
4076 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4077 .call = parse_set_raw_encap_decap,
4082 .help = "index of raw_encap/raw_decap data",
4083 .next = NEXT(next_item),
4086 [SET_SAMPLE_INDEX] = {
4089 .help = "index of sample actions",
4090 .next = NEXT(next_action_sample),
4093 [SET_SAMPLE_ACTIONS] = {
4094 .name = "sample_actions",
4095 .help = "set sample actions list",
4096 .next = NEXT(NEXT_ENTRY(SET_SAMPLE_INDEX)),
4097 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4098 (offsetof(struct buffer, port),
4099 sizeof(((struct buffer *)0)->port),
4100 0, RAW_SAMPLE_CONFS_MAX_NUM - 1)),
4101 .call = parse_set_sample_action,
4103 [ACTION_SET_TAG] = {
4106 .priv = PRIV_ACTION(SET_TAG,
4107 sizeof(struct rte_flow_action_set_tag)),
4108 .next = NEXT(action_set_tag),
4111 [ACTION_SET_TAG_INDEX] = {
4113 .help = "index of tag array",
4114 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4115 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
4116 .call = parse_vc_conf,
4118 [ACTION_SET_TAG_DATA] = {
4120 .help = "tag value",
4121 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4122 .args = ARGS(ARGS_ENTRY
4123 (struct rte_flow_action_set_tag, data)),
4124 .call = parse_vc_conf,
4126 [ACTION_SET_TAG_MASK] = {
4128 .help = "mask for tag value",
4129 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4130 .args = ARGS(ARGS_ENTRY
4131 (struct rte_flow_action_set_tag, mask)),
4132 .call = parse_vc_conf,
4134 [ACTION_SET_META] = {
4136 .help = "set metadata",
4137 .priv = PRIV_ACTION(SET_META,
4138 sizeof(struct rte_flow_action_set_meta)),
4139 .next = NEXT(action_set_meta),
4140 .call = parse_vc_action_set_meta,
4142 [ACTION_SET_META_DATA] = {
4144 .help = "metadata value",
4145 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4146 .args = ARGS(ARGS_ENTRY
4147 (struct rte_flow_action_set_meta, data)),
4148 .call = parse_vc_conf,
4150 [ACTION_SET_META_MASK] = {
4152 .help = "mask for metadata value",
4153 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4154 .args = ARGS(ARGS_ENTRY
4155 (struct rte_flow_action_set_meta, mask)),
4156 .call = parse_vc_conf,
4158 [ACTION_SET_IPV4_DSCP] = {
4159 .name = "set_ipv4_dscp",
4160 .help = "set DSCP value",
4161 .priv = PRIV_ACTION(SET_IPV4_DSCP,
4162 sizeof(struct rte_flow_action_set_dscp)),
4163 .next = NEXT(action_set_ipv4_dscp),
4166 [ACTION_SET_IPV4_DSCP_VALUE] = {
4167 .name = "dscp_value",
4168 .help = "new IPv4 DSCP value to set",
4169 .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)),
4170 .args = ARGS(ARGS_ENTRY
4171 (struct rte_flow_action_set_dscp, dscp)),
4172 .call = parse_vc_conf,
4174 [ACTION_SET_IPV6_DSCP] = {
4175 .name = "set_ipv6_dscp",
4176 .help = "set DSCP value",
4177 .priv = PRIV_ACTION(SET_IPV6_DSCP,
4178 sizeof(struct rte_flow_action_set_dscp)),
4179 .next = NEXT(action_set_ipv6_dscp),
4182 [ACTION_SET_IPV6_DSCP_VALUE] = {
4183 .name = "dscp_value",
4184 .help = "new IPv6 DSCP value to set",
4185 .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)),
4186 .args = ARGS(ARGS_ENTRY
4187 (struct rte_flow_action_set_dscp, dscp)),
4188 .call = parse_vc_conf,
4192 .help = "enable flow aging with a timeout",
4193 .next = NEXT(action_age),
4194 .priv = PRIV_ACTION(AGE,
4195 sizeof(struct rte_flow_action_age)),
4198 [ACTION_AGE_TIMEOUT] = {
4200 .help = "flow age timeout value",
4201 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_age,
4203 .next = NEXT(action_age, NEXT_ENTRY(UNSIGNED)),
4204 .call = parse_vc_conf,
4208 .help = "set a sample action",
4209 .next = NEXT(action_sample),
4210 .priv = PRIV_ACTION(SAMPLE,
4211 sizeof(struct action_sample_data)),
4212 .call = parse_vc_action_sample,
4214 [ACTION_SAMPLE_RATIO] = {
4216 .help = "flow sample ratio value",
4217 .next = NEXT(action_sample, NEXT_ENTRY(UNSIGNED)),
4218 .args = ARGS(ARGS_ENTRY_ARB
4219 (offsetof(struct action_sample_data, conf) +
4220 offsetof(struct rte_flow_action_sample, ratio),
4221 sizeof(((struct rte_flow_action_sample *)0)->
4224 [ACTION_SAMPLE_INDEX] = {
4226 .help = "the index of sample actions list",
4227 .next = NEXT(NEXT_ENTRY(ACTION_SAMPLE_INDEX_VALUE)),
4229 [ACTION_SAMPLE_INDEX_VALUE] = {
4232 .help = "unsigned integer value",
4233 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4234 .call = parse_vc_action_sample_index,
4235 .comp = comp_set_sample_index,
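/*
 * Illustrative usage (a sketch, not part of this file): the sample action
 * references a list of actions previously stored with "set sample_actions"
 * and applies it to one packet out of every <ratio>, e.g. in testpmd:
 *
 *   set sample_actions 0 mark id 0x8 / queue index 1 / end
 *   flow create 0 ingress pattern eth / end actions sample ratio 2 index 0 / end
 */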
4237 /* Shared action destroy arguments. */
4238 [SHARED_ACTION_DESTROY_ID] = {
4239 .name = "action_id",
4240 .help = "specify a shared action id to destroy",
4241 .next = NEXT(next_sa_destroy_attr,
4242 NEXT_ENTRY(SHARED_ACTION_ID)),
4243 .args = ARGS(ARGS_ENTRY_PTR(struct buffer,
4244 args.sa_destroy.action_id)),
4245 .call = parse_sa_destroy,
4247 /* Shared action create arguments. */
4248 [SHARED_ACTION_CREATE_ID] = {
4249 .name = "action_id",
4250 .help = "specify a shared action id to create",
4251 .next = NEXT(next_sa_create_attr,
4252 NEXT_ENTRY(SHARED_ACTION_ID)),
4253 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
4257 .help = "apply shared action by id",
4258 .priv = PRIV_ACTION(SHARED, 0),
4259 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_ID2PTR)),
4260 .args = ARGS(ARGS_ENTRY_ARB(0, sizeof(uint32_t))),
4263 [SHARED_ACTION_ID2PTR] = {
4264 .name = "{action_id}",
4265 .type = "SHARED_ACTION_ID",
4266 .help = "shared action id",
4267 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4268 .call = parse_sa_id2ptr,
4271 [SHARED_ACTION_INGRESS] = {
4273 .help = "apply the shared action to ingress",
4274 .next = NEXT(next_sa_create_attr),
4277 [SHARED_ACTION_EGRESS] = {
4279 .help = "apply the shared action to egress",
4280 .next = NEXT(next_sa_create_attr),
4283 [SHARED_ACTION_SPEC] = {
4285 .help = "specify action to share",
4286 .next = NEXT(next_action),
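/*
 * Illustrative usage (a sketch, not part of this file): shared actions are
 * created once, referenced from flow rules by id, and destroyed explicitly,
 * e.g. in testpmd:
 *
 *   flow shared_action 0 create action_id 7 ingress action rss queues 0 1 end / end
 *   flow create 0 ingress pattern eth / end actions shared 7 / end
 *   flow shared_action 0 destroy action_id 7
 */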
4290 /** Remove and return last entry from argument stack. */
4291 static const struct arg *
4292 pop_args(struct context *ctx)
4294 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
4297 /** Add entry on top of the argument stack. */
4299 push_args(struct context *ctx, const struct arg *arg)
4301 if (ctx->args_num == CTX_STACK_SIZE)
4303 ctx->args[ctx->args_num++] = arg;
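/*
 * Typical usage pattern (a sketch based on the callbacks further below): a
 * token's parser pushes an argument descriptor describing where its value
 * must be stored, then a generic value parser pops it to perform the store,
 * e.g.:
 *
 *	if (push_args(ctx, ARGS_ENTRY(struct rte_flow_action_queue, index)))
 *		return -1;
 *	return parse_int(ctx, token, str, len, NULL, 0);
 */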
4307 /** Spread value into buffer according to bit-mask. */
4309 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
4311 uint32_t i = arg->size;
4319 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4328 unsigned int shift = 0;
4329 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
4331 for (shift = 0; arg->mask[i] >> shift; ++shift) {
4332 if (!(arg->mask[i] & (1 << shift)))
4337 *buf &= ~(1 << shift);
4338 *buf |= (val & 1) << shift;
4346 /** Compare a string with a partial one of a given length. */
4348 strcmp_partial(const char *full, const char *partial, size_t partial_len)
4350 int r = strncmp(full, partial, partial_len);
4354 if (strlen(full) <= partial_len)
4356 return full[partial_len];
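/*
 * Note (added): despite its name, this helper only returns 0 when the
 * length-delimited string "partial" spells out the whole of "full";
 * "partial" simply does not have to be NUL-terminated.
 */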
4360 * Parse a prefix length and generate a bit-mask.
4362 * Last argument (ctx->args) is retrieved to determine mask size, storage
4363 * location and whether the result must use network byte ordering.
4366 parse_prefix(struct context *ctx, const struct token *token,
4367 const char *str, unsigned int len,
4368 void *buf, unsigned int size)
4370 const struct arg *arg = pop_args(ctx);
4371 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
4378 /* Argument is expected. */
4382 u = strtoumax(str, &end, 0);
4383 if (errno || (size_t)(end - str) != len)
4388 extra = arg_entry_bf_fill(NULL, 0, arg);
4397 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
4398 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4405 if (bytes > size || bytes + !!extra > size)
4409 buf = (uint8_t *)ctx->object + arg->offset;
4410 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4412 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
4413 memset(buf, 0x00, size - bytes);
4415 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
4419 memset(buf, 0xff, bytes);
4420 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
4422 ((uint8_t *)buf)[bytes] = conv[extra];
4425 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4428 push_args(ctx, arg);
4432 /** Default parsing function for token name matching. */
4434 parse_default(struct context *ctx, const struct token *token,
4435 const char *str, unsigned int len,
4436 void *buf, unsigned int size)
4441 if (strcmp_partial(token->name, str, len))
4446 /** Parse flow command, initialize output buffer for subsequent tokens. */
4448 parse_init(struct context *ctx, const struct token *token,
4449 const char *str, unsigned int len,
4450 void *buf, unsigned int size)
4452 struct buffer *out = buf;
4454 /* Token name must match. */
4455 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4457 /* Nothing else to do if there is no buffer. */
4460 /* Make sure buffer is large enough. */
4461 if (size < sizeof(*out))
4463 /* Initialize buffer. */
4464 memset(out, 0x00, sizeof(*out));
4465 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
4468 ctx->objmask = NULL;
4472 /** Parse tokens for shared action commands. */
4474 parse_sa(struct context *ctx, const struct token *token,
4475 const char *str, unsigned int len,
4476 void *buf, unsigned int size)
4478 struct buffer *out = buf;
4480 /* Token name must match. */
4481 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4483 /* Nothing else to do if there is no buffer. */
4486 if (!out->command) {
4487 if (ctx->curr != SHARED_ACTION)
4489 if (sizeof(*out) > size)
4491 out->command = ctx->curr;
4494 ctx->objmask = NULL;
4495 out->args.vc.data = (uint8_t *)out + size;
4498 switch (ctx->curr) {
4499 case SHARED_ACTION_CREATE:
4500 case SHARED_ACTION_UPDATE:
4501 out->args.vc.actions =
4502 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4504 out->args.vc.attr.group = UINT32_MAX;
4506 case SHARED_ACTION_QUERY:
4507 out->command = ctx->curr;
4510 ctx->objmask = NULL;
4512 case SHARED_ACTION_EGRESS:
4513 out->args.vc.attr.egress = 1;
4515 case SHARED_ACTION_INGRESS:
4516 out->args.vc.attr.ingress = 1;
4524 /** Parse tokens for shared action destroy command. */
4526 parse_sa_destroy(struct context *ctx, const struct token *token,
4527 const char *str, unsigned int len,
4528 void *buf, unsigned int size)
4530 struct buffer *out = buf;
4531 uint32_t *action_id;
4533 /* Token name must match. */
4534 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4536 /* Nothing else to do if there is no buffer. */
4539 if (!out->command || out->command == SHARED_ACTION) {
4540 if (ctx->curr != SHARED_ACTION_DESTROY)
4542 if (sizeof(*out) > size)
4544 out->command = ctx->curr;
4547 ctx->objmask = NULL;
4548 out->args.sa_destroy.action_id =
4549 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4553 action_id = out->args.sa_destroy.action_id
4554 + out->args.sa_destroy.action_id_n++;
4555 if ((uint8_t *)action_id > (uint8_t *)out + size)
4558 ctx->object = action_id;
4559 ctx->objmask = NULL;
4563 /** Parse tokens for validate/create commands. */
4565 parse_vc(struct context *ctx, const struct token *token,
4566 const char *str, unsigned int len,
4567 void *buf, unsigned int size)
4569 struct buffer *out = buf;
4573 /* Token name must match. */
4574 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4576 /* Nothing else to do if there is no buffer. */
4579 if (!out->command) {
4580 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
4582 if (sizeof(*out) > size)
4584 out->command = ctx->curr;
4587 ctx->objmask = NULL;
4588 out->args.vc.data = (uint8_t *)out + size;
4592 switch (ctx->curr) {
4594 ctx->object = &out->args.vc.attr;
4598 ctx->object = &out->args.vc.tunnel_ops;
4601 ctx->objmask = NULL;
4602 switch (ctx->curr) {
4607 out->args.vc.tunnel_ops.enabled = 1;
4608 out->args.vc.tunnel_ops.actions = 1;
4611 out->args.vc.tunnel_ops.enabled = 1;
4612 out->args.vc.tunnel_ops.items = 1;
4615 out->args.vc.attr.ingress = 1;
4618 out->args.vc.attr.egress = 1;
4621 out->args.vc.attr.transfer = 1;
4624 out->args.vc.pattern =
4625 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4627 ctx->object = out->args.vc.pattern;
4628 ctx->objmask = NULL;
4631 out->args.vc.actions =
4632 (void *)RTE_ALIGN_CEIL((uintptr_t)
4633 (out->args.vc.pattern +
4634 out->args.vc.pattern_n),
4636 ctx->object = out->args.vc.actions;
4637 ctx->objmask = NULL;
4644 if (!out->args.vc.actions) {
4645 const struct parse_item_priv *priv = token->priv;
4646 struct rte_flow_item *item =
4647 out->args.vc.pattern + out->args.vc.pattern_n;
4649 data_size = priv->size * 3; /* spec, last, mask */
4650 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4651 (out->args.vc.data - data_size),
4653 if ((uint8_t *)item + sizeof(*item) > data)
4655 *item = (struct rte_flow_item){
4658 ++out->args.vc.pattern_n;
4660 ctx->objmask = NULL;
4662 const struct parse_action_priv *priv = token->priv;
4663 struct rte_flow_action *action =
4664 out->args.vc.actions + out->args.vc.actions_n;
4666 data_size = priv->size; /* configuration */
4667 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4668 (out->args.vc.data - data_size),
4670 if ((uint8_t *)action + sizeof(*action) > data)
4672 *action = (struct rte_flow_action){
4674 .conf = data_size ? data : NULL,
4676 ++out->args.vc.actions_n;
4677 ctx->object = action;
4678 ctx->objmask = NULL;
4680 memset(data, 0, data_size);
4681 out->args.vc.data = data;
4682 ctx->objdata = data_size;
4686 /** Parse pattern item parameter type. */
4688 parse_vc_spec(struct context *ctx, const struct token *token,
4689 const char *str, unsigned int len,
4690 void *buf, unsigned int size)
4692 struct buffer *out = buf;
4693 struct rte_flow_item *item;
4699 /* Token name must match. */
4700 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4702 /* Parse parameter types. */
4703 switch (ctx->curr) {
4704 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
4710 case ITEM_PARAM_SPEC:
4713 case ITEM_PARAM_LAST:
4716 case ITEM_PARAM_PREFIX:
4717 /* Modify next token to expect a prefix. */
4718 if (ctx->next_num < 2)
4720 ctx->next[ctx->next_num - 2] = prefix;
4722 case ITEM_PARAM_MASK:
4728 /* Nothing else to do if there is no buffer. */
4731 if (!out->args.vc.pattern_n)
4733 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4734 data_size = ctx->objdata / 3; /* spec, last, mask */
4735 /* Point to selected object. */
4736 ctx->object = out->args.vc.data + (data_size * index);
4738 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
4739 item->mask = ctx->objmask;
4741 ctx->objmask = NULL;
4742 /* Update relevant item pointer. */
4743 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
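/*
 * Illustrative usage (a sketch, not part of this file): the is/spec/last/
 * mask/prefix parameter tokens handled above select which third of the
 * per-item data area the next value is written to, e.g. in testpmd:
 *
 *   flow create 0 ingress pattern ipv4 dst spec 10.0.0.0 dst prefix 24 / end
 *        actions queue index 0 / end
 */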
4748 /** Parse action configuration field. */
4750 parse_vc_conf(struct context *ctx, const struct token *token,
4751 const char *str, unsigned int len,
4752 void *buf, unsigned int size)
4754 struct buffer *out = buf;
4757 /* Token name must match. */
4758 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4760 /* Nothing else to do if there is no buffer. */
4763 /* Point to selected object. */
4764 ctx->object = out->args.vc.data;
4765 ctx->objmask = NULL;
4769 /** Parse eCPRI common header type field. */
4771 parse_vc_item_ecpri_type(struct context *ctx, const struct token *token,
4772 const char *str, unsigned int len,
4773 void *buf, unsigned int size)
4775 struct rte_flow_item_ecpri *ecpri;
4776 struct rte_flow_item_ecpri *ecpri_mask;
4777 struct rte_flow_item *item;
4780 struct buffer *out = buf;
4781 const struct arg *arg;
4784 /* Token name must match. */
4785 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4787 switch (ctx->curr) {
4788 case ITEM_ECPRI_COMMON_TYPE_IQ_DATA:
4789 msg_type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
4791 case ITEM_ECPRI_COMMON_TYPE_RTC_CTRL:
4792 msg_type = RTE_ECPRI_MSG_TYPE_RTC_CTRL;
4794 case ITEM_ECPRI_COMMON_TYPE_DLY_MSR:
4795 msg_type = RTE_ECPRI_MSG_TYPE_DLY_MSR;
4802 arg = pop_args(ctx);
4805 ecpri = (struct rte_flow_item_ecpri *)out->args.vc.data;
4806 ecpri->hdr.common.type = msg_type;
4807 data_size = ctx->objdata / 3; /* spec, last, mask */
4808 ecpri_mask = (struct rte_flow_item_ecpri *)(out->args.vc.data +
4810 ecpri_mask->hdr.common.type = 0xFF;
4812 ecpri->hdr.common.u32 = rte_cpu_to_be_32(ecpri->hdr.common.u32);
4813 ecpri_mask->hdr.common.u32 =
4814 rte_cpu_to_be_32(ecpri_mask->hdr.common.u32);
4816 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4818 item->mask = ecpri_mask;
4822 /** Parse RSS action. */
4824 parse_vc_action_rss(struct context *ctx, const struct token *token,
4825 const char *str, unsigned int len,
4826 void *buf, unsigned int size)
4828 struct buffer *out = buf;
4829 struct rte_flow_action *action;
4830 struct action_rss_data *action_rss_data;
4834 ret = parse_vc(ctx, token, str, len, buf, size);
4837 /* Nothing else to do if there is no buffer. */
4840 if (!out->args.vc.actions_n)
4842 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4843 /* Point to selected object. */
4844 ctx->object = out->args.vc.data;
4845 ctx->objmask = NULL;
4846 /* Set up default configuration. */
4847 action_rss_data = ctx->object;
4848 *action_rss_data = (struct action_rss_data){
4849 .conf = (struct rte_flow_action_rss){
4850 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4853 .key_len = sizeof(action_rss_data->key),
4854 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
4855 .key = action_rss_data->key,
4856 .queue = action_rss_data->queue,
4858 .key = "testpmd's default RSS hash key, "
4859 "override it for better balancing",
4862 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
4863 action_rss_data->queue[i] = i;
4864 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
4865 ctx->port != (portid_t)RTE_PORT_ALL) {
4866 struct rte_eth_dev_info info;
4869 ret2 = rte_eth_dev_info_get(ctx->port, &info);
4873 action_rss_data->conf.key_len =
4874 RTE_MIN(sizeof(action_rss_data->key),
4875 info.hash_key_size);
4877 action->conf = &action_rss_data->conf;
4882 * Parse func field for RSS action.
4884 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
4885 * ACTION_RSS_FUNC_* index that called this function.
4888 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
4889 const char *str, unsigned int len,
4890 void *buf, unsigned int size)
4892 struct action_rss_data *action_rss_data;
4893 enum rte_eth_hash_function func;
4897 /* Token name must match. */
4898 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4900 switch (ctx->curr) {
4901 case ACTION_RSS_FUNC_DEFAULT:
4902 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4904 case ACTION_RSS_FUNC_TOEPLITZ:
4905 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
4907 case ACTION_RSS_FUNC_SIMPLE_XOR:
4908 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
4910 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
4911 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
4918 action_rss_data = ctx->object;
4919 action_rss_data->conf.func = func;
4924 * Parse type field for RSS action.
4926 * Valid tokens are type field names and the "end" token.
4929 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
4930 const char *str, unsigned int len,
4931 void *buf, unsigned int size)
4933 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
4934 struct action_rss_data *action_rss_data;
4940 if (ctx->curr != ACTION_RSS_TYPE)
4942 if (!(ctx->objdata >> 16) && ctx->object) {
4943 action_rss_data = ctx->object;
4944 action_rss_data->conf.types = 0;
4946 if (!strcmp_partial("end", str, len)) {
4947 ctx->objdata &= 0xffff;
4950 for (i = 0; rss_type_table[i].str; ++i)
4951 if (!strcmp_partial(rss_type_table[i].str, str, len))
4953 if (!rss_type_table[i].str)
4955 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
4957 if (ctx->next_num == RTE_DIM(ctx->next))
4959 ctx->next[ctx->next_num++] = next;
4962 action_rss_data = ctx->object;
4963 action_rss_data->conf.types |= rss_type_table[i].rss_type;
4968 * Parse queue field for RSS action.
4970 * Valid tokens are queue indices and the "end" token.
4973 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4974 const char *str, unsigned int len,
4975 void *buf, unsigned int size)
4977 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4978 struct action_rss_data *action_rss_data;
4979 const struct arg *arg;
4986 if (ctx->curr != ACTION_RSS_QUEUE)
4988 i = ctx->objdata >> 16;
4989 if (!strcmp_partial("end", str, len)) {
4990 ctx->objdata &= 0xffff;
4993 if (i >= ACTION_RSS_QUEUE_NUM)
4995 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4996 i * sizeof(action_rss_data->queue[i]),
4997 sizeof(action_rss_data->queue[i]));
4998 if (push_args(ctx, arg))
5000 ret = parse_int(ctx, token, str, len, NULL, 0);
5006 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
5008 if (ctx->next_num == RTE_DIM(ctx->next))
5010 ctx->next[ctx->next_num++] = next;
5014 action_rss_data = ctx->object;
5015 action_rss_data->conf.queue_num = i;
5016 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
5020 /** Parse VXLAN encap action. */
5022 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
5023 const char *str, unsigned int len,
5024 void *buf, unsigned int size)
5026 struct buffer *out = buf;
5027 struct rte_flow_action *action;
5028 struct action_vxlan_encap_data *action_vxlan_encap_data;
5031 ret = parse_vc(ctx, token, str, len, buf, size);
5034 /* Nothing else to do if there is no buffer. */
5037 if (!out->args.vc.actions_n)
5039 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5040 /* Point to selected object. */
5041 ctx->object = out->args.vc.data;
5042 ctx->objmask = NULL;
5043 /* Set up default configuration. */
5044 action_vxlan_encap_data = ctx->object;
5045 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
5046 .conf = (struct rte_flow_action_vxlan_encap){
5047 .definition = action_vxlan_encap_data->items,
5051 .type = RTE_FLOW_ITEM_TYPE_ETH,
5052 .spec = &action_vxlan_encap_data->item_eth,
5053 .mask = &rte_flow_item_eth_mask,
5056 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5057 .spec = &action_vxlan_encap_data->item_vlan,
5058 .mask = &rte_flow_item_vlan_mask,
5061 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5062 .spec = &action_vxlan_encap_data->item_ipv4,
5063 .mask = &rte_flow_item_ipv4_mask,
5066 .type = RTE_FLOW_ITEM_TYPE_UDP,
5067 .spec = &action_vxlan_encap_data->item_udp,
5068 .mask = &rte_flow_item_udp_mask,
5071 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
5072 .spec = &action_vxlan_encap_data->item_vxlan,
5073 .mask = &rte_flow_item_vxlan_mask,
5076 .type = RTE_FLOW_ITEM_TYPE_END,
5081 .tci = vxlan_encap_conf.vlan_tci,
5085 .src_addr = vxlan_encap_conf.ipv4_src,
5086 .dst_addr = vxlan_encap_conf.ipv4_dst,
5089 .src_port = vxlan_encap_conf.udp_src,
5090 .dst_port = vxlan_encap_conf.udp_dst,
5092 .item_vxlan.flags = 0,
5094 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
5095 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5096 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
5097 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5098 if (!vxlan_encap_conf.select_ipv4) {
5099 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
5100 &vxlan_encap_conf.ipv6_src,
5101 sizeof(vxlan_encap_conf.ipv6_src));
5102 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
5103 &vxlan_encap_conf.ipv6_dst,
5104 sizeof(vxlan_encap_conf.ipv6_dst));
5105 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
5106 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5107 .spec = &action_vxlan_encap_data->item_ipv6,
5108 .mask = &rte_flow_item_ipv6_mask,
5111 if (!vxlan_encap_conf.select_vlan)
5112 action_vxlan_encap_data->items[1].type =
5113 RTE_FLOW_ITEM_TYPE_VOID;
5114 if (vxlan_encap_conf.select_tos_ttl) {
5115 if (vxlan_encap_conf.select_ipv4) {
5116 static struct rte_flow_item_ipv4 ipv4_mask_tos;
5118 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
5119 sizeof(ipv4_mask_tos));
5120 ipv4_mask_tos.hdr.type_of_service = 0xff;
5121 ipv4_mask_tos.hdr.time_to_live = 0xff;
5122 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
5123 vxlan_encap_conf.ip_tos;
5124 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
5125 vxlan_encap_conf.ip_ttl;
5126 action_vxlan_encap_data->items[2].mask =
5129 static struct rte_flow_item_ipv6 ipv6_mask_tos;
5131 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
5132 sizeof(ipv6_mask_tos));
5133 ipv6_mask_tos.hdr.vtc_flow |=
5134 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
5135 ipv6_mask_tos.hdr.hop_limits = 0xff;
5136 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
5138 ((uint32_t)vxlan_encap_conf.ip_tos <<
5139 RTE_IPV6_HDR_TC_SHIFT);
5140 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
5141 vxlan_encap_conf.ip_ttl;
5142 action_vxlan_encap_data->items[2].mask =
5146 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
5147 RTE_DIM(vxlan_encap_conf.vni));
5148 action->conf = &action_vxlan_encap_data->conf;
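/*
 * Illustrative usage (a sketch, not part of this file): the fields copied
 * above come from the global vxlan_encap_conf previously filled in by the
 * "set vxlan" testpmd command (defined outside this file), e.g.:
 *
 *   set vxlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4789 ip-src 127.0.0.1
 *       ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   flow create 0 egress pattern eth / end actions vxlan_encap / end
 */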
5152 /** Parse NVGRE encap action. */
5154 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
5155 const char *str, unsigned int len,
5156 void *buf, unsigned int size)
5158 struct buffer *out = buf;
5159 struct rte_flow_action *action;
5160 struct action_nvgre_encap_data *action_nvgre_encap_data;
5163 ret = parse_vc(ctx, token, str, len, buf, size);
5166 /* Nothing else to do if there is no buffer. */
5169 if (!out->args.vc.actions_n)
5171 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5172 /* Point to selected object. */
5173 ctx->object = out->args.vc.data;
5174 ctx->objmask = NULL;
5175 /* Set up default configuration. */
5176 action_nvgre_encap_data = ctx->object;
5177 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
5178 .conf = (struct rte_flow_action_nvgre_encap){
5179 .definition = action_nvgre_encap_data->items,
5183 .type = RTE_FLOW_ITEM_TYPE_ETH,
5184 .spec = &action_nvgre_encap_data->item_eth,
5185 .mask = &rte_flow_item_eth_mask,
5188 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5189 .spec = &action_nvgre_encap_data->item_vlan,
5190 .mask = &rte_flow_item_vlan_mask,
5193 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5194 .spec = &action_nvgre_encap_data->item_ipv4,
5195 .mask = &rte_flow_item_ipv4_mask,
5198 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
5199 .spec = &action_nvgre_encap_data->item_nvgre,
5200 .mask = &rte_flow_item_nvgre_mask,
5203 .type = RTE_FLOW_ITEM_TYPE_END,
5208 .tci = nvgre_encap_conf.vlan_tci,
5212 .src_addr = nvgre_encap_conf.ipv4_src,
5213 .dst_addr = nvgre_encap_conf.ipv4_dst,
5215 .item_nvgre.flow_id = 0,
5217 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
5218 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5219 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
5220 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5221 if (!nvgre_encap_conf.select_ipv4) {
5222 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
5223 &nvgre_encap_conf.ipv6_src,
5224 sizeof(nvgre_encap_conf.ipv6_src));
5225 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
5226 &nvgre_encap_conf.ipv6_dst,
5227 sizeof(nvgre_encap_conf.ipv6_dst));
5228 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
5229 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5230 .spec = &action_nvgre_encap_data->item_ipv6,
5231 .mask = &rte_flow_item_ipv6_mask,
5234 if (!nvgre_encap_conf.select_vlan)
5235 action_nvgre_encap_data->items[1].type =
5236 RTE_FLOW_ITEM_TYPE_VOID;
5237 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
5238 RTE_DIM(nvgre_encap_conf.tni));
5239 action->conf = &action_nvgre_encap_data->conf;
5243 /** Parse l2 encap action. */
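/*
 * L2 encapsulation is expressed as a RAW_ENCAP action: an Ethernet header,
 * optionally followed by a VLAN header, is serialized into the action's
 * data buffer from l2_encap_conf.
 */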
5245 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
5246 const char *str, unsigned int len,
5247 void *buf, unsigned int size)
5249 struct buffer *out = buf;
5250 struct rte_flow_action *action;
5251 struct action_raw_encap_data *action_encap_data;
5252 struct rte_flow_item_eth eth = { .type = 0, };
5253 struct rte_flow_item_vlan vlan = {
5254 .tci = mplsoudp_encap_conf.vlan_tci,
5260 ret = parse_vc(ctx, token, str, len, buf, size);
5263 /* Nothing else to do if there is no buffer. */
5266 if (!out->args.vc.actions_n)
5268 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5269 /* Point to selected object. */
5270 ctx->object = out->args.vc.data;
5271 ctx->objmask = NULL;
5272 /* Copy the headers to the buffer. */
5273 action_encap_data = ctx->object;
5274 *action_encap_data = (struct action_raw_encap_data) {
5275 .conf = (struct rte_flow_action_raw_encap){
5276 .data = action_encap_data->data,
5280 header = action_encap_data->data;
5281 if (l2_encap_conf.select_vlan)
5282 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5283 else if (l2_encap_conf.select_ipv4)
5284 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5286 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5287 memcpy(eth.dst.addr_bytes,
5288 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5289 memcpy(eth.src.addr_bytes,
5290 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5291 memcpy(header, &eth, sizeof(eth));
5292 header += sizeof(eth);
5293 if (l2_encap_conf.select_vlan) {
5294 if (l2_encap_conf.select_ipv4)
5295 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5297 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5298 memcpy(header, &vlan, sizeof(vlan));
5299 header += sizeof(vlan);
5301 action_encap_data->conf.size = header -
5302 action_encap_data->data;
5303 action->conf = &action_encap_data->conf;
5307 /** Parse l2 decap action. */
5309 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
5310 const char *str, unsigned int len,
5311 void *buf, unsigned int size)
5313 struct buffer *out = buf;
5314 struct rte_flow_action *action;
5315 struct action_raw_decap_data *action_decap_data;
5316 struct rte_flow_item_eth eth = { .type = 0, };
5317 struct rte_flow_item_vlan vlan = {
5318 .tci = mplsoudp_encap_conf.vlan_tci,
5324 ret = parse_vc(ctx, token, str, len, buf, size);
5327 /* Nothing else to do if there is no buffer. */
5330 if (!out->args.vc.actions_n)
5332 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5333 /* Point to selected object. */
5334 ctx->object = out->args.vc.data;
5335 ctx->objmask = NULL;
5336 /* Copy the headers to the buffer. */
5337 action_decap_data = ctx->object;
5338 *action_decap_data = (struct action_raw_decap_data) {
5339 .conf = (struct rte_flow_action_raw_decap){
5340 .data = action_decap_data->data,
5344 header = action_decap_data->data;
5345 if (l2_decap_conf.select_vlan)
5346 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5347 memcpy(header, &eth, sizeof(eth));
5348 header += sizeof(eth);
5349 if (l2_decap_conf.select_vlan) {
5350 memcpy(header, &vlan, sizeof(vlan));
5351 header += sizeof(vlan);
5353 action_decap_data->conf.size = header -
5354 action_decap_data->data;
5355 action->conf = &action_decap_data->conf;
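/* EtherType of MPLS unicast frames. */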
5359 #define ETHER_TYPE_MPLS_UNICAST 0x8847
5361 /** Parse MPLSOGRE encap action. */
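/*
 * Serializes ETH / [VLAN] / IPv4-or-IPv6 / GRE / MPLS headers into a
 * RAW_ENCAP buffer; the bottom-of-stack bit is set on the single MPLS entry.
 */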
5363 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
5364 const char *str, unsigned int len,
5365 void *buf, unsigned int size)
5367 struct buffer *out = buf;
5368 struct rte_flow_action *action;
5369 struct action_raw_encap_data *action_encap_data;
5370 struct rte_flow_item_eth eth = { .type = 0, };
5371 struct rte_flow_item_vlan vlan = {
5372 .tci = mplsogre_encap_conf.vlan_tci,
5375 struct rte_flow_item_ipv4 ipv4 = {
5377 .src_addr = mplsogre_encap_conf.ipv4_src,
5378 .dst_addr = mplsogre_encap_conf.ipv4_dst,
5379 .next_proto_id = IPPROTO_GRE,
5380 .version_ihl = RTE_IPV4_VHL_DEF,
5381 .time_to_live = IPDEFTTL,
5384 struct rte_flow_item_ipv6 ipv6 = {
5386 .proto = IPPROTO_GRE,
5387 .hop_limits = IPDEFTTL,
5390 struct rte_flow_item_gre gre = {
5391 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5393 struct rte_flow_item_mpls mpls = {
5399 ret = parse_vc(ctx, token, str, len, buf, size);
5402 /* Nothing else to do if there is no buffer. */
5405 if (!out->args.vc.actions_n)
5407 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5408 /* Point to selected object. */
5409 ctx->object = out->args.vc.data;
5410 ctx->objmask = NULL;
5411 /* Copy the headers to the buffer. */
5412 action_encap_data = ctx->object;
5413 *action_encap_data = (struct action_raw_encap_data) {
5414 .conf = (struct rte_flow_action_raw_encap){
5415 .data = action_encap_data->data,
5420 header = action_encap_data->data;
5421 if (mplsogre_encap_conf.select_vlan)
5422 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5423 else if (mplsogre_encap_conf.select_ipv4)
5424 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5426 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5427 memcpy(eth.dst.addr_bytes,
5428 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5429 memcpy(eth.src.addr_bytes,
5430 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5431 memcpy(header, &eth, sizeof(eth));
5432 header += sizeof(eth);
5433 if (mplsogre_encap_conf.select_vlan) {
5434 if (mplsogre_encap_conf.select_ipv4)
5435 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5437 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5438 memcpy(header, &vlan, sizeof(vlan));
5439 header += sizeof(vlan);
5441 if (mplsogre_encap_conf.select_ipv4) {
5442 memcpy(header, &ipv4, sizeof(ipv4));
5443 header += sizeof(ipv4);
5445 memcpy(&ipv6.hdr.src_addr,
5446 &mplsogre_encap_conf.ipv6_src,
5447 sizeof(mplsogre_encap_conf.ipv6_src));
5448 memcpy(&ipv6.hdr.dst_addr,
5449 &mplsogre_encap_conf.ipv6_dst,
5450 sizeof(mplsogre_encap_conf.ipv6_dst));
5451 memcpy(header, &ipv6, sizeof(ipv6));
5452 header += sizeof(ipv6);
5454 memcpy(header, &gre, sizeof(gre));
5455 header += sizeof(gre);
5456 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
5457 RTE_DIM(mplsogre_encap_conf.label));
5458 mpls.label_tc_s[2] |= 0x1;
5459 memcpy(header, &mpls, sizeof(mpls));
5460 header += sizeof(mpls);
5461 action_encap_data->conf.size = header -
5462 action_encap_data->data;
5463 action->conf = &action_encap_data->conf;
5467 /** Parse MPLSOGRE decap action. */
5469 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
5470 const char *str, unsigned int len,
5471 void *buf, unsigned int size)
5473 struct buffer *out = buf;
5474 struct rte_flow_action *action;
5475 struct action_raw_decap_data *action_decap_data;
5476 struct rte_flow_item_eth eth = { .type = 0, };
5477 struct rte_flow_item_vlan vlan = {.tci = 0};
5478 struct rte_flow_item_ipv4 ipv4 = {
5480 .next_proto_id = IPPROTO_GRE,
5483 struct rte_flow_item_ipv6 ipv6 = {
5485 .proto = IPPROTO_GRE,
5488 struct rte_flow_item_gre gre = {
5489 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5491 struct rte_flow_item_mpls mpls;
5495 ret = parse_vc(ctx, token, str, len, buf, size);
5498 /* Nothing else to do if there is no buffer. */
5501 if (!out->args.vc.actions_n)
5503 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5504 /* Point to selected object. */
5505 ctx->object = out->args.vc.data;
5506 ctx->objmask = NULL;
5507 /* Copy the headers to the buffer. */
5508 action_decap_data = ctx->object;
5509 *action_decap_data = (struct action_raw_decap_data) {
5510 .conf = (struct rte_flow_action_raw_decap){
5511 .data = action_decap_data->data,
5515 header = action_decap_data->data;
5516 if (mplsogre_decap_conf.select_vlan)
5517 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5518 else if (mplsogre_encap_conf.select_ipv4)
5519 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5521 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5522 memcpy(eth.dst.addr_bytes,
5523 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5524 memcpy(eth.src.addr_bytes,
5525 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5526 memcpy(header, &eth, sizeof(eth));
5527 header += sizeof(eth);
5528 if (mplsogre_encap_conf.select_vlan) {
5529 if (mplsogre_encap_conf.select_ipv4)
5530 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5532 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5533 memcpy(header, &vlan, sizeof(vlan));
5534 header += sizeof(vlan);
5536 if (mplsogre_encap_conf.select_ipv4) {
5537 memcpy(header, &ipv4, sizeof(ipv4));
5538 header += sizeof(ipv4);
5540 memcpy(header, &ipv6, sizeof(ipv6));
5541 header += sizeof(ipv6);
5543 memcpy(header, &gre, sizeof(gre));
5544 header += sizeof(gre);
5545 memset(&mpls, 0, sizeof(mpls));
5546 memcpy(header, &mpls, sizeof(mpls));
5547 header += sizeof(mpls);
5548 action_decap_data->conf.size = header -
5549 action_decap_data->data;
5550 action->conf = &action_decap_data->conf;
5554 /** Parse MPLSOUDP encap action. */
5556 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
5557 const char *str, unsigned int len,
5558 void *buf, unsigned int size)
5560 struct buffer *out = buf;
5561 struct rte_flow_action *action;
5562 struct action_raw_encap_data *action_encap_data;
5563 struct rte_flow_item_eth eth = { .type = 0, };
5564 struct rte_flow_item_vlan vlan = {
5565 .tci = mplsoudp_encap_conf.vlan_tci,
5568 struct rte_flow_item_ipv4 ipv4 = {
5570 .src_addr = mplsoudp_encap_conf.ipv4_src,
5571 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
5572 .next_proto_id = IPPROTO_UDP,
5573 .version_ihl = RTE_IPV4_VHL_DEF,
5574 .time_to_live = IPDEFTTL,
5577 struct rte_flow_item_ipv6 ipv6 = {
5579 .proto = IPPROTO_UDP,
5580 .hop_limits = IPDEFTTL,
5583 struct rte_flow_item_udp udp = {
5585 .src_port = mplsoudp_encap_conf.udp_src,
5586 .dst_port = mplsoudp_encap_conf.udp_dst,
5589 struct rte_flow_item_mpls mpls;
5593 ret = parse_vc(ctx, token, str, len, buf, size);
5596 /* Nothing else to do if there is no buffer. */
5599 if (!out->args.vc.actions_n)
5601 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5602 /* Point to selected object. */
5603 ctx->object = out->args.vc.data;
5604 ctx->objmask = NULL;
5605 /* Copy the headers to the buffer. */
5606 action_encap_data = ctx->object;
5607 *action_encap_data = (struct action_raw_encap_data) {
5608 .conf = (struct rte_flow_action_raw_encap){
5609 .data = action_encap_data->data,
5614 header = action_encap_data->data;
5615 if (mplsoudp_encap_conf.select_vlan)
5616 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5617 else if (mplsoudp_encap_conf.select_ipv4)
5618 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5620 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5621 memcpy(eth.dst.addr_bytes,
5622 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5623 memcpy(eth.src.addr_bytes,
5624 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5625 memcpy(header, &eth, sizeof(eth));
5626 header += sizeof(eth);
5627 if (mplsoudp_encap_conf.select_vlan) {
5628 if (mplsoudp_encap_conf.select_ipv4)
5629 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5631 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5632 memcpy(header, &vlan, sizeof(vlan));
5633 header += sizeof(vlan);
5635 if (mplsoudp_encap_conf.select_ipv4) {
5636 memcpy(header, &ipv4, sizeof(ipv4));
5637 header += sizeof(ipv4);
5639 memcpy(&ipv6.hdr.src_addr,
5640 &mplsoudp_encap_conf.ipv6_src,
5641 sizeof(mplsoudp_encap_conf.ipv6_src));
5642 memcpy(&ipv6.hdr.dst_addr,
5643 &mplsoudp_encap_conf.ipv6_dst,
5644 sizeof(mplsoudp_encap_conf.ipv6_dst));
5645 memcpy(header, &ipv6, sizeof(ipv6));
5646 header += sizeof(ipv6);
5648 memcpy(header, &udp, sizeof(udp));
5649 header += sizeof(udp);
5650 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
5651 RTE_DIM(mplsoudp_encap_conf.label));
5652 mpls.label_tc_s[2] |= 0x1;
5653 memcpy(header, &mpls, sizeof(mpls));
5654 header += sizeof(mpls);
5655 action_encap_data->conf.size = header -
5656 action_encap_data->data;
5657 action->conf = &action_encap_data->conf;
5661 /** Parse MPLSOUDP decap action. */
5663 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
5664 const char *str, unsigned int len,
5665 void *buf, unsigned int size)
5667 struct buffer *out = buf;
5668 struct rte_flow_action *action;
5669 struct action_raw_decap_data *action_decap_data;
5670 struct rte_flow_item_eth eth = { .type = 0, };
5671 struct rte_flow_item_vlan vlan = {.tci = 0};
5672 struct rte_flow_item_ipv4 ipv4 = {
5674 .next_proto_id = IPPROTO_UDP,
5677 struct rte_flow_item_ipv6 ipv6 = {
5679 .proto = IPPROTO_UDP,
5682 struct rte_flow_item_udp udp = {
5684 .dst_port = rte_cpu_to_be_16(6635),
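/* 6635 is the IANA-assigned UDP destination port for MPLS-over-UDP. */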
5687 struct rte_flow_item_mpls mpls;
5691 ret = parse_vc(ctx, token, str, len, buf, size);
5694 /* Nothing else to do if there is no buffer. */
5697 if (!out->args.vc.actions_n)
5699 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5700 /* Point to selected object. */
5701 ctx->object = out->args.vc.data;
5702 ctx->objmask = NULL;
5703 /* Copy the headers to the buffer. */
5704 action_decap_data = ctx->object;
5705 *action_decap_data = (struct action_raw_decap_data) {
5706 .conf = (struct rte_flow_action_raw_decap){
5707 .data = action_decap_data->data,
5711 header = action_decap_data->data;
5712 if (mplsoudp_decap_conf.select_vlan)
5713 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5714 else if (mplsoudp_encap_conf.select_ipv4)
5715 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5717 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5718 memcpy(eth.dst.addr_bytes,
5719 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5720 memcpy(eth.src.addr_bytes,
5721 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5722 memcpy(header, &eth, sizeof(eth));
5723 header += sizeof(eth);
5724 if (mplsoudp_encap_conf.select_vlan) {
5725 if (mplsoudp_encap_conf.select_ipv4)
5726 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5728 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5729 memcpy(header, &vlan, sizeof(vlan));
5730 header += sizeof(vlan);
5732 if (mplsoudp_encap_conf.select_ipv4) {
5733 memcpy(header, &ipv4, sizeof(ipv4));
5734 header += sizeof(ipv4);
5736 memcpy(header, &ipv6, sizeof(ipv6));
5737 header += sizeof(ipv6);
5739 memcpy(header, &udp, sizeof(udp));
5740 header += sizeof(udp);
5741 memset(&mpls, 0, sizeof(mpls));
5742 memcpy(header, &mpls, sizeof(mpls));
5743 header += sizeof(mpls);
5744 action_decap_data->conf.size = header -
5745 action_decap_data->data;
5746 action->conf = &action_decap_data->conf;
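/*
 * The "index" handlers below bind a configuration previously stored by the
 * "set raw_encap"/"set raw_decap" commands to the current action, e.g.
 * (illustrative testpmd command):
 *   flow create 0 ingress pattern eth / end actions raw_decap index 2 / raw_encap index 1 / end
 */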
5751 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
5752 const char *str, unsigned int len, void *buf,
5755 struct action_raw_decap_data *action_raw_decap_data;
5756 struct rte_flow_action *action;
5757 const struct arg *arg;
5758 struct buffer *out = buf;
5762 RTE_SET_USED(token);
5765 arg = ARGS_ENTRY_ARB_BOUNDED
5766 (offsetof(struct action_raw_decap_data, idx),
5767 sizeof(((struct action_raw_decap_data *)0)->idx),
5768 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5769 if (push_args(ctx, arg))
5771 ret = parse_int(ctx, token, str, len, NULL, 0);
5778 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5779 action_raw_decap_data = ctx->object;
5780 idx = action_raw_decap_data->idx;
5781 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
5782 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
5783 action->conf = &action_raw_decap_data->conf;
5789 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
5790 const char *str, unsigned int len, void *buf,
5793 struct action_raw_encap_data *action_raw_encap_data;
5794 struct rte_flow_action *action;
5795 const struct arg *arg;
5796 struct buffer *out = buf;
5800 RTE_SET_USED(token);
5803 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
5805 arg = ARGS_ENTRY_ARB_BOUNDED
5806 (offsetof(struct action_raw_encap_data, idx),
5807 sizeof(((struct action_raw_encap_data *)0)->idx),
5808 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5809 if (push_args(ctx, arg))
5811 ret = parse_int(ctx, token, str, len, NULL, 0);
5818 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5819 action_raw_encap_data = ctx->object;
5820 idx = action_raw_encap_data->idx;
5821 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
5822 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
5823 action_raw_encap_data->conf.preserve = NULL;
5824 action->conf = &action_raw_encap_data->conf;
5829 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
5830 const char *str, unsigned int len, void *buf,
5833 struct buffer *out = buf;
5834 struct rte_flow_action *action;
5835 struct action_raw_encap_data *action_raw_encap_data = NULL;
5838 ret = parse_vc(ctx, token, str, len, buf, size);
5841 /* Nothing else to do if there is no buffer. */
5844 if (!out->args.vc.actions_n)
5846 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5847 /* Point to selected object. */
5848 ctx->object = out->args.vc.data;
5849 ctx->objmask = NULL;
5850 /* Copy the headers to the buffer. */
5851 action_raw_encap_data = ctx->object;
5852 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
5853 action_raw_encap_data->conf.preserve = NULL;
5854 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
5855 action->conf = &action_raw_encap_data->conf;
5860 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
5861 const char *str, unsigned int len, void *buf,
5864 struct buffer *out = buf;
5865 struct rte_flow_action *action;
5866 struct action_raw_decap_data *action_raw_decap_data = NULL;
5869 ret = parse_vc(ctx, token, str, len, buf, size);
5872 /* Nothing else to do if there is no buffer. */
5875 if (!out->args.vc.actions_n)
5877 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5878 /* Point to selected object. */
5879 ctx->object = out->args.vc.data;
5880 ctx->objmask = NULL;
5881 /* Copy the headers to the buffer. */
5882 action_raw_decap_data = ctx->object;
5883 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
5884 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
5885 action->conf = &action_raw_decap_data->conf;
5890 parse_vc_action_set_meta(struct context *ctx, const struct token *token,
5891 const char *str, unsigned int len, void *buf,
5896 ret = parse_vc(ctx, token, str, len, buf, size);
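/* The dynamic metadata mbuf field and flag must be registered before the SET_META action can be used. */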
5899 ret = rte_flow_dynf_metadata_register();
5906 parse_vc_action_sample(struct context *ctx, const struct token *token,
5907 const char *str, unsigned int len, void *buf,
5910 struct buffer *out = buf;
5911 struct rte_flow_action *action;
5912 struct action_sample_data *action_sample_data = NULL;
5913 static struct rte_flow_action end_action = {
5914 RTE_FLOW_ACTION_TYPE_END, 0
5918 ret = parse_vc(ctx, token, str, len, buf, size);
5921 /* Nothing else to do if there is no buffer. */
5924 if (!out->args.vc.actions_n)
5926 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5927 /* Point to selected object. */
5928 ctx->object = out->args.vc.data;
5929 ctx->objmask = NULL;
5930 /* Copy the headers to the buffer. */
5931 action_sample_data = ctx->object;
5932 action_sample_data->conf.actions = &end_action;
5933 action->conf = &action_sample_data->conf;
5938 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
5939 const char *str, unsigned int len, void *buf,
5942 struct action_sample_data *action_sample_data;
5943 struct rte_flow_action *action;
5944 const struct arg *arg;
5945 struct buffer *out = buf;
5949 RTE_SET_USED(token);
5952 if (ctx->curr != ACTION_SAMPLE_INDEX_VALUE)
5954 arg = ARGS_ENTRY_ARB_BOUNDED
5955 (offsetof(struct action_sample_data, idx),
5956 sizeof(((struct action_sample_data *)0)->idx),
5957 0, RAW_SAMPLE_CONFS_MAX_NUM - 1);
5958 if (push_args(ctx, arg))
5960 ret = parse_int(ctx, token, str, len, NULL, 0);
5967 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5968 action_sample_data = ctx->object;
5969 idx = action_sample_data->idx;
5970 action_sample_data->conf.actions = raw_sample_confs[idx].data;
5971 action->conf = &action_sample_data->conf;
5975 /** Parse tokens for destroy command. */
5977 parse_destroy(struct context *ctx, const struct token *token,
5978 const char *str, unsigned int len,
5979 void *buf, unsigned int size)
5981 struct buffer *out = buf;
5983 /* Token name must match. */
5984 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5986 /* Nothing else to do if there is no buffer. */
5989 if (!out->command) {
5990 if (ctx->curr != DESTROY)
5992 if (sizeof(*out) > size)
5994 out->command = ctx->curr;
5997 ctx->objmask = NULL;
5998 out->args.destroy.rule =
5999 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6003 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
6004 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
6007 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
6008 ctx->objmask = NULL;
6012 /** Parse tokens for flush command. */
6014 parse_flush(struct context *ctx, const struct token *token,
6015 const char *str, unsigned int len,
6016 void *buf, unsigned int size)
6018 struct buffer *out = buf;
6020 /* Token name must match. */
6021 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6023 /* Nothing else to do if there is no buffer. */
6026 if (!out->command) {
6027 if (ctx->curr != FLUSH)
6029 if (sizeof(*out) > size)
6031 out->command = ctx->curr;
6034 ctx->objmask = NULL;
6039 /** Parse tokens for dump command. */
6041 parse_dump(struct context *ctx, const struct token *token,
6042 const char *str, unsigned int len,
6043 void *buf, unsigned int size)
6045 struct buffer *out = buf;
6047 /* Token name must match. */
6048 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6050 /* Nothing else to do if there is no buffer. */
6053 if (!out->command) {
6054 if (ctx->curr != DUMP)
6056 if (sizeof(*out) > size)
6058 out->command = ctx->curr;
6061 ctx->objmask = NULL;
6066 /** Parse tokens for query command. */
6068 parse_query(struct context *ctx, const struct token *token,
6069 const char *str, unsigned int len,
6070 void *buf, unsigned int size)
6072 struct buffer *out = buf;
6074 /* Token name must match. */
6075 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6077 /* Nothing else to do if there is no buffer. */
6080 if (!out->command) {
6081 if (ctx->curr != QUERY)
6083 if (sizeof(*out) > size)
6085 out->command = ctx->curr;
6088 ctx->objmask = NULL;
6093 /** Parse action names. */
6095 parse_action(struct context *ctx, const struct token *token,
6096 const char *str, unsigned int len,
6097 void *buf, unsigned int size)
6099 struct buffer *out = buf;
6100 const struct arg *arg = pop_args(ctx);
6104 /* Argument is expected. */
6107 /* Parse action name. */
6108 for (i = 0; next_action[i]; ++i) {
6109 const struct parse_action_priv *priv;
6111 token = &token_list[next_action[i]];
6112 if (strcmp_partial(token->name, str, len))
6118 memcpy((uint8_t *)ctx->object + arg->offset,
6124 push_args(ctx, arg);
6128 /** Parse tokens for list command. */
6130 parse_list(struct context *ctx, const struct token *token,
6131 const char *str, unsigned int len,
6132 void *buf, unsigned int size)
6134 struct buffer *out = buf;
6136 /* Token name must match. */
6137 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6139 /* Nothing else to do if there is no buffer. */
6142 if (!out->command) {
6143 if (ctx->curr != LIST)
6145 if (sizeof(*out) > size)
6147 out->command = ctx->curr;
6150 ctx->objmask = NULL;
6151 out->args.list.group =
6152 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6156 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
6157 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
6160 ctx->object = out->args.list.group + out->args.list.group_n++;
6161 ctx->objmask = NULL;
6165 /** Parse tokens for list all aged flows command. */
6167 parse_aged(struct context *ctx, const struct token *token,
6168 const char *str, unsigned int len,
6169 void *buf, unsigned int size)
6171 struct buffer *out = buf;
6173 /* Token name must match. */
6174 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6176 /* Nothing else to do if there is no buffer. */
6179 if (!out->command) {
6180 if (ctx->curr != AGED)
6182 if (sizeof(*out) > size)
6184 out->command = ctx->curr;
6187 ctx->objmask = NULL;
6189 if (ctx->curr == AGED_DESTROY)
6190 out->args.aged.destroy = 1;
6194 /** Parse tokens for isolate command. */
6196 parse_isolate(struct context *ctx, const struct token *token,
6197 const char *str, unsigned int len,
6198 void *buf, unsigned int size)
6200 struct buffer *out = buf;
6202 /* Token name must match. */
6203 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6205 /* Nothing else to do if there is no buffer. */
6208 if (!out->command) {
6209 if (ctx->curr != ISOLATE)
6211 if (sizeof(*out) > size)
6213 out->command = ctx->curr;
6216 ctx->objmask = NULL;
6222 parse_tunnel(struct context *ctx, const struct token *token,
6223 const char *str, unsigned int len,
6224 void *buf, unsigned int size)
6226 struct buffer *out = buf;
6228 /* Token name must match. */
6229 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6231 /* Nothing else to do if there is no buffer. */
6234 if (!out->command) {
6235 if (ctx->curr != TUNNEL)
6237 if (sizeof(*out) > size)
6239 out->command = ctx->curr;
6242 ctx->objmask = NULL;
6244 switch (ctx->curr) {
6248 case TUNNEL_DESTROY:
6250 out->command = ctx->curr;
6252 case TUNNEL_CREATE_TYPE:
6253 case TUNNEL_DESTROY_ID:
6254 ctx->object = &out->args.vc.tunnel_ops;
6263 * Parse signed/unsigned integers 8 to 64-bit long.
6265 * Last argument (ctx->args) is retrieved to determine the integer type and boundaries.
6269 parse_int(struct context *ctx, const struct token *token,
6270 const char *str, unsigned int len,
6271 void *buf, unsigned int size)
6273 const struct arg *arg = pop_args(ctx);
6278 /* Argument is expected. */
6283 (uintmax_t)strtoimax(str, &end, 0) :
6284 strtoumax(str, &end, 0);
6285 if (errno || (size_t)(end - str) != len)
6288 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
6289 (intmax_t)u > (intmax_t)arg->max)) ||
6290 (!arg->sign && (u < arg->min || u > arg->max))))
6295 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
6296 !arg_entry_bf_fill(ctx->objmask, -1, arg))
6300 buf = (uint8_t *)ctx->object + arg->offset;
6302 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
6306 case sizeof(uint8_t):
6307 *(uint8_t *)buf = u;
6309 case sizeof(uint16_t):
6310 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
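/* 24-bit fields such as VXLAN VNI and NVGRE TNI are copied byte by byte. */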
6312 case sizeof(uint8_t [3]):
6313 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6315 ((uint8_t *)buf)[0] = u;
6316 ((uint8_t *)buf)[1] = u >> 8;
6317 ((uint8_t *)buf)[2] = u >> 16;
6321 ((uint8_t *)buf)[0] = u >> 16;
6322 ((uint8_t *)buf)[1] = u >> 8;
6323 ((uint8_t *)buf)[2] = u;
6325 case sizeof(uint32_t):
6326 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
6328 case sizeof(uint64_t):
6329 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
6334 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
6336 buf = (uint8_t *)ctx->objmask + arg->offset;
6341 push_args(ctx, arg);
6348 * Three arguments (ctx->args) are retrieved from the stack to store data,
6349 * its actual length and address (in that order).
6352 parse_string(struct context *ctx, const struct token *token,
6353 const char *str, unsigned int len,
6354 void *buf, unsigned int size)
6356 const struct arg *arg_data = pop_args(ctx);
6357 const struct arg *arg_len = pop_args(ctx);
6358 const struct arg *arg_addr = pop_args(ctx);
6359 char tmp[16]; /* Ought to be enough. */
6362 /* Arguments are expected. */
6366 push_args(ctx, arg_data);
6370 push_args(ctx, arg_len);
6371 push_args(ctx, arg_data);
6374 size = arg_data->size;
6375 /* Bit-mask fill is not supported. */
6376 if (arg_data->mask || size < len)
6380 /* Let parse_int() fill length information first. */
6381 ret = snprintf(tmp, sizeof(tmp), "%u", len);
6384 push_args(ctx, arg_len);
6385 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6390 buf = (uint8_t *)ctx->object + arg_data->offset;
6391 /* Output buffer is not necessarily NUL-terminated. */
6392 memcpy(buf, str, len);
6393 memset((uint8_t *)buf + len, 0x00, size - len);
6395 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6396 /* Save address if requested. */
6397 if (arg_addr->size) {
6398 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6400 (uint8_t *)ctx->object + arg_data->offset
6404 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6406 (uint8_t *)ctx->objmask + arg_data->offset
6412 push_args(ctx, arg_addr);
6413 push_args(ctx, arg_len);
6414 push_args(ctx, arg_data);
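/** Convert a string of hexadecimal digit pairs into raw bytes. */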
6419 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
6425 /* Check input parameters */
6426 if ((src == NULL) ||
6432 /* Convert chars to bytes */
6433 for (i = 0, len = 0; i < *size; i += 2) {
6434 snprintf(tmp, 3, "%s", src + i);
6435 dst[len++] = strtoul(tmp, &c, 16);
6450 parse_hex(struct context *ctx, const struct token *token,
6451 const char *str, unsigned int len,
6452 void *buf, unsigned int size)
6454 const struct arg *arg_data = pop_args(ctx);
6455 const struct arg *arg_len = pop_args(ctx);
6456 const struct arg *arg_addr = pop_args(ctx);
6457 char tmp[16]; /* Ought to be enough. */
6459 unsigned int hexlen = len;
6460 unsigned int length = 256;
6461 uint8_t hex_tmp[length];
6463 /* Arguments are expected. */
6467 push_args(ctx, arg_data);
6471 push_args(ctx, arg_len);
6472 push_args(ctx, arg_data);
6475 size = arg_data->size;
6476 /* Bit-mask fill is not supported. */
6482 /* Translate the hexadecimal string into a byte array. */
6483 if (str[0] == '0' && ((str[1] == 'x') ||
6488 if (hexlen > length)
6490 ret = parse_hex_string(str, hex_tmp, &hexlen);
6493 /* Let parse_int() fill length information first. */
6494 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
6497 push_args(ctx, arg_len);
6498 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6503 buf = (uint8_t *)ctx->object + arg_data->offset;
6504 /* Output buffer is not necessarily NUL-terminated. */
6505 memcpy(buf, hex_tmp, hexlen);
6506 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
6508 memset((uint8_t *)ctx->objmask + arg_data->offset,
6510 /* Save address if requested. */
6511 if (arg_addr->size) {
6512 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6514 (uint8_t *)ctx->object + arg_data->offset
6518 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6520 (uint8_t *)ctx->objmask + arg_data->offset
6526 push_args(ctx, arg_addr);
6527 push_args(ctx, arg_len);
6528 push_args(ctx, arg_data);
6534 * Parse a zero-ended string.
6537 parse_string0(struct context *ctx, const struct token *token __rte_unused,
6538 const char *str, unsigned int len,
6539 void *buf, unsigned int size)
6541 const struct arg *arg_data = pop_args(ctx);
6543 /* Arguments are expected. */
6546 size = arg_data->size;
6547 /* Bit-mask fill is not supported. */
6548 if (arg_data->mask || size < len + 1)
6552 buf = (uint8_t *)ctx->object + arg_data->offset;
6553 strncpy(buf, str, len);
6555 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6558 push_args(ctx, arg_data);
6563 * Parse a MAC address.
6565 * Last argument (ctx->args) is retrieved to determine storage size and
6569 parse_mac_addr(struct context *ctx, const struct token *token,
6570 const char *str, unsigned int len,
6571 void *buf, unsigned int size)
6573 const struct arg *arg = pop_args(ctx);
6574 struct rte_ether_addr tmp;
6578 /* Argument is expected. */
6582 /* Bit-mask fill is not supported. */
6583 if (arg->mask || size != sizeof(tmp))
6585 /* Only network endian is supported. */
6588 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
6589 if (ret < 0 || (unsigned int)ret != len)
6593 buf = (uint8_t *)ctx->object + arg->offset;
6594 memcpy(buf, &tmp, size);
6596 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6599 push_args(ctx, arg);
6604 * Parse an IPv4 address.
6606 * Last argument (ctx->args) is retrieved to determine storage size and
6610 parse_ipv4_addr(struct context *ctx, const struct token *token,
6611 const char *str, unsigned int len,
6612 void *buf, unsigned int size)
6614 const struct arg *arg = pop_args(ctx);
6619 /* Argument is expected. */
6623 /* Bit-mask fill is not supported. */
6624 if (arg->mask || size != sizeof(tmp))
6626 /* Only network endian is supported. */
6629 memcpy(str2, str, len);
6631 ret = inet_pton(AF_INET, str2, &tmp);
6633 /* Attempt integer parsing. */
6634 push_args(ctx, arg);
6635 return parse_int(ctx, token, str, len, buf, size);
6639 buf = (uint8_t *)ctx->object + arg->offset;
6640 memcpy(buf, &tmp, size);
6642 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6645 push_args(ctx, arg);
6650 * Parse an IPv6 address.
6652 * Last argument (ctx->args) is retrieved to determine storage size and
6656 parse_ipv6_addr(struct context *ctx, const struct token *token,
6657 const char *str, unsigned int len,
6658 void *buf, unsigned int size)
6660 const struct arg *arg = pop_args(ctx);
6662 struct in6_addr tmp;
6666 /* Argument is expected. */
6670 /* Bit-mask fill is not supported. */
6671 if (arg->mask || size != sizeof(tmp))
6673 /* Only network endian is supported. */
6676 memcpy(str2, str, len);
6678 ret = inet_pton(AF_INET6, str2, &tmp);
6683 buf = (uint8_t *)ctx->object + arg->offset;
6684 memcpy(buf, &tmp, size);
6686 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6689 push_args(ctx, arg);
6693 /** Boolean values (even indices stand for false). */
6694 static const char *const boolean_name[] = {
6704 * Parse a boolean value.
6706 * Last argument (ctx->args) is retrieved to determine storage size and
6710 parse_boolean(struct context *ctx, const struct token *token,
6711 const char *str, unsigned int len,
6712 void *buf, unsigned int size)
6714 const struct arg *arg = pop_args(ctx);
6718 /* Argument is expected. */
6721 for (i = 0; boolean_name[i]; ++i)
6722 if (!strcmp_partial(boolean_name[i], str, len))
6724 /* Process token as integer. */
6725 if (boolean_name[i])
6726 str = i & 1 ? "1" : "0";
6727 push_args(ctx, arg);
6728 ret = parse_int(ctx, token, str, strlen(str), buf, size);
6729 return ret > 0 ? (int)len : ret;
6732 /** Parse port and update context. */
6734 parse_port(struct context *ctx, const struct token *token,
6735 const char *str, unsigned int len,
6736 void *buf, unsigned int size)
6738 struct buffer *out = &(struct buffer){ .port = 0 };
6746 ctx->objmask = NULL;
6747 size = sizeof(*out);
6749 ret = parse_int(ctx, token, str, len, out, size);
6751 ctx->port = out->port;
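/** Resolve a shared action ID argument into a pointer to the shared action. */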
6758 parse_sa_id2ptr(struct context *ctx, const struct token *token,
6759 const char *str, unsigned int len,
6760 void *buf, unsigned int size)
6762 struct rte_flow_action *action = ctx->object;
6770 ctx->objmask = NULL;
6771 ret = parse_int(ctx, token, str, len, ctx->object, sizeof(id));
6772 ctx->object = action;
6773 if (ret != (int)len)
6775 /* set shared action */
6777 action->conf = port_shared_action_get_by_id(ctx->port, id);
6778 ret = (action->conf) ? ret : -1;
6783 /** Parse set command, initialize output buffer for subsequent tokens. */
6785 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
6786 const char *str, unsigned int len,
6787 void *buf, unsigned int size)
6789 struct buffer *out = buf;
6791 /* Token name must match. */
6792 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6794 /* Nothing else to do if there is no buffer. */
6797 /* Make sure buffer is large enough. */
6798 if (size < sizeof(*out))
6801 ctx->objmask = NULL;
6805 out->command = ctx->curr;
6806 /* For raw encap/decap only the pattern part is needed. */
6807 out->args.vc.pattern = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6812 /** Parse set command, initialize output buffer for subsequent tokens. */
6814 parse_set_sample_action(struct context *ctx, const struct token *token,
6815 const char *str, unsigned int len,
6816 void *buf, unsigned int size)
6818 struct buffer *out = buf;
6820 /* Token name must match. */
6821 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6823 /* Nothing else to do if there is no buffer. */
6826 /* Make sure buffer is large enough. */
6827 if (size < sizeof(*out))
6830 ctx->objmask = NULL;
6834 out->command = ctx->curr;
6835 /* For the sample action only the action list is needed. */
6836 out->args.vc.actions = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6842 * Parse set raw_encap/raw_decap command,
6843 * initialize output buffer for subsequent tokens.
6846 parse_set_init(struct context *ctx, const struct token *token,
6847 const char *str, unsigned int len,
6848 void *buf, unsigned int size)
6850 struct buffer *out = buf;
6852 /* Token name must match. */
6853 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6855 /* Nothing else to do if there is no buffer. */
6858 /* Make sure buffer is large enough. */
6859 if (size < sizeof(*out))
6861 /* Initialize buffer. */
6862 memset(out, 0x00, sizeof(*out));
6863 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
6866 ctx->objmask = NULL;
6867 if (!out->command) {
6868 if (ctx->curr != SET)
6870 if (sizeof(*out) > size)
6872 out->command = ctx->curr;
6873 out->args.vc.data = (uint8_t *)out + size;
6874 ctx->object = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6880 /** No completion. */
6882 comp_none(struct context *ctx, const struct token *token,
6883 unsigned int ent, char *buf, unsigned int size)
6893 /** Complete boolean values. */
6895 comp_boolean(struct context *ctx, const struct token *token,
6896 unsigned int ent, char *buf, unsigned int size)
6902 for (i = 0; boolean_name[i]; ++i)
6903 if (buf && i == ent)
6904 return strlcpy(buf, boolean_name[i], size);
6910 /** Complete action names. */
6912 comp_action(struct context *ctx, const struct token *token,
6913 unsigned int ent, char *buf, unsigned int size)
6919 for (i = 0; next_action[i]; ++i)
6920 if (buf && i == ent)
6921 return strlcpy(buf, token_list[next_action[i]].name,
6928 /** Complete available ports. */
6930 comp_port(struct context *ctx, const struct token *token,
6931 unsigned int ent, char *buf, unsigned int size)
6938 RTE_ETH_FOREACH_DEV(p) {
6939 if (buf && i == ent)
6940 return snprintf(buf, size, "%u", p);
6948 /** Complete available rule IDs. */
6950 comp_rule_id(struct context *ctx, const struct token *token,
6951 unsigned int ent, char *buf, unsigned int size)
6954 struct rte_port *port;
6955 struct port_flow *pf;
6958 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
6959 ctx->port == (portid_t)RTE_PORT_ALL)
6961 port = &ports[ctx->port];
6962 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
6963 if (buf && i == ent)
6964 return snprintf(buf, size, "%u", pf->id);
6972 /** Complete type field for RSS action. */
6974 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
6975 unsigned int ent, char *buf, unsigned int size)
6981 for (i = 0; rss_type_table[i].str; ++i)
6986 return strlcpy(buf, rss_type_table[ent].str, size);
6988 return snprintf(buf, size, "end");
6992 /** Complete queue field for RSS action. */
6994 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
6995 unsigned int ent, char *buf, unsigned int size)
7002 return snprintf(buf, size, "%u", ent);
7004 return snprintf(buf, size, "end");
7008 /** Complete index number for set raw_encap/raw_decap commands. */
7010 comp_set_raw_index(struct context *ctx, const struct token *token,
7011 unsigned int ent, char *buf, unsigned int size)
7017 RTE_SET_USED(token);
7018 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
7019 if (buf && idx == ent)
7020 return snprintf(buf, size, "%u", idx);
7026 /** Complete index number for the set sample_actions command. */
7028 comp_set_sample_index(struct context *ctx, const struct token *token,
7029 unsigned int ent, char *buf, unsigned int size)
7035 RTE_SET_USED(token);
7036 for (idx = 0; idx < RAW_SAMPLE_CONFS_MAX_NUM; ++idx) {
7037 if (buf && idx == ent)
7038 return snprintf(buf, size, "%u", idx);
7044 /** Internal context. */
7045 static struct context cmd_flow_context;
7047 /** Global parser instances (cmdline API). */
7048 cmdline_parse_inst_t cmd_flow;
7049 cmdline_parse_inst_t cmd_set_raw;
7051 /** Initialize context. */
7053 cmd_flow_context_init(struct context *ctx)
7055 /* A full memset() is not necessary. */
7065 ctx->objmask = NULL;
7068 /** Parse a token (cmdline API). */
7070 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
7073 struct context *ctx = &cmd_flow_context;
7074 const struct token *token;
7075 const enum index *list;
7080 token = &token_list[ctx->curr];
7081 /* Check argument length. */
7084 for (len = 0; src[len]; ++len)
7085 if (src[len] == '#' || isspace(src[len]))
7089 /* Last argument and EOL detection. */
7090 for (i = len; src[i]; ++i)
7091 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
7093 else if (!isspace(src[i])) {
7098 if (src[i] == '\r' || src[i] == '\n') {
7102 /* Initialize context if necessary. */
7103 if (!ctx->next_num) {
7106 ctx->next[ctx->next_num++] = token->next[0];
7108 /* Process argument through candidates. */
7109 ctx->prev = ctx->curr;
7110 list = ctx->next[ctx->next_num - 1];
7111 for (i = 0; list[i]; ++i) {
7112 const struct token *next = &token_list[list[i]];
7115 ctx->curr = list[i];
7117 tmp = next->call(ctx, next, src, len, result, size);
7119 tmp = parse_default(ctx, next, src, len, result, size);
7120 if (tmp == -1 || tmp != len)
7128 /* Push subsequent tokens if any. */
7130 for (i = 0; token->next[i]; ++i) {
7131 if (ctx->next_num == RTE_DIM(ctx->next))
7133 ctx->next[ctx->next_num++] = token->next[i];
7135 /* Push arguments if any. */
7137 for (i = 0; token->args[i]; ++i) {
7138 if (ctx->args_num == RTE_DIM(ctx->args))
7140 ctx->args[ctx->args_num++] = token->args[i];
7145 /** Return number of completion entries (cmdline API). */
7147 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
7149 struct context *ctx = &cmd_flow_context;
7150 const struct token *token = &token_list[ctx->curr];
7151 const enum index *list;
7155 /* Count number of tokens in current list. */
7157 list = ctx->next[ctx->next_num - 1];
7159 list = token->next[0];
7160 for (i = 0; list[i]; ++i)
7165 * If there is a single token, use its completion callback, otherwise
7166 * return the number of entries.
7168 token = &token_list[list[0]];
7169 if (i == 1 && token->comp) {
7170 /* Save index for cmd_flow_get_help(). */
7171 ctx->prev = list[0];
7172 return token->comp(ctx, token, 0, NULL, 0);
7177 /** Return a completion entry (cmdline API). */
7179 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
7180 char *dst, unsigned int size)
7182 struct context *ctx = &cmd_flow_context;
7183 const struct token *token = &token_list[ctx->curr];
7184 const enum index *list;
7188 /* Count number of tokens in current list. */
7190 list = ctx->next[ctx->next_num - 1];
7192 list = token->next[0];
7193 for (i = 0; list[i]; ++i)
7197 /* If there is a single token, use its completion callback. */
7198 token = &token_list[list[0]];
7199 if (i == 1 && token->comp) {
7200 /* Save index for cmd_flow_get_help(). */
7201 ctx->prev = list[0];
7202 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
7204 /* Otherwise make sure the index is valid and use defaults. */
7207 token = &token_list[list[index]];
7208 strlcpy(dst, token->name, size);
7209 /* Save index for cmd_flow_get_help(). */
7210 ctx->prev = list[index];
7214 /** Populate help strings for current token (cmdline API). */
7216 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
7218 struct context *ctx = &cmd_flow_context;
7219 const struct token *token = &token_list[ctx->prev];
7224 /* Set token type and update global help with details. */
7225 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
7227 cmd_flow.help_str = token->help;
7229 cmd_flow.help_str = token->name;
7233 /** Token definition template (cmdline API). */
7234 static struct cmdline_token_hdr cmd_flow_token_hdr = {
7235 .ops = &(struct cmdline_token_ops){
7236 .parse = cmd_flow_parse,
7237 .complete_get_nb = cmd_flow_complete_get_nb,
7238 .complete_get_elt = cmd_flow_complete_get_elt,
7239 .get_help = cmd_flow_get_help,
7244 /** Populate the next dynamic token. */
7246 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
7247 cmdline_parse_token_hdr_t **hdr_inst)
7249 struct context *ctx = &cmd_flow_context;
7251 /* Always reinitialize context before requesting the first token. */
7252 if (!(hdr_inst - cmd_flow.tokens))
7253 cmd_flow_context_init(ctx);
7254 /* Return NULL when no more tokens are expected. */
7255 if (!ctx->next_num && ctx->curr) {
7259 /* Determine if command should end here. */
7260 if (ctx->eol && ctx->last && ctx->next_num) {
7261 const enum index *list = ctx->next[ctx->next_num - 1];
7264 for (i = 0; list[i]; ++i) {
7271 *hdr = &cmd_flow_token_hdr;
7274 /** Dispatch parsed buffer to function calls. */
7276 cmd_flow_parsed(const struct buffer *in)
7278 switch (in->command) {
7279 case SHARED_ACTION_CREATE:
7280 port_shared_action_create(
7281 in->port, in->args.vc.attr.group,
7282 &((const struct rte_flow_shared_action_conf) {
7283 .ingress = in->args.vc.attr.ingress,
7284 .egress = in->args.vc.attr.egress,
7286 in->args.vc.actions);
7288 case SHARED_ACTION_DESTROY:
7289 port_shared_action_destroy(in->port,
7290 in->args.sa_destroy.action_id_n,
7291 in->args.sa_destroy.action_id);
7293 case SHARED_ACTION_UPDATE:
7294 port_shared_action_update(in->port, in->args.vc.attr.group,
7295 in->args.vc.actions);
7297 case SHARED_ACTION_QUERY:
7298 port_shared_action_query(in->port, in->args.sa.action_id);
7301 port_flow_validate(in->port, &in->args.vc.attr,
7302 in->args.vc.pattern, in->args.vc.actions,
7303 &in->args.vc.tunnel_ops);
7306 port_flow_create(in->port, &in->args.vc.attr,
7307 in->args.vc.pattern, in->args.vc.actions,
7308 &in->args.vc.tunnel_ops);
7311 port_flow_destroy(in->port, in->args.destroy.rule_n,
7312 in->args.destroy.rule);
7315 port_flow_flush(in->port);
7318 port_flow_dump(in->port, in->args.dump.file);
7321 port_flow_query(in->port, in->args.query.rule,
7322 &in->args.query.action);
7325 port_flow_list(in->port, in->args.list.group_n,
7326 in->args.list.group);
7329 port_flow_isolate(in->port, in->args.isolate.set);
7332 port_flow_aged(in->port, in->args.aged.destroy);
7335 port_flow_tunnel_create(in->port, &in->args.vc.tunnel_ops);
7337 case TUNNEL_DESTROY:
7338 port_flow_tunnel_destroy(in->port, in->args.vc.tunnel_ops.id);
7341 port_flow_tunnel_list(in->port);
7348 /** Token generator and output processing callback (cmdline API). */
7350 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
7353 cmd_flow_tok(arg0, arg2);
7355 cmd_flow_parsed(arg0);
7358 /** Global parser instance (cmdline API). */
7359 cmdline_parse_inst_t cmd_flow = {
7361 .data = NULL, /**< Unused. */
7362 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
7365 }, /**< Tokens are returned by cmd_flow_tok(). */
7368 /** Set command facility. Reuses the flow command infrastructure as much as possible. */
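/*
 * Fill protocol and version fields that the per-item tokens do not set when
 * serializing flow items into a raw header buffer.
 */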
7371 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
7373 struct rte_flow_item_ipv4 *ipv4;
7374 struct rte_flow_item_eth *eth;
7375 struct rte_flow_item_ipv6 *ipv6;
7376 struct rte_flow_item_vxlan *vxlan;
7377 struct rte_flow_item_vxlan_gpe *gpe;
7378 struct rte_flow_item_nvgre *nvgre;
7379 uint32_t ipv6_vtc_flow;
7381 switch (item->type) {
7382 case RTE_FLOW_ITEM_TYPE_ETH:
7383 eth = (struct rte_flow_item_eth *)buf;
7385 eth->type = rte_cpu_to_be_16(next_proto);
7387 case RTE_FLOW_ITEM_TYPE_IPV4:
7388 ipv4 = (struct rte_flow_item_ipv4 *)buf;
7389 ipv4->hdr.version_ihl = 0x45;
7390 if (next_proto && ipv4->hdr.next_proto_id == 0)
7391 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
7393 case RTE_FLOW_ITEM_TYPE_IPV6:
7394 ipv6 = (struct rte_flow_item_ipv6 *)buf;
7395 if (next_proto && ipv6->hdr.proto == 0)
7396 ipv6->hdr.proto = (uint8_t)next_proto;
7397 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
7398 ipv6_vtc_flow &= 0x0FFFFFFF; /* Reset the version bits. */
7399 ipv6_vtc_flow |= 0x60000000; /* Set the IPv6 version. */
7400 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
7402 case RTE_FLOW_ITEM_TYPE_VXLAN:
7403 vxlan = (struct rte_flow_item_vxlan *)buf;
7404 vxlan->flags = 0x08;
7406 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7407 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
7410 case RTE_FLOW_ITEM_TYPE_NVGRE:
7411 nvgre = (struct rte_flow_item_nvgre *)buf;
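/*
 * NVGRE carries Transparent Ethernet Bridging (0x6558) and requires the
 * GRE "key present" bit (0x2000) to be set.
 */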
7412 nvgre->protocol = rte_cpu_to_be_16(0x6558);
7413 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
7420 /** Helper to retrieve an item's default mask. */
7422 flow_item_default_mask(const struct rte_flow_item *item)
7424 const void *mask = NULL;
7425 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7427 switch (item->type) {
7428 case RTE_FLOW_ITEM_TYPE_ANY:
7429 mask = &rte_flow_item_any_mask;
7431 case RTE_FLOW_ITEM_TYPE_VF:
7432 mask = &rte_flow_item_vf_mask;
7434 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7435 mask = &rte_flow_item_port_id_mask;
7437 case RTE_FLOW_ITEM_TYPE_RAW:
7438 mask = &rte_flow_item_raw_mask;
7440 case RTE_FLOW_ITEM_TYPE_ETH:
7441 mask = &rte_flow_item_eth_mask;
7443 case RTE_FLOW_ITEM_TYPE_VLAN:
7444 mask = &rte_flow_item_vlan_mask;
7446 case RTE_FLOW_ITEM_TYPE_IPV4:
7447 mask = &rte_flow_item_ipv4_mask;
7449 case RTE_FLOW_ITEM_TYPE_IPV6:
7450 mask = &rte_flow_item_ipv6_mask;
7452 case RTE_FLOW_ITEM_TYPE_ICMP:
7453 mask = &rte_flow_item_icmp_mask;
7455 case RTE_FLOW_ITEM_TYPE_UDP:
7456 mask = &rte_flow_item_udp_mask;
7458 case RTE_FLOW_ITEM_TYPE_TCP:
7459 mask = &rte_flow_item_tcp_mask;
7461 case RTE_FLOW_ITEM_TYPE_SCTP:
7462 mask = &rte_flow_item_sctp_mask;
7464 case RTE_FLOW_ITEM_TYPE_VXLAN:
7465 mask = &rte_flow_item_vxlan_mask;
7467 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7468 mask = &rte_flow_item_vxlan_gpe_mask;
7470 case RTE_FLOW_ITEM_TYPE_E_TAG:
7471 mask = &rte_flow_item_e_tag_mask;
7473 case RTE_FLOW_ITEM_TYPE_NVGRE:
7474 mask = &rte_flow_item_nvgre_mask;
7476 case RTE_FLOW_ITEM_TYPE_MPLS:
7477 mask = &rte_flow_item_mpls_mask;
7479 case RTE_FLOW_ITEM_TYPE_GRE:
7480 mask = &rte_flow_item_gre_mask;
7482 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7483 mask = &gre_key_default_mask;
7485 case RTE_FLOW_ITEM_TYPE_META:
7486 mask = &rte_flow_item_meta_mask;
7488 case RTE_FLOW_ITEM_TYPE_FUZZY:
7489 mask = &rte_flow_item_fuzzy_mask;
7491 case RTE_FLOW_ITEM_TYPE_GTP:
7492 mask = &rte_flow_item_gtp_mask;
7494 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7495 mask = &rte_flow_item_gtp_psc_mask;
7497 case RTE_FLOW_ITEM_TYPE_GENEVE:
7498 mask = &rte_flow_item_geneve_mask;
7500 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
7501 mask = &rte_flow_item_pppoe_proto_id_mask;
7503 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
7504 mask = &rte_flow_item_l2tpv3oip_mask;
7506 case RTE_FLOW_ITEM_TYPE_ESP:
7507 mask = &rte_flow_item_esp_mask;
7509 case RTE_FLOW_ITEM_TYPE_AH:
7510 mask = &rte_flow_item_ah_mask;
7512 case RTE_FLOW_ITEM_TYPE_PFCP:
7513 mask = &rte_flow_item_pfcp_mask;
7521 /** Dispatch parsed buffer to function calls. */
7523 cmd_set_raw_parsed_sample(const struct buffer *in)
7525 uint32_t n = in->args.vc.actions_n;
7527 struct rte_flow_action *action = NULL;
7528 struct rte_flow_action *data = NULL;
7530 uint16_t idx = in->port; /* The port field is reused as an index. */
7531 uint32_t max_size = sizeof(struct rte_flow_action) *
7532 ACTION_SAMPLE_ACTIONS_NUM;
7534 RTE_ASSERT(in->command == SET_SAMPLE_ACTIONS);
7535 data = (struct rte_flow_action *)&raw_sample_confs[idx].data;
7536 memset(data, 0x00, max_size);
7537 for (; i <= n - 1; i++) {
7538 action = in->args.vc.actions + i;
7539 if (action->type == RTE_FLOW_ACTION_TYPE_END)
7541 switch (action->type) {
7542 case RTE_FLOW_ACTION_TYPE_MARK:
7543 size = sizeof(struct rte_flow_action_mark);
7544 rte_memcpy(&sample_mark[idx],
7545 (const void *)action->conf, size);
7546 action->conf = &sample_mark[idx];
7548 case RTE_FLOW_ACTION_TYPE_COUNT:
7549 size = sizeof(struct rte_flow_action_count);
7550 rte_memcpy(&sample_count[idx],
7551 (const void *)action->conf, size);
7552 action->conf = &sample_count[idx];
7554 case RTE_FLOW_ACTION_TYPE_QUEUE:
7555 size = sizeof(struct rte_flow_action_queue);
7556 rte_memcpy(&sample_queue[idx],
7557 (const void *)action->conf, size);
7558 action->conf = &sample_queue[idx];
7560 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7561 size = sizeof(struct rte_flow_action_raw_encap);
7562 rte_memcpy(&sample_encap[idx],
7563 (const void *)action->conf, size);
7564 action->conf = &sample_encap[idx];
7566 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7567 size = sizeof(struct rte_flow_action_port_id);
7568 rte_memcpy(&sample_port_id[idx],
7569 (const void *)action->conf, size);
7570 action->conf = &sample_port_id[idx];
7573 printf("Error - Unsupported action\n");
7576 rte_memcpy(data, action, sizeof(struct rte_flow_action));
7581 /** Build the selected raw encap/decap buffer from the parsed pattern. */
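/*
 * Illustrative testpmd usage (exact syntax may vary with version), e.g.:
 *   set raw_encap 1 eth src is 00:11:22:33:44:55 / ipv4 / udp / vxlan vni is 4 / end_set
 * which serializes the listed items into raw_encap_confs[1].
 */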
7583 cmd_set_raw_parsed(const struct buffer *in)
7585 uint32_t n = in->args.vc.pattern_n;
7587 struct rte_flow_item *item = NULL;
7589 uint8_t *data = NULL;
7590 uint8_t *data_tail = NULL;
7591 size_t *total_size = NULL;
7592 uint16_t upper_layer = 0;
7594 uint16_t idx = in->port; /* The port field is reused as an index. */
7596 if (in->command == SET_SAMPLE_ACTIONS)
7597 return cmd_set_raw_parsed_sample(in);
7598 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
7599 in->command == SET_RAW_DECAP);
7600 if (in->command == SET_RAW_ENCAP) {
7601 total_size = &raw_encap_confs[idx].size;
7602 data = (uint8_t *)&raw_encap_confs[idx].data;
7604 total_size = &raw_decap_confs[idx].size;
7605 data = (uint8_t *)&raw_decap_confs[idx].data;
7608 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
7609 /* Process headers from the upper layers down to the lower ones (L3/L4 -> L2). */
7610 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
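/*
 * Items are serialized backwards from the buffer tail, then moved to the
 * front once the total size is known.
 */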
	for (i = n - 1; i >= 0; --i) {
		item = in->args.vc.pattern + i;
		if (item->spec == NULL)
			item->spec = flow_item_default_mask(item);
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			size = sizeof(struct rte_flow_item_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size = sizeof(struct rte_flow_item_vlan);
			proto = RTE_ETHER_TYPE_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size = sizeof(struct rte_flow_item_ipv4);
			proto = RTE_ETHER_TYPE_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size = sizeof(struct rte_flow_item_ipv6);
			proto = RTE_ETHER_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size = sizeof(struct rte_flow_item_udp);
			proto = 0x11;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size = sizeof(struct rte_flow_item_tcp);
			proto = 0x06;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size = sizeof(struct rte_flow_item_vxlan);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size = sizeof(struct rte_flow_item_vxlan_gpe);
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			size = sizeof(struct rte_flow_item_gre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			size = sizeof(rte_be32_t);
			proto = 0x0;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size = sizeof(struct rte_flow_item_mpls);
			proto = 0x0;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			size = sizeof(struct rte_flow_item_nvgre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			size = sizeof(struct rte_flow_item_geneve);
			break;
		case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
			size = sizeof(struct rte_flow_item_l2tpv3oip);
			proto = 0x73;
			break;
		case RTE_FLOW_ITEM_TYPE_ESP:
			size = sizeof(struct rte_flow_item_esp);
			proto = 0x32;
			break;
		case RTE_FLOW_ITEM_TYPE_AH:
			size = sizeof(struct rte_flow_item_ah);
			proto = 0x33;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP:
			size = sizeof(struct rte_flow_item_gtp);
			break;
		case RTE_FLOW_ITEM_TYPE_PFCP:
			size = sizeof(struct rte_flow_item_pfcp);
			break;
		default:
			printf("Error - Not supported item\n");
			*total_size = 0;
			memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
			return;
		}
		*total_size += size;
		rte_memcpy(data_tail - (*total_size), item->spec, size);
		/* Update fields which cannot be set from the command line. */
		update_fields((data_tail - (*total_size)), item,
			      upper_layer);
		upper_layer = proto;
	}
	if (verbose_level & 0x1)
		printf("total data size is %zu\n", (*total_size));
	RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
	memmove(data, (data_tail - (*total_size)), *total_size);
}
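
/*
 * Illustrative usage sketch (not from the original source): the buffers
 * assembled by the function above back the "set raw_encap" and
 * "set raw_decap" commands, e.g. something like:
 *   testpmd> set raw_encap 4 eth src is 10:11:22:33:44:55 /
 *            ipv4 dst is 192.168.1.1 / udp / vxlan vni is 4 / end_set
 *   testpmd> flow create 0 egress pattern eth / ipv4 / end
 *            actions raw_encap index 4 / end
 * Exact syntax depends on the testpmd version.
 */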
/** Populate help strings for current token (cmdline API). */
static int
cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
		     unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	if (token->help)
		cmd_set_raw.help_str = token->help;
	else
		cmd_set_raw.help_str = token->name;
	return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_set_raw_get_help,
	},
	.offset = 0,
};
/** Populate the next dynamic token. */
static void
cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
		cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_set_raw.tokens)) {
		cmd_flow_context_init(ctx);
		ctx->curr = START_SET;
	}
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && (ctx->curr != START_SET)) {
		*hdr = NULL;
		return;
	}
	/* Determine if the command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		for (i = 0; list[i]; ++i) {
			if (list[i] != END)
				continue;
			*hdr = NULL;
			return;
		}
	}
	*hdr = &cmd_set_raw_token_hdr;
}
/** Token generator and output processing callback (cmdline API). */
static void
cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	if (cl == NULL)
		cmd_set_raw_tok(arg0, arg2);
	else
		cmd_set_raw_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_set_raw = {
	.f = cmd_set_raw_cb,
	.data = NULL, /**< Unused. */
	.help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
	.tokens = {
		NULL,
	}, /**< Tokens are returned by cmd_set_raw_tok(). */
};
/* *** display raw_encap/raw_decap buf *** */
struct cmd_show_set_raw_result {
	cmdline_fixed_string_t cmd_show;
	cmdline_fixed_string_t cmd_what;
	cmdline_fixed_string_t cmd_all;
	uint16_t cmd_index;
};

static void
cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
{
	struct cmd_show_set_raw_result *res = parsed_result;
	uint16_t index = res->cmd_index;
	uint8_t all = 0;
	uint8_t *raw_data = NULL;
	size_t raw_size = 0;
	char title[16] = {0};

	RTE_SET_USED(cl);
	RTE_SET_USED(data);
	if (!strcmp(res->cmd_all, "all")) {
		all = 1;
		index = 0;
	} else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
		printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
		return;
	}
	do {
		if (!strcmp(res->cmd_what, "raw_encap")) {
			raw_data = (uint8_t *)&raw_encap_confs[index].data;
			raw_size = raw_encap_confs[index].size;
			snprintf(title, 16, "\nindex: %u", index);
			rte_hexdump(stdout, title, raw_data, raw_size);
		} else {
			raw_data = (uint8_t *)&raw_decap_confs[index].data;
			raw_size = raw_decap_confs[index].size;
			snprintf(title, 16, "\nindex: %u", index);
			rte_hexdump(stdout, title, raw_data, raw_size);
		}
	} while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
}
cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_show, "show");
cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_what, "raw_encap#raw_decap");
cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
	TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_index, UINT16);
cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_all, "all");
cmdline_parse_inst_t cmd_show_set_raw = {
	.f = cmd_show_set_raw_parsed,
	.data = NULL,
	.help_str = "show <raw_encap|raw_decap> <index>",
	.tokens = {
		(void *)&cmd_show_set_raw_cmd_show,
		(void *)&cmd_show_set_raw_cmd_what,
		(void *)&cmd_show_set_raw_cmd_index,
		NULL,
	},
};
cmdline_parse_inst_t cmd_show_set_raw_all = {
	.f = cmd_show_set_raw_parsed,
	.data = NULL,
	.help_str = "show <raw_encap|raw_decap> all",
	.tokens = {
		(void *)&cmd_show_set_raw_cmd_show,
		(void *)&cmd_show_set_raw_cmd_what,
		(void *)&cmd_show_set_raw_cmd_all,
		NULL,
	},
};
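
/*
 * Illustrative usage sketch (not from the original source): the two
 * instances above register the commands advertised by their help_str,
 * e.g. something like:
 *   testpmd> show raw_encap 4
 *   testpmd> show raw_decap all
 * Each matching buffer is printed with rte_hexdump().
 */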