1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
26 #include <rte_vxlan.h>
30 /** Parser token indices. */
55 /* Top-level command. */
57 /* Sub-level commands. */
64 /* Top-level command. */
66 /* Sub-level commands. */
79 /* Tunnel arguments. */
86 /* Destroy arguments. */
89 /* Query arguments. */
95 /* Destroy aged flow arguments. */
98 /* Validate/create arguments. */
107 /* Shared action arguments. */
108 SHARED_ACTION_CREATE,
109 SHARED_ACTION_UPDATE,
110 SHARED_ACTION_DESTROY,
113 /* Shared action create arguments. */
114 SHARED_ACTION_CREATE_ID,
115 SHARED_ACTION_INGRESS,
116 SHARED_ACTION_EGRESS,
117 SHARED_ACTION_TRANSFER,
120 /* Shared action destroy arguments. */
121 SHARED_ACTION_DESTROY_ID,
123 /* Validate/create pattern. */
161 ITEM_VLAN_INNER_TYPE,
162 ITEM_VLAN_HAS_MORE_VLAN,
165 ITEM_IPV4_FRAGMENT_OFFSET,
177 ITEM_IPV6_HAS_FRAG_EXT,
198 ITEM_E_TAG_GRP_ECID_B,
207 ITEM_GRE_C_RSVD0_VER,
225 ITEM_ARP_ETH_IPV4_SHA,
226 ITEM_ARP_ETH_IPV4_SPA,
227 ITEM_ARP_ETH_IPV4_THA,
228 ITEM_ARP_ETH_IPV4_TPA,
230 ITEM_IPV6_EXT_NEXT_HDR,
232 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
233 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
238 ITEM_ICMP6_ND_NS_TARGET_ADDR,
240 ITEM_ICMP6_ND_NA_TARGET_ADDR,
242 ITEM_ICMP6_ND_OPT_TYPE,
243 ITEM_ICMP6_ND_OPT_SLA_ETH,
244 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
245 ITEM_ICMP6_ND_OPT_TLA_ETH,
246 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
259 ITEM_HIGIG2_CLASSIFICATION,
265 ITEM_L2TPV3OIP_SESSION_ID,
275 ITEM_ECPRI_COMMON_TYPE,
276 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
277 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
278 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
279 ITEM_ECPRI_MSG_IQ_DATA_PCID,
280 ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
281 ITEM_ECPRI_MSG_DLY_MSR_MSRID,
283 /* Validate/create actions. */
303 ACTION_RSS_FUNC_DEFAULT,
304 ACTION_RSS_FUNC_TOEPLITZ,
305 ACTION_RSS_FUNC_SIMPLE_XOR,
306 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
318 ACTION_PHY_PORT_ORIGINAL,
319 ACTION_PHY_PORT_INDEX,
321 ACTION_PORT_ID_ORIGINAL,
325 ACTION_OF_SET_MPLS_TTL,
326 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
327 ACTION_OF_DEC_MPLS_TTL,
328 ACTION_OF_SET_NW_TTL,
329 ACTION_OF_SET_NW_TTL_NW_TTL,
330 ACTION_OF_DEC_NW_TTL,
331 ACTION_OF_COPY_TTL_OUT,
332 ACTION_OF_COPY_TTL_IN,
335 ACTION_OF_PUSH_VLAN_ETHERTYPE,
336 ACTION_OF_SET_VLAN_VID,
337 ACTION_OF_SET_VLAN_VID_VLAN_VID,
338 ACTION_OF_SET_VLAN_PCP,
339 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
341 ACTION_OF_POP_MPLS_ETHERTYPE,
343 ACTION_OF_PUSH_MPLS_ETHERTYPE,
350 ACTION_MPLSOGRE_ENCAP,
351 ACTION_MPLSOGRE_DECAP,
352 ACTION_MPLSOUDP_ENCAP,
353 ACTION_MPLSOUDP_DECAP,
355 ACTION_SET_IPV4_SRC_IPV4_SRC,
357 ACTION_SET_IPV4_DST_IPV4_DST,
359 ACTION_SET_IPV6_SRC_IPV6_SRC,
361 ACTION_SET_IPV6_DST_IPV6_DST,
363 ACTION_SET_TP_SRC_TP_SRC,
365 ACTION_SET_TP_DST_TP_DST,
371 ACTION_SET_MAC_SRC_MAC_SRC,
373 ACTION_SET_MAC_DST_MAC_DST,
375 ACTION_INC_TCP_SEQ_VALUE,
377 ACTION_DEC_TCP_SEQ_VALUE,
379 ACTION_INC_TCP_ACK_VALUE,
381 ACTION_DEC_TCP_ACK_VALUE,
384 ACTION_RAW_ENCAP_INDEX,
385 ACTION_RAW_ENCAP_INDEX_VALUE,
386 ACTION_RAW_DECAP_INDEX,
387 ACTION_RAW_DECAP_INDEX_VALUE,
390 ACTION_SET_TAG_INDEX,
393 ACTION_SET_META_DATA,
394 ACTION_SET_META_MASK,
395 ACTION_SET_IPV4_DSCP,
396 ACTION_SET_IPV4_DSCP_VALUE,
397 ACTION_SET_IPV6_DSCP,
398 ACTION_SET_IPV6_DSCP_VALUE,
404 ACTION_SAMPLE_INDEX_VALUE,
406 SHARED_ACTION_ID2PTR,
409 /** Maximum size for pattern in struct rte_flow_item_raw. */
410 #define ITEM_RAW_PATTERN_SIZE 40
412 /** Storage size for struct rte_flow_item_raw including pattern. */
413 #define ITEM_RAW_SIZE \
414 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
416 /** Maximum number of queue indices in struct rte_flow_action_rss. */
417 #define ACTION_RSS_QUEUE_NUM 128
419 /** Storage for struct rte_flow_action_rss including external data. */
420 struct action_rss_data {
421 struct rte_flow_action_rss conf;
422 uint8_t key[RSS_HASH_KEY_LENGTH];
423 uint16_t queue[ACTION_RSS_QUEUE_NUM];
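/*
 * Sketch (illustrative only, not part of this file's logic): conf.key and
 * conf.queue are expected to point back at the trailing key[] and queue[]
 * arrays of the same object, e.g. when the structure is set up by the RSS
 * action parser:
 *
 *	struct action_rss_data *rss = buf;
 *
 *	rss->conf.key = rss->key;
 *	rss->conf.queue = rss->queue;
 */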
426 /** Maximum data size in struct rte_flow_action_raw_encap. */
427 #define ACTION_RAW_ENCAP_MAX_DATA 128
428 #define RAW_ENCAP_CONFS_MAX_NUM 8
430 /** Storage for struct rte_flow_action_raw_encap. */
431 struct raw_encap_conf {
432 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
433 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
437 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
439 /** Storage for struct rte_flow_action_raw_encap including external data. */
440 struct action_raw_encap_data {
441 struct rte_flow_action_raw_encap conf;
442 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
443 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
447 /** Storage for struct rte_flow_action_raw_decap. */
448 struct raw_decap_conf {
449 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
453 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
455 /** Storage for struct rte_flow_action_raw_decap including external data. */
456 struct action_raw_decap_data {
457 struct rte_flow_action_raw_decap conf;
458 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
462 struct vxlan_encap_conf vxlan_encap_conf = {
466 .vni = "\x00\x00\x00",
468 .udp_dst = RTE_BE16(RTE_VXLAN_DEFAULT_PORT),
469 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
470 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
471 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
472 "\x00\x00\x00\x00\x00\x00\x00\x01",
473 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
474 "\x00\x00\x00\x00\x00\x00\x11\x11",
478 .eth_src = "\x00\x00\x00\x00\x00\x00",
479 .eth_dst = "\xff\xff\xff\xff\xff\xff",
482 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
483 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
485 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
486 struct action_vxlan_encap_data {
487 struct rte_flow_action_vxlan_encap conf;
488 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
489 struct rte_flow_item_eth item_eth;
490 struct rte_flow_item_vlan item_vlan;
492 struct rte_flow_item_ipv4 item_ipv4;
493 struct rte_flow_item_ipv6 item_ipv6;
495 struct rte_flow_item_udp item_udp;
496 struct rte_flow_item_vxlan item_vxlan;
499 struct nvgre_encap_conf nvgre_encap_conf = {
502 .tni = "\x00\x00\x00",
503 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
504 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
505 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
506 "\x00\x00\x00\x00\x00\x00\x00\x01",
507 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
508 "\x00\x00\x00\x00\x00\x00\x11\x11",
510 .eth_src = "\x00\x00\x00\x00\x00\x00",
511 .eth_dst = "\xff\xff\xff\xff\xff\xff",
514 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
515 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
517 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
518 struct action_nvgre_encap_data {
519 struct rte_flow_action_nvgre_encap conf;
520 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
521 struct rte_flow_item_eth item_eth;
522 struct rte_flow_item_vlan item_vlan;
524 struct rte_flow_item_ipv4 item_ipv4;
525 struct rte_flow_item_ipv6 item_ipv6;
527 struct rte_flow_item_nvgre item_nvgre;
530 struct l2_encap_conf l2_encap_conf;
532 struct l2_decap_conf l2_decap_conf;
534 struct mplsogre_encap_conf mplsogre_encap_conf;
536 struct mplsogre_decap_conf mplsogre_decap_conf;
538 struct mplsoudp_encap_conf mplsoudp_encap_conf;
540 struct mplsoudp_decap_conf mplsoudp_decap_conf;
542 #define ACTION_SAMPLE_ACTIONS_NUM 10
543 #define RAW_SAMPLE_CONFS_MAX_NUM 8
544 /** Storage for struct rte_flow_action_sample including external data. */
545 struct action_sample_data {
546 struct rte_flow_action_sample conf;
549 /** Storage for the action list used by struct rte_flow_action_sample. */
550 struct raw_sample_conf {
551 struct rte_flow_action data[ACTION_SAMPLE_ACTIONS_NUM];
553 struct raw_sample_conf raw_sample_confs[RAW_SAMPLE_CONFS_MAX_NUM];
554 struct rte_flow_action_mark sample_mark[RAW_SAMPLE_CONFS_MAX_NUM];
555 struct rte_flow_action_queue sample_queue[RAW_SAMPLE_CONFS_MAX_NUM];
556 struct rte_flow_action_count sample_count[RAW_SAMPLE_CONFS_MAX_NUM];
557 struct rte_flow_action_port_id sample_port_id[RAW_SAMPLE_CONFS_MAX_NUM];
558 struct rte_flow_action_raw_encap sample_encap[RAW_SAMPLE_CONFS_MAX_NUM];
560 /** Maximum number of subsequent tokens and arguments on the stack. */
561 #define CTX_STACK_SIZE 16
563 /** Parser context. */
565 /** Stack of subsequent token lists to process. */
566 const enum index *next[CTX_STACK_SIZE];
567 /** Arguments for stacked tokens. */
568 const void *args[CTX_STACK_SIZE];
569 enum index curr; /**< Current token index. */
570 enum index prev; /**< Index of the last token seen. */
571 int next_num; /**< Number of entries in next[]. */
572 int args_num; /**< Number of entries in args[]. */
573 uint32_t eol:1; /**< EOL has been detected. */
574 uint32_t last:1; /**< No more arguments. */
575 portid_t port; /**< Current port ID (for completions). */
576 uint32_t objdata; /**< Object-specific data. */
577 void *object; /**< Address of current object for relative offsets. */
578 void *objmask; /**< Object a full mask must be written to. */
581 /** Token argument. */
583 uint32_t hton:1; /**< Use network byte ordering. */
584 uint32_t sign:1; /**< Value is signed. */
585 uint32_t bounded:1; /**< Value is bounded. */
586 uintmax_t min; /**< Minimum value if bounded. */
587 uintmax_t max; /**< Maximum value if bounded. */
588 uint32_t offset; /**< Relative offset from ctx->object. */
589 uint32_t size; /**< Field size. */
590 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
593 /** Parser token definition. */
595 /** Type displayed during completion (defaults to "TOKEN"). */
597 /** Help displayed during completion (defaults to token name). */
599 /** Private data used by parser functions. */
602 * Lists of subsequent tokens to push on the stack. Each call to the
603 * parser consumes the last entry of that stack.
605 const enum index *const *next;
606 /** Arguments stack for subsequent tokens that need them. */
607 const struct arg *const *args;
609 * Token-processing callback, returns -1 in case of error, the
610 * length of the matched string otherwise. If NULL, attempts to
611 * match the token name.
613 * If buf is not NULL, the result should be stored in it according
614 * to context. An error is returned if not large enough.
616 int (*call)(struct context *ctx, const struct token *token,
617 const char *str, unsigned int len,
618 void *buf, unsigned int size);
620 * Callback that provides possible values for this token, used for
621 * completion. Returns -1 in case of error, the number of possible
622 * values otherwise. If NULL, the token name is used.
624 * If buf is not NULL, entry index ent is written to buf and the
625 * full length of the entry is returned (same behavior as
628 int (*comp)(struct context *ctx, const struct token *token,
629 unsigned int ent, char *buf, unsigned int size);
630 /** Mandatory token name, no default value. */
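/*
 * Example (mirroring entries in token_list below): a token that needs custom
 * parsing and completion wires both callbacks, e.g. the BOOLEAN token uses
 * parse_boolean() to convert the text and comp_boolean() to list the accepted
 * values:
 *
 *	[BOOLEAN] = {
 *		.name = "{boolean}",
 *		.help = "any boolean value",
 *		.call = parse_boolean,
 *		.comp = comp_boolean,
 *	},
 */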
634 /** Static initializer for the next field. */
635 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
637 /** Static initializer for a NEXT() entry. */
638 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
640 /** Static initializer for the args field. */
641 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
643 /** Static initializer for ARGS() to target a field. */
644 #define ARGS_ENTRY(s, f) \
645 (&(const struct arg){ \
646 .offset = offsetof(s, f), \
647 .size = sizeof(((s *)0)->f), \
650 /** Static initializer for ARGS() to target a bit-field. */
651 #define ARGS_ENTRY_BF(s, f, b) \
652 (&(const struct arg){ \
654 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
657 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
658 #define ARGS_ENTRY_MASK(s, f, m) \
659 (&(const struct arg){ \
660 .offset = offsetof(s, f), \
661 .size = sizeof(((s *)0)->f), \
662 .mask = (const void *)(m), \
665 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
666 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
667 (&(const struct arg){ \
669 .offset = offsetof(s, f), \
670 .size = sizeof(((s *)0)->f), \
671 .mask = (const void *)(m), \
674 /** Static initializer for ARGS() to target a pointer. */
675 #define ARGS_ENTRY_PTR(s, f) \
676 (&(const struct arg){ \
677 .size = sizeof(*((s *)0)->f), \
680 /** Static initializer for ARGS() with arbitrary offset and size. */
681 #define ARGS_ENTRY_ARB(o, s) \
682 (&(const struct arg){ \
687 /** Same as ARGS_ENTRY_ARB() with bounded values. */
688 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
689 (&(const struct arg){ \
697 /** Same as ARGS_ENTRY() using network byte ordering. */
698 #define ARGS_ENTRY_HTON(s, f) \
699 (&(const struct arg){ \
701 .offset = offsetof(s, f), \
702 .size = sizeof(((s *)0)->f), \
705 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
706 #define ARG_ENTRY_HTON(s) \
707 (&(const struct arg){ \
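/*
 * Example (mirroring entries in token_list below): NEXT() chains the token
 * lists that may follow, while ARGS_ENTRY_HTON() points the parser at a
 * structure field to fill in network byte order, e.g. the destination MAC of
 * the "eth" pattern item:
 *
 *	[ITEM_ETH_DST] = {
 *		.name = "dst",
 *		.help = "destination MAC",
 *		.next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
 *		.args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
 *	},
 */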
713 /** Parser output buffer layout expected by cmd_flow_parsed(). */
715 enum index command; /**< Flow command. */
716 portid_t port; /**< Affected port ID. */
720 uint32_t action_id_n;
721 } sa_destroy; /**< Shared action destroy arguments. */
724 } sa; /**< Shared action query arguments. */
726 struct rte_flow_attr attr;
727 struct tunnel_ops tunnel_ops;
728 struct rte_flow_item *pattern;
729 struct rte_flow_action *actions;
733 } vc; /**< Validate/create arguments. */
737 } destroy; /**< Destroy arguments. */
740 } dump; /**< Dump arguments. */
743 struct rte_flow_action action;
744 } query; /**< Query arguments. */
748 } list; /**< List arguments. */
751 } isolate; /**< Isolated mode arguments. */
754 } aged; /**< Aged arguments. */
755 } args; /**< Command arguments. */
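/*
 * Example (mirroring entries in token_list below): command tokens fill this
 * buffer through ARGS_ENTRY() targets, e.g. "destroy" records the affected
 * port before parse_destroy() handles the rest of the command:
 *
 *	[DESTROY] = {
 *		.name = "destroy",
 *		.help = "destroy specific flow rules",
 *		.next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
 *		.args = ARGS(ARGS_ENTRY(struct buffer, port)),
 *		.call = parse_destroy,
 *	},
 */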
758 /** Private data for pattern items. */
759 struct parse_item_priv {
760 enum rte_flow_item_type type; /**< Item type. */
761 uint32_t size; /**< Size of item specification structure. */
764 #define PRIV_ITEM(t, s) \
765 (&(const struct parse_item_priv){ \
766 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
770 /** Private data for actions. */
771 struct parse_action_priv {
772 enum rte_flow_action_type type; /**< Action type. */
773 uint32_t size; /**< Size of action configuration structure. */
776 #define PRIV_ACTION(t, s) \
777 (&(const struct parse_action_priv){ \
778 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
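/*
 * Example (a sketch mirroring the "eth" entry in token_list below):
 * PRIV_ITEM() attaches the rte_flow item type and the size of its
 * specification structure to a pattern token, so that generic parser code can
 * allocate and populate the right object:
 *
 *	[ITEM_ETH] = {
 *		.name = "eth",
 *		.help = "match Ethernet header",
 *		.priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
 *		.next = NEXT(item_eth),
 *		.call = parse_vc,
 *	},
 */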
782 static const enum index next_sa_create_attr[] = {
783 SHARED_ACTION_CREATE_ID,
784 SHARED_ACTION_INGRESS,
785 SHARED_ACTION_EGRESS,
786 SHARED_ACTION_TRANSFER,
791 static const enum index next_sa_subcmd[] = {
792 SHARED_ACTION_CREATE,
793 SHARED_ACTION_UPDATE,
794 SHARED_ACTION_DESTROY,
799 static const enum index next_vc_attr[] = {
811 static const enum index tunnel_create_attr[] = {
818 static const enum index tunnel_destroy_attr[] = {
825 static const enum index tunnel_list_attr[] = {
831 static const enum index next_destroy_attr[] = {
837 static const enum index next_dump_attr[] = {
843 static const enum index next_list_attr[] = {
849 static const enum index next_aged_attr[] = {
855 static const enum index next_sa_destroy_attr[] = {
856 SHARED_ACTION_DESTROY_ID,
861 static const enum index item_param[] = {
870 static const enum index next_item[] = {
907 ITEM_ICMP6_ND_OPT_SLA_ETH,
908 ITEM_ICMP6_ND_OPT_TLA_ETH,
926 static const enum index item_fuzzy[] = {
932 static const enum index item_any[] = {
938 static const enum index item_vf[] = {
944 static const enum index item_phy_port[] = {
950 static const enum index item_port_id[] = {
956 static const enum index item_mark[] = {
962 static const enum index item_raw[] = {
972 static const enum index item_eth[] = {
981 static const enum index item_vlan[] = {
986 ITEM_VLAN_INNER_TYPE,
987 ITEM_VLAN_HAS_MORE_VLAN,
992 static const enum index item_ipv4[] = {
994 ITEM_IPV4_FRAGMENT_OFFSET,
1003 static const enum index item_ipv6[] = {
1010 ITEM_IPV6_HAS_FRAG_EXT,
1015 static const enum index item_icmp[] = {
1024 static const enum index item_udp[] = {
1031 static const enum index item_tcp[] = {
1039 static const enum index item_sctp[] = {
1048 static const enum index item_vxlan[] = {
1054 static const enum index item_e_tag[] = {
1055 ITEM_E_TAG_GRP_ECID_B,
1060 static const enum index item_nvgre[] = {
1066 static const enum index item_mpls[] = {
1074 static const enum index item_gre[] = {
1076 ITEM_GRE_C_RSVD0_VER,
1084 static const enum index item_gre_key[] = {
1090 static const enum index item_gtp[] = {
1098 static const enum index item_geneve[] = {
1105 static const enum index item_vxlan_gpe[] = {
1111 static const enum index item_arp_eth_ipv4[] = {
1112 ITEM_ARP_ETH_IPV4_SHA,
1113 ITEM_ARP_ETH_IPV4_SPA,
1114 ITEM_ARP_ETH_IPV4_THA,
1115 ITEM_ARP_ETH_IPV4_TPA,
1120 static const enum index item_ipv6_ext[] = {
1121 ITEM_IPV6_EXT_NEXT_HDR,
1126 static const enum index item_ipv6_frag_ext[] = {
1127 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
1128 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
1133 static const enum index item_icmp6[] = {
1140 static const enum index item_icmp6_nd_ns[] = {
1141 ITEM_ICMP6_ND_NS_TARGET_ADDR,
1146 static const enum index item_icmp6_nd_na[] = {
1147 ITEM_ICMP6_ND_NA_TARGET_ADDR,
1152 static const enum index item_icmp6_nd_opt[] = {
1153 ITEM_ICMP6_ND_OPT_TYPE,
1158 static const enum index item_icmp6_nd_opt_sla_eth[] = {
1159 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
1164 static const enum index item_icmp6_nd_opt_tla_eth[] = {
1165 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
1170 static const enum index item_meta[] = {
1176 static const enum index item_gtp_psc[] = {
1183 static const enum index item_pppoed[] = {
1189 static const enum index item_pppoes[] = {
1195 static const enum index item_pppoe_proto_id[] = {
1200 static const enum index item_higig2[] = {
1201 ITEM_HIGIG2_CLASSIFICATION,
1207 static const enum index item_esp[] = {
1213 static const enum index item_ah[] = {
1219 static const enum index item_pfcp[] = {
1226 static const enum index next_set_raw[] = {
1232 static const enum index item_tag[] = {
1239 static const enum index item_l2tpv3oip[] = {
1240 ITEM_L2TPV3OIP_SESSION_ID,
1245 static const enum index item_ecpri[] = {
1251 static const enum index item_ecpri_common[] = {
1252 ITEM_ECPRI_COMMON_TYPE,
1256 static const enum index item_ecpri_common_type[] = {
1257 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
1258 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
1259 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
1263 static const enum index next_action[] = {
1279 ACTION_OF_SET_MPLS_TTL,
1280 ACTION_OF_DEC_MPLS_TTL,
1281 ACTION_OF_SET_NW_TTL,
1282 ACTION_OF_DEC_NW_TTL,
1283 ACTION_OF_COPY_TTL_OUT,
1284 ACTION_OF_COPY_TTL_IN,
1286 ACTION_OF_PUSH_VLAN,
1287 ACTION_OF_SET_VLAN_VID,
1288 ACTION_OF_SET_VLAN_PCP,
1290 ACTION_OF_PUSH_MPLS,
1297 ACTION_MPLSOGRE_ENCAP,
1298 ACTION_MPLSOGRE_DECAP,
1299 ACTION_MPLSOUDP_ENCAP,
1300 ACTION_MPLSOUDP_DECAP,
1301 ACTION_SET_IPV4_SRC,
1302 ACTION_SET_IPV4_DST,
1303 ACTION_SET_IPV6_SRC,
1304 ACTION_SET_IPV6_DST,
1320 ACTION_SET_IPV4_DSCP,
1321 ACTION_SET_IPV6_DSCP,
1328 static const enum index action_mark[] = {
1334 static const enum index action_queue[] = {
1340 static const enum index action_count[] = {
1342 ACTION_COUNT_SHARED,
1347 static const enum index action_rss[] = {
1358 static const enum index action_vf[] = {
1365 static const enum index action_phy_port[] = {
1366 ACTION_PHY_PORT_ORIGINAL,
1367 ACTION_PHY_PORT_INDEX,
1372 static const enum index action_port_id[] = {
1373 ACTION_PORT_ID_ORIGINAL,
1379 static const enum index action_meter[] = {
1385 static const enum index action_of_set_mpls_ttl[] = {
1386 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1391 static const enum index action_of_set_nw_ttl[] = {
1392 ACTION_OF_SET_NW_TTL_NW_TTL,
1397 static const enum index action_of_push_vlan[] = {
1398 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1403 static const enum index action_of_set_vlan_vid[] = {
1404 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1409 static const enum index action_of_set_vlan_pcp[] = {
1410 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1415 static const enum index action_of_pop_mpls[] = {
1416 ACTION_OF_POP_MPLS_ETHERTYPE,
1421 static const enum index action_of_push_mpls[] = {
1422 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1427 static const enum index action_set_ipv4_src[] = {
1428 ACTION_SET_IPV4_SRC_IPV4_SRC,
1433 static const enum index action_set_mac_src[] = {
1434 ACTION_SET_MAC_SRC_MAC_SRC,
1439 static const enum index action_set_ipv4_dst[] = {
1440 ACTION_SET_IPV4_DST_IPV4_DST,
1445 static const enum index action_set_ipv6_src[] = {
1446 ACTION_SET_IPV6_SRC_IPV6_SRC,
1451 static const enum index action_set_ipv6_dst[] = {
1452 ACTION_SET_IPV6_DST_IPV6_DST,
1457 static const enum index action_set_tp_src[] = {
1458 ACTION_SET_TP_SRC_TP_SRC,
1463 static const enum index action_set_tp_dst[] = {
1464 ACTION_SET_TP_DST_TP_DST,
1469 static const enum index action_set_ttl[] = {
1475 static const enum index action_jump[] = {
1481 static const enum index action_set_mac_dst[] = {
1482 ACTION_SET_MAC_DST_MAC_DST,
1487 static const enum index action_inc_tcp_seq[] = {
1488 ACTION_INC_TCP_SEQ_VALUE,
1493 static const enum index action_dec_tcp_seq[] = {
1494 ACTION_DEC_TCP_SEQ_VALUE,
1499 static const enum index action_inc_tcp_ack[] = {
1500 ACTION_INC_TCP_ACK_VALUE,
1505 static const enum index action_dec_tcp_ack[] = {
1506 ACTION_DEC_TCP_ACK_VALUE,
1511 static const enum index action_raw_encap[] = {
1512 ACTION_RAW_ENCAP_INDEX,
1517 static const enum index action_raw_decap[] = {
1518 ACTION_RAW_DECAP_INDEX,
1523 static const enum index action_set_tag[] = {
1524 ACTION_SET_TAG_DATA,
1525 ACTION_SET_TAG_INDEX,
1526 ACTION_SET_TAG_MASK,
1531 static const enum index action_set_meta[] = {
1532 ACTION_SET_META_DATA,
1533 ACTION_SET_META_MASK,
1538 static const enum index action_set_ipv4_dscp[] = {
1539 ACTION_SET_IPV4_DSCP_VALUE,
1544 static const enum index action_set_ipv6_dscp[] = {
1545 ACTION_SET_IPV6_DSCP_VALUE,
1550 static const enum index action_age[] = {
1557 static const enum index action_sample[] = {
1559 ACTION_SAMPLE_RATIO,
1560 ACTION_SAMPLE_INDEX,
1565 static const enum index next_action_sample[] = {
1575 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1576 const char *, unsigned int,
1577 void *, unsigned int);
1578 static int parse_set_sample_action(struct context *, const struct token *,
1579 const char *, unsigned int,
1580 void *, unsigned int);
1581 static int parse_set_init(struct context *, const struct token *,
1582 const char *, unsigned int,
1583 void *, unsigned int);
1584 static int parse_init(struct context *, const struct token *,
1585 const char *, unsigned int,
1586 void *, unsigned int);
1587 static int parse_vc(struct context *, const struct token *,
1588 const char *, unsigned int,
1589 void *, unsigned int);
1590 static int parse_vc_spec(struct context *, const struct token *,
1591 const char *, unsigned int, void *, unsigned int);
1592 static int parse_vc_conf(struct context *, const struct token *,
1593 const char *, unsigned int, void *, unsigned int);
1594 static int parse_vc_item_ecpri_type(struct context *, const struct token *,
1595 const char *, unsigned int,
1596 void *, unsigned int);
1597 static int parse_vc_action_rss(struct context *, const struct token *,
1598 const char *, unsigned int, void *,
1600 static int parse_vc_action_rss_func(struct context *, const struct token *,
1601 const char *, unsigned int, void *,
1603 static int parse_vc_action_rss_type(struct context *, const struct token *,
1604 const char *, unsigned int, void *,
1606 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1607 const char *, unsigned int, void *,
1609 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1610 const char *, unsigned int, void *,
1612 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1613 const char *, unsigned int, void *,
1615 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1616 const char *, unsigned int, void *,
1618 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1619 const char *, unsigned int, void *,
1621 static int parse_vc_action_mplsogre_encap(struct context *,
1622 const struct token *, const char *,
1623 unsigned int, void *, unsigned int);
1624 static int parse_vc_action_mplsogre_decap(struct context *,
1625 const struct token *, const char *,
1626 unsigned int, void *, unsigned int);
1627 static int parse_vc_action_mplsoudp_encap(struct context *,
1628 const struct token *, const char *,
1629 unsigned int, void *, unsigned int);
1630 static int parse_vc_action_mplsoudp_decap(struct context *,
1631 const struct token *, const char *,
1632 unsigned int, void *, unsigned int);
1633 static int parse_vc_action_raw_encap(struct context *,
1634 const struct token *, const char *,
1635 unsigned int, void *, unsigned int);
1636 static int parse_vc_action_raw_decap(struct context *,
1637 const struct token *, const char *,
1638 unsigned int, void *, unsigned int);
1639 static int parse_vc_action_raw_encap_index(struct context *,
1640 const struct token *, const char *,
1641 unsigned int, void *, unsigned int);
1642 static int parse_vc_action_raw_decap_index(struct context *,
1643 const struct token *, const char *,
1644 unsigned int, void *, unsigned int);
1645 static int parse_vc_action_set_meta(struct context *ctx,
1646 const struct token *token, const char *str,
1647 unsigned int len, void *buf,
1649 static int parse_vc_action_sample(struct context *ctx,
1650 const struct token *token, const char *str,
1651 unsigned int len, void *buf,
1654 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
1655 const char *str, unsigned int len, void *buf,
1657 static int parse_destroy(struct context *, const struct token *,
1658 const char *, unsigned int,
1659 void *, unsigned int);
1660 static int parse_flush(struct context *, const struct token *,
1661 const char *, unsigned int,
1662 void *, unsigned int);
1663 static int parse_dump(struct context *, const struct token *,
1664 const char *, unsigned int,
1665 void *, unsigned int);
1666 static int parse_query(struct context *, const struct token *,
1667 const char *, unsigned int,
1668 void *, unsigned int);
1669 static int parse_action(struct context *, const struct token *,
1670 const char *, unsigned int,
1671 void *, unsigned int);
1672 static int parse_list(struct context *, const struct token *,
1673 const char *, unsigned int,
1674 void *, unsigned int);
1675 static int parse_aged(struct context *, const struct token *,
1676 const char *, unsigned int,
1677 void *, unsigned int);
1678 static int parse_isolate(struct context *, const struct token *,
1679 const char *, unsigned int,
1680 void *, unsigned int);
1681 static int parse_tunnel(struct context *, const struct token *,
1682 const char *, unsigned int,
1683 void *, unsigned int);
1684 static int parse_int(struct context *, const struct token *,
1685 const char *, unsigned int,
1686 void *, unsigned int);
1687 static int parse_prefix(struct context *, const struct token *,
1688 const char *, unsigned int,
1689 void *, unsigned int);
1690 static int parse_boolean(struct context *, const struct token *,
1691 const char *, unsigned int,
1692 void *, unsigned int);
1693 static int parse_string(struct context *, const struct token *,
1694 const char *, unsigned int,
1695 void *, unsigned int);
1696 static int parse_hex(struct context *ctx, const struct token *token,
1697 const char *str, unsigned int len,
1698 void *buf, unsigned int size);
1699 static int parse_string0(struct context *, const struct token *,
1700 const char *, unsigned int,
1701 void *, unsigned int);
1702 static int parse_mac_addr(struct context *, const struct token *,
1703 const char *, unsigned int,
1704 void *, unsigned int);
1705 static int parse_ipv4_addr(struct context *, const struct token *,
1706 const char *, unsigned int,
1707 void *, unsigned int);
1708 static int parse_ipv6_addr(struct context *, const struct token *,
1709 const char *, unsigned int,
1710 void *, unsigned int);
1711 static int parse_port(struct context *, const struct token *,
1712 const char *, unsigned int,
1713 void *, unsigned int);
1714 static int parse_sa(struct context *, const struct token *,
1715 const char *, unsigned int,
1716 void *, unsigned int);
1717 static int parse_sa_destroy(struct context *ctx, const struct token *token,
1718 const char *str, unsigned int len,
1719 void *buf, unsigned int size);
1720 static int parse_sa_id2ptr(struct context *ctx, const struct token *token,
1721 const char *str, unsigned int len, void *buf,
1723 static int comp_none(struct context *, const struct token *,
1724 unsigned int, char *, unsigned int);
1725 static int comp_boolean(struct context *, const struct token *,
1726 unsigned int, char *, unsigned int);
1727 static int comp_action(struct context *, const struct token *,
1728 unsigned int, char *, unsigned int);
1729 static int comp_port(struct context *, const struct token *,
1730 unsigned int, char *, unsigned int);
1731 static int comp_rule_id(struct context *, const struct token *,
1732 unsigned int, char *, unsigned int);
1733 static int comp_vc_action_rss_type(struct context *, const struct token *,
1734 unsigned int, char *, unsigned int);
1735 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1736 unsigned int, char *, unsigned int);
1737 static int comp_set_raw_index(struct context *, const struct token *,
1738 unsigned int, char *, unsigned int);
1739 static int comp_set_sample_index(struct context *, const struct token *,
1740 unsigned int, char *, unsigned int);
1742 /** Token definitions. */
1743 static const struct token token_list[] = {
1744 /* Special tokens. */
1747 .help = "null entry, abused as the entry point",
1748 .next = NEXT(NEXT_ENTRY(FLOW)),
1753 .help = "command may end here",
1756 .name = "START_SET",
1757 .help = "null entry, abused as the entry point for set",
1758 .next = NEXT(NEXT_ENTRY(SET)),
1763 .help = "set command may end here",
1765 /* Common tokens. */
1769 .help = "integer value",
1774 .name = "{unsigned}",
1776 .help = "unsigned integer value",
1783 .help = "prefix length for bit-mask",
1784 .call = parse_prefix,
1788 .name = "{boolean}",
1790 .help = "any boolean value",
1791 .call = parse_boolean,
1792 .comp = comp_boolean,
1797 .help = "fixed string",
1798 .call = parse_string,
1804 .help = "fixed string",
1808 .name = "{file path}",
1810 .help = "file path",
1811 .call = parse_string0,
1815 .name = "{MAC address}",
1817 .help = "standard MAC address notation",
1818 .call = parse_mac_addr,
1822 .name = "{IPv4 address}",
1823 .type = "IPV4 ADDRESS",
1824 .help = "standard IPv4 address notation",
1825 .call = parse_ipv4_addr,
1829 .name = "{IPv6 address}",
1830 .type = "IPV6 ADDRESS",
1831 .help = "standard IPv6 address notation",
1832 .call = parse_ipv6_addr,
1836 .name = "{rule id}",
1838 .help = "rule identifier",
1840 .comp = comp_rule_id,
1843 .name = "{port_id}",
1845 .help = "port identifier",
1850 .name = "{group_id}",
1852 .help = "group identifier",
1856 [PRIORITY_LEVEL] = {
1859 .help = "priority level",
1863 [SHARED_ACTION_ID] = {
1864 .name = "{shared_action_id}",
1865 .type = "SHARED_ACTION_ID",
1866 .help = "shared action id",
1870 /* Top-level command. */
1873 .type = "{command} {port_id} [{arg} [...]]",
1874 .help = "manage ingress/egress flow rules",
1875 .next = NEXT(NEXT_ENTRY
1889 /* Top-level command. */
1891 .name = "shared_action",
1892 .type = "{command} {port_id} [{arg} [...]]",
1893 .help = "manage shared actions",
1894 .next = NEXT(next_sa_subcmd, NEXT_ENTRY(PORT_ID)),
1895 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1898 /* Sub-level commands. */
1899 [SHARED_ACTION_CREATE] = {
1901 .help = "create shared action",
1902 .next = NEXT(next_sa_create_attr),
1905 [SHARED_ACTION_UPDATE] = {
1907 .help = "update shared action",
1908 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_SPEC),
1909 NEXT_ENTRY(SHARED_ACTION_ID)),
1910 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
1913 [SHARED_ACTION_DESTROY] = {
1915 .help = "destroy shared action",
1916 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_DESTROY_ID)),
1917 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1918 .call = parse_sa_destroy,
1920 [SHARED_ACTION_QUERY] = {
1922 .help = "query shared action",
1923 .next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(SHARED_ACTION_ID)),
1924 .args = ARGS(ARGS_ENTRY(struct buffer, args.sa.action_id)),
1929 .help = "check whether a flow rule can be created",
1930 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1931 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1936 .help = "create a flow rule",
1937 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1938 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1943 .help = "destroy specific flow rules",
1944 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1945 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1946 .call = parse_destroy,
1950 .help = "destroy all flow rules",
1951 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1952 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1953 .call = parse_flush,
1957 .help = "dump all flow rules to file",
1958 .next = NEXT(next_dump_attr, NEXT_ENTRY(PORT_ID)),
1959 .args = ARGS(ARGS_ENTRY(struct buffer, args.dump.file),
1960 ARGS_ENTRY(struct buffer, port)),
1965 .help = "query an existing flow rule",
1966 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1967 NEXT_ENTRY(RULE_ID),
1968 NEXT_ENTRY(PORT_ID)),
1969 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1970 ARGS_ENTRY(struct buffer, args.query.rule),
1971 ARGS_ENTRY(struct buffer, port)),
1972 .call = parse_query,
1976 .help = "list existing flow rules",
1977 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1978 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1983 .help = "list and destroy aged flows",
1984 .next = NEXT(next_aged_attr, NEXT_ENTRY(PORT_ID)),
1985 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1990 .help = "restrict ingress traffic to the defined flow rules",
1991 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1992 NEXT_ENTRY(PORT_ID)),
1993 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1994 ARGS_ENTRY(struct buffer, port)),
1995 .call = parse_isolate,
1999 .help = "new tunnel API",
2000 .next = NEXT(NEXT_ENTRY
2001 (TUNNEL_CREATE, TUNNEL_LIST, TUNNEL_DESTROY)),
2002 .call = parse_tunnel,
2004 /* Tunnel arguments. */
2007 .help = "create new tunnel object",
2008 .next = NEXT(tunnel_create_attr, NEXT_ENTRY(PORT_ID)),
2009 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2010 .call = parse_tunnel,
2012 [TUNNEL_CREATE_TYPE] = {
2014 .help = "tunnel type to create",
2015 .next = NEXT(tunnel_create_attr, NEXT_ENTRY(FILE_PATH)),
2016 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, type)),
2017 .call = parse_tunnel,
2019 [TUNNEL_DESTROY] = {
2021 .help = "destroy tunnel",
2022 .next = NEXT(tunnel_destroy_attr, NEXT_ENTRY(PORT_ID)),
2023 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2024 .call = parse_tunnel,
2026 [TUNNEL_DESTROY_ID] = {
2028 .help = "tunnel identifier to destroy",
2029 .next = NEXT(tunnel_destroy_attr, NEXT_ENTRY(UNSIGNED)),
2030 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2031 .call = parse_tunnel,
2035 .help = "list existing tunnels",
2036 .next = NEXT(tunnel_list_attr, NEXT_ENTRY(PORT_ID)),
2037 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2038 .call = parse_tunnel,
2040 /* Destroy arguments. */
2043 .help = "specify a rule identifier",
2044 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
2045 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
2046 .call = parse_destroy,
2048 /* Query arguments. */
2052 .help = "action to query, must be part of the rule",
2053 .call = parse_action,
2054 .comp = comp_action,
2056 /* List arguments. */
2059 .help = "specify a group",
2060 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
2061 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
2066 .help = "specify that aged flows should be destroyed",
2070 /* Validate/create attributes. */
2073 .help = "specify a group",
2074 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
2075 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
2080 .help = "specify a priority level",
2081 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
2082 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
2087 .help = "apply rule to ingress",
2088 .next = NEXT(next_vc_attr),
2093 .help = "apply rule to egress",
2094 .next = NEXT(next_vc_attr),
2099 .help = "apply rule directly to endpoints found in pattern",
2100 .next = NEXT(next_vc_attr),
2104 .name = "tunnel_set",
2105 .help = "tunnel steer rule",
2106 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2107 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2111 .name = "tunnel_match",
2112 .help = "tunnel match rule",
2113 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2114 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2117 /* Validate/create pattern. */
2120 .help = "submit a list of pattern items",
2121 .next = NEXT(next_item),
2126 .help = "match value perfectly (with full bit-mask)",
2127 .call = parse_vc_spec,
2129 [ITEM_PARAM_SPEC] = {
2131 .help = "match value according to configured bit-mask",
2132 .call = parse_vc_spec,
2134 [ITEM_PARAM_LAST] = {
2136 .help = "specify upper bound to establish a range",
2137 .call = parse_vc_spec,
2139 [ITEM_PARAM_MASK] = {
2141 .help = "specify bit-mask with relevant bits set to one",
2142 .call = parse_vc_spec,
2144 [ITEM_PARAM_PREFIX] = {
2146 .help = "generate bit-mask from a prefix length",
2147 .call = parse_vc_spec,
2151 .help = "specify next pattern item",
2152 .next = NEXT(next_item),
2156 .help = "end list of pattern items",
2157 .priv = PRIV_ITEM(END, 0),
2158 .next = NEXT(NEXT_ENTRY(ACTIONS)),
2163 .help = "no-op pattern item",
2164 .priv = PRIV_ITEM(VOID, 0),
2165 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2170 .help = "perform actions when pattern does not match",
2171 .priv = PRIV_ITEM(INVERT, 0),
2172 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2177 .help = "match any protocol for the current layer",
2178 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
2179 .next = NEXT(item_any),
2184 .help = "number of layers covered",
2185 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
2186 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
2190 .help = "match traffic from/to the physical function",
2191 .priv = PRIV_ITEM(PF, 0),
2192 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2197 .help = "match traffic from/to a virtual function ID",
2198 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
2199 .next = NEXT(item_vf),
2205 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
2206 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
2210 .help = "match traffic from/to a specific physical port",
2211 .priv = PRIV_ITEM(PHY_PORT,
2212 sizeof(struct rte_flow_item_phy_port)),
2213 .next = NEXT(item_phy_port),
2216 [ITEM_PHY_PORT_INDEX] = {
2218 .help = "physical port index",
2219 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
2220 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
2224 .help = "match traffic from/to a given DPDK port ID",
2225 .priv = PRIV_ITEM(PORT_ID,
2226 sizeof(struct rte_flow_item_port_id)),
2227 .next = NEXT(item_port_id),
2230 [ITEM_PORT_ID_ID] = {
2232 .help = "DPDK port ID",
2233 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
2234 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
2238 .help = "match traffic against value set in previously matched rule",
2239 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
2240 .next = NEXT(item_mark),
2245 .help = "integer value to match against",
2246 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
2247 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
2251 .help = "match an arbitrary byte string",
2252 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
2253 .next = NEXT(item_raw),
2256 [ITEM_RAW_RELATIVE] = {
2258 .help = "look for pattern after the previous item",
2259 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2260 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2263 [ITEM_RAW_SEARCH] = {
2265 .help = "search pattern from offset (see also limit)",
2266 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2267 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2270 [ITEM_RAW_OFFSET] = {
2272 .help = "absolute or relative offset for pattern",
2273 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
2274 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
2276 [ITEM_RAW_LIMIT] = {
2278 .help = "search area limit for start of pattern",
2279 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
2280 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
2282 [ITEM_RAW_PATTERN] = {
2284 .help = "byte string to look for",
2285 .next = NEXT(item_raw,
2287 NEXT_ENTRY(ITEM_PARAM_IS,
2290 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
2291 ARGS_ENTRY(struct rte_flow_item_raw, length),
2292 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
2293 ITEM_RAW_PATTERN_SIZE)),
2297 .help = "match Ethernet header",
2298 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
2299 .next = NEXT(item_eth),
2304 .help = "destination MAC",
2305 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2306 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
2310 .help = "source MAC",
2311 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2312 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
2316 .help = "EtherType",
2317 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2318 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
2320 [ITEM_ETH_HAS_VLAN] = {
2322 .help = "packet header contains VLAN",
2323 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2324 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_eth,
2329 .help = "match 802.1Q/ad VLAN tag",
2330 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
2331 .next = NEXT(item_vlan),
2336 .help = "tag control information",
2337 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2338 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
2342 .help = "priority code point",
2343 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2344 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2349 .help = "drop eligible indicator",
2350 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2351 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2356 .help = "VLAN identifier",
2357 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2358 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2361 [ITEM_VLAN_INNER_TYPE] = {
2362 .name = "inner_type",
2363 .help = "inner EtherType",
2364 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2365 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
2368 [ITEM_VLAN_HAS_MORE_VLAN] = {
2369 .name = "has_more_vlan",
2370 .help = "packet header contains another VLAN",
2371 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2372 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_vlan,
2377 .help = "match IPv4 header",
2378 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
2379 .next = NEXT(item_ipv4),
2384 .help = "type of service",
2385 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2386 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2387 hdr.type_of_service)),
2389 [ITEM_IPV4_FRAGMENT_OFFSET] = {
2390 .name = "fragment_offset",
2391 .help = "fragmentation flags and fragment offset",
2392 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2393 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2394 hdr.fragment_offset)),
2398 .help = "time to live",
2399 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2400 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2403 [ITEM_IPV4_PROTO] = {
2405 .help = "next protocol ID",
2406 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2407 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2408 hdr.next_proto_id)),
2412 .help = "source address",
2413 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2414 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2419 .help = "destination address",
2420 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2421 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2426 .help = "match IPv6 header",
2427 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
2428 .next = NEXT(item_ipv6),
2433 .help = "traffic class",
2434 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2435 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2437 "\x0f\xf0\x00\x00")),
2439 [ITEM_IPV6_FLOW] = {
2441 .help = "flow label",
2442 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2443 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2445 "\x00\x0f\xff\xff")),
2447 [ITEM_IPV6_PROTO] = {
2449 .help = "protocol (next header)",
2450 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2451 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2456 .help = "hop limit",
2457 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2458 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2463 .help = "source address",
2464 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2465 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2470 .help = "destination address",
2471 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2472 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2475 [ITEM_IPV6_HAS_FRAG_EXT] = {
2476 .name = "has_frag_ext",
2477 .help = "packet contains IPv6 fragment extension header",
2478 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2479 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_ipv6,
2484 .help = "match ICMP header",
2485 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2486 .next = NEXT(item_icmp),
2489 [ITEM_ICMP_TYPE] = {
2491 .help = "ICMP packet type",
2492 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2493 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2496 [ITEM_ICMP_CODE] = {
2498 .help = "ICMP packet code",
2499 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2500 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2503 [ITEM_ICMP_IDENT] = {
2505 .help = "ICMP packet identifier",
2506 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2507 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2512 .help = "ICMP packet sequence number",
2513 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2514 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2519 .help = "match UDP header",
2520 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2521 .next = NEXT(item_udp),
2526 .help = "UDP source port",
2527 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2528 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2533 .help = "UDP destination port",
2534 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2535 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2540 .help = "match TCP header",
2541 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2542 .next = NEXT(item_tcp),
2547 .help = "TCP source port",
2548 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2549 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2554 .help = "TCP destination port",
2555 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2556 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2559 [ITEM_TCP_FLAGS] = {
2561 .help = "TCP flags",
2562 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2563 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2568 .help = "match SCTP header",
2569 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2570 .next = NEXT(item_sctp),
2575 .help = "SCTP source port",
2576 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2577 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2582 .help = "SCTP destination port",
2583 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2584 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2589 .help = "validation tag",
2590 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2591 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2594 [ITEM_SCTP_CKSUM] = {
2597 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2598 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2603 .help = "match VXLAN header",
2604 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2605 .next = NEXT(item_vxlan),
2608 [ITEM_VXLAN_VNI] = {
2610 .help = "VXLAN identifier",
2611 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2612 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2616 .help = "match E-Tag header",
2617 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2618 .next = NEXT(item_e_tag),
2621 [ITEM_E_TAG_GRP_ECID_B] = {
2622 .name = "grp_ecid_b",
2623 .help = "GRP and E-CID base",
2624 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2625 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2631 .help = "match NVGRE header",
2632 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2633 .next = NEXT(item_nvgre),
2636 [ITEM_NVGRE_TNI] = {
2638 .help = "virtual subnet ID",
2639 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2640 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2644 .help = "match MPLS header",
2645 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2646 .next = NEXT(item_mpls),
2649 [ITEM_MPLS_LABEL] = {
2651 .help = "MPLS label",
2652 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2653 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2659 .help = "MPLS Traffic Class",
2660 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2661 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2667 .help = "MPLS Bottom-of-Stack",
2668 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2669 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2675 .help = "match GRE header",
2676 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2677 .next = NEXT(item_gre),
2680 [ITEM_GRE_PROTO] = {
2682 .help = "GRE protocol type",
2683 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2684 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2687 [ITEM_GRE_C_RSVD0_VER] = {
2688 .name = "c_rsvd0_ver",
2690 "checksum (1b), undefined (1b), key bit (1b),"
2691 " sequence number (1b), reserved 0 (9b),"
2693 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2694 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2697 [ITEM_GRE_C_BIT] = {
2699 .help = "checksum bit (C)",
2700 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2701 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2703 "\x80\x00\x00\x00")),
2705 [ITEM_GRE_S_BIT] = {
2707 .help = "sequence number bit (S)",
2708 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2709 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2711 "\x10\x00\x00\x00")),
2713 [ITEM_GRE_K_BIT] = {
2715 .help = "key bit (K)",
2716 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2717 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2719 "\x20\x00\x00\x00")),
2723 .help = "fuzzy pattern match, expected to be faster than default",
2724 .priv = PRIV_ITEM(FUZZY,
2725 sizeof(struct rte_flow_item_fuzzy)),
2726 .next = NEXT(item_fuzzy),
2729 [ITEM_FUZZY_THRESH] = {
2731 .help = "match accuracy threshold",
2732 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2733 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2738 .help = "match GTP header",
2739 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2740 .next = NEXT(item_gtp),
2743 [ITEM_GTP_FLAGS] = {
2744 .name = "v_pt_rsv_flags",
2745 .help = "GTP flags",
2746 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2747 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp,
2750 [ITEM_GTP_MSG_TYPE] = {
2752 .help = "GTP message type",
2753 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2754 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, msg_type)),
2758 .help = "tunnel endpoint identifier",
2759 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2760 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2764 .help = "match GTP-C header",
2765 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2766 .next = NEXT(item_gtp),
2771 .help = "match GTP-U header",
2772 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2773 .next = NEXT(item_gtp),
2778 .help = "match GENEVE header",
2779 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2780 .next = NEXT(item_geneve),
2783 [ITEM_GENEVE_VNI] = {
2785 .help = "virtual network identifier",
2786 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2787 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2789 [ITEM_GENEVE_PROTO] = {
2791 .help = "GENEVE protocol type",
2792 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2793 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2796 [ITEM_VXLAN_GPE] = {
2797 .name = "vxlan-gpe",
2798 .help = "match VXLAN-GPE header",
2799 .priv = PRIV_ITEM(VXLAN_GPE,
2800 sizeof(struct rte_flow_item_vxlan_gpe)),
2801 .next = NEXT(item_vxlan_gpe),
2804 [ITEM_VXLAN_GPE_VNI] = {
2806 .help = "VXLAN-GPE identifier",
2807 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2808 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2811 [ITEM_ARP_ETH_IPV4] = {
2812 .name = "arp_eth_ipv4",
2813 .help = "match ARP header for Ethernet/IPv4",
2814 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2815 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2816 .next = NEXT(item_arp_eth_ipv4),
2819 [ITEM_ARP_ETH_IPV4_SHA] = {
2821 .help = "sender hardware address",
2822 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2824 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2827 [ITEM_ARP_ETH_IPV4_SPA] = {
2829 .help = "sender IPv4 address",
2830 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2832 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2835 [ITEM_ARP_ETH_IPV4_THA] = {
2837 .help = "target hardware address",
2838 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2840 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2843 [ITEM_ARP_ETH_IPV4_TPA] = {
2845 .help = "target IPv4 address",
2846 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2848 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2853 .help = "match presence of any IPv6 extension header",
2854 .priv = PRIV_ITEM(IPV6_EXT,
2855 sizeof(struct rte_flow_item_ipv6_ext)),
2856 .next = NEXT(item_ipv6_ext),
2859 [ITEM_IPV6_EXT_NEXT_HDR] = {
2861 .help = "next header",
2862 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2863 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2866 [ITEM_IPV6_FRAG_EXT] = {
2867 .name = "ipv6_frag_ext",
2868 .help = "match presence of IPv6 fragment extension header",
2869 .priv = PRIV_ITEM(IPV6_FRAG_EXT,
2870 sizeof(struct rte_flow_item_ipv6_frag_ext)),
2871 .next = NEXT(item_ipv6_frag_ext),
2874 [ITEM_IPV6_FRAG_EXT_NEXT_HDR] = {
2876 .help = "next header",
2877 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2879 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_ipv6_frag_ext,
2882 [ITEM_IPV6_FRAG_EXT_FRAG_DATA] = {
2883 .name = "frag_data",
2884 .help = "fragment flags and offset",
2885 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2887 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_frag_ext,
2892 .help = "match any ICMPv6 header",
2893 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2894 .next = NEXT(item_icmp6),
2897 [ITEM_ICMP6_TYPE] = {
2899 .help = "ICMPv6 type",
2900 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2901 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2904 [ITEM_ICMP6_CODE] = {
2906 .help = "ICMPv6 code",
2907 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2908 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2911 [ITEM_ICMP6_ND_NS] = {
2912 .name = "icmp6_nd_ns",
2913 .help = "match ICMPv6 neighbor discovery solicitation",
2914 .priv = PRIV_ITEM(ICMP6_ND_NS,
2915 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2916 .next = NEXT(item_icmp6_nd_ns),
2919 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2920 .name = "target_addr",
2921 .help = "target address",
2922 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2924 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2927 [ITEM_ICMP6_ND_NA] = {
2928 .name = "icmp6_nd_na",
2929 .help = "match ICMPv6 neighbor discovery advertisement",
2930 .priv = PRIV_ITEM(ICMP6_ND_NA,
2931 sizeof(struct rte_flow_item_icmp6_nd_na)),
2932 .next = NEXT(item_icmp6_nd_na),
2935 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2936 .name = "target_addr",
2937 .help = "target address",
2938 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2940 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2943 [ITEM_ICMP6_ND_OPT] = {
2944 .name = "icmp6_nd_opt",
2945 .help = "match presence of any ICMPv6 neighbor discovery"
2947 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2948 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2949 .next = NEXT(item_icmp6_nd_opt),
2952 [ITEM_ICMP6_ND_OPT_TYPE] = {
2954 .help = "ND option type",
2955 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2957 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2960 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2961 .name = "icmp6_nd_opt_sla_eth",
2962 .help = "match ICMPv6 neighbor discovery source Ethernet"
2963 " link-layer address option",
2965 (ICMP6_ND_OPT_SLA_ETH,
2966 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2967 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2970 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2972 .help = "source Ethernet LLA",
2973 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2975 .args = ARGS(ARGS_ENTRY_HTON
2976 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2978 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2979 .name = "icmp6_nd_opt_tla_eth",
2980 .help = "match ICMPv6 neighbor discovery target Ethernet"
2981 " link-layer address option",
2983 (ICMP6_ND_OPT_TLA_ETH,
2984 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2985 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2988 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2990 .help = "target Ethernet LLA",
2991 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2993 .args = ARGS(ARGS_ENTRY_HTON
2994 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2998 .help = "match metadata header",
2999 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
3000 .next = NEXT(item_meta),
3003 [ITEM_META_DATA] = {
3005 .help = "metadata value",
3006 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
3007 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
3008 data, "\xff\xff\xff\xff")),
3012 .help = "match GRE key",
3013 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
3014 .next = NEXT(item_gre_key),
3017 [ITEM_GRE_KEY_VALUE] = {
3019 .help = "key value",
3020 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
3021 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3025 .help = "match GTP extension header with type 0x85",
3026 .priv = PRIV_ITEM(GTP_PSC,
3027 sizeof(struct rte_flow_item_gtp_psc)),
3028 .next = NEXT(item_gtp_psc),
3031 [ITEM_GTP_PSC_QFI] = {
3033 .help = "QoS flow identifier",
3034 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3035 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3038 [ITEM_GTP_PSC_PDU_T] = {
3041 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3042 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3047 .help = "match PPPoE session header",
3048 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
3049 .next = NEXT(item_pppoes),
3054 .help = "match PPPoE discovery header",
3055 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
3056 .next = NEXT(item_pppoed),
3059 [ITEM_PPPOE_SEID] = {
3061 .help = "session identifier",
3062 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
3063 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
3066 [ITEM_PPPOE_PROTO_ID] = {
3067 .name = "pppoe_proto_id",
3068 .help = "match PPPoE session protocol identifier",
3069 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
3070 sizeof(struct rte_flow_item_pppoe_proto_id)),
3071 .next = NEXT(item_pppoe_proto_id, NEXT_ENTRY(UNSIGNED),
3073 .args = ARGS(ARGS_ENTRY_HTON
3074 (struct rte_flow_item_pppoe_proto_id, proto_id)),
3079 .help = "match higig2 header",
3080 .priv = PRIV_ITEM(HIGIG2,
3081 sizeof(struct rte_flow_item_higig2_hdr)),
3082 .next = NEXT(item_higig2),
3085 [ITEM_HIGIG2_CLASSIFICATION] = {
3086 .name = "classification",
3087 .help = "match classification of higig2 header",
3088 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3089 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3090 hdr.ppt1.classification)),
3092 [ITEM_HIGIG2_VID] = {
3094 .help = "match vid of higig2 header",
3095 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3096 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3101 .help = "match tag value",
3102 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
3103 .next = NEXT(item_tag),
3108 .help = "tag value to match",
3109 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
3110 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
3112 [ITEM_TAG_INDEX] = {
3114 .help = "index of tag array to match",
3115 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
3116 NEXT_ENTRY(ITEM_PARAM_IS)),
3117 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
3119 [ITEM_L2TPV3OIP] = {
3120 .name = "l2tpv3oip",
3121 .help = "match L2TPv3 over IP header",
3122 .priv = PRIV_ITEM(L2TPV3OIP,
3123 sizeof(struct rte_flow_item_l2tpv3oip)),
3124 .next = NEXT(item_l2tpv3oip),
3127 [ITEM_L2TPV3OIP_SESSION_ID] = {
3128 .name = "session_id",
3129 .help = "session identifier",
3130 .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param),
3131 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip,
3136 .help = "match ESP header",
3137 .priv = PRIV_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
3138 .next = NEXT(item_esp),
3143 .help = "security parameters index",
3144 .next = NEXT(item_esp, NEXT_ENTRY(UNSIGNED), item_param),
3145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_esp,
3150 .help = "match AH header",
3151 .priv = PRIV_ITEM(AH, sizeof(struct rte_flow_item_ah)),
3152 .next = NEXT(item_ah),
3157 .help = "security parameters index",
3158 .next = NEXT(item_ah, NEXT_ENTRY(UNSIGNED), item_param),
3159 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ah, spi)),
3163 .help = "match pfcp header",
3164 .priv = PRIV_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
3165 .next = NEXT(item_pfcp),
3168 [ITEM_PFCP_S_FIELD] = {
3171 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3172 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp,
3175 [ITEM_PFCP_SEID] = {
3177 .help = "session endpoint identifier",
3178 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3179 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, seid)),
3183 .help = "match eCPRI header",
3184 .priv = PRIV_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
3185 .next = NEXT(item_ecpri),
3188 [ITEM_ECPRI_COMMON] = {
3190 .help = "eCPRI common header",
3191 .next = NEXT(item_ecpri_common),
3193 [ITEM_ECPRI_COMMON_TYPE] = {
3195 .help = "type of common header",
3196 .next = NEXT(item_ecpri_common_type),
3197 .args = ARGS(ARG_ENTRY_HTON(struct rte_flow_item_ecpri)),
3199 [ITEM_ECPRI_COMMON_TYPE_IQ_DATA] = {
3201 .help = "Type #0: IQ Data",
3202 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
3204 .call = parse_vc_item_ecpri_type,
3206 [ITEM_ECPRI_MSG_IQ_DATA_PCID] = {
3208 .help = "Physical Channel ID",
3209 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
3210 ITEM_ECPRI_COMMON, ITEM_NEXT),
3211 NEXT_ENTRY(UNSIGNED), item_param),
3212 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3215 [ITEM_ECPRI_COMMON_TYPE_RTC_CTRL] = {
3217 .help = "Type #2: Real-Time Control Data",
3218 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
3220 .call = parse_vc_item_ecpri_type,
3222 [ITEM_ECPRI_MSG_RTC_CTRL_RTCID] = {
3224 .help = "Real-Time Control Data ID",
3225 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
3226 ITEM_ECPRI_COMMON, ITEM_NEXT),
3227 NEXT_ENTRY(UNSIGNED), item_param),
3228 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3231 [ITEM_ECPRI_COMMON_TYPE_DLY_MSR] = {
3232 .name = "delay_measure",
3233 .help = "Type #5: One-Way Delay Measurement",
3234 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
3236 .call = parse_vc_item_ecpri_type,
3238 [ITEM_ECPRI_MSG_DLY_MSR_MSRID] = {
3240 .help = "Measurement ID",
3241 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
3242 ITEM_ECPRI_COMMON, ITEM_NEXT),
3243 NEXT_ENTRY(UNSIGNED), item_param),
3244 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3247 /* Validate/create actions. */
3250 .help = "submit a list of associated actions",
3251 .next = NEXT(next_action),
3256 .help = "specify next action",
3257 .next = NEXT(next_action),
3261 .help = "end list of actions",
3262 .priv = PRIV_ACTION(END, 0),
3267 .help = "no-op action",
3268 .priv = PRIV_ACTION(VOID, 0),
3269 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3272 [ACTION_PASSTHRU] = {
3274 .help = "let subsequent rule process matched packets",
3275 .priv = PRIV_ACTION(PASSTHRU, 0),
3276 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3281 .help = "redirect traffic to a given group",
3282 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
3283 .next = NEXT(action_jump),
3286 [ACTION_JUMP_GROUP] = {
3288 .help = "group to redirect traffic to",
3289 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
3290 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
3291 .call = parse_vc_conf,
3295 .help = "attach 32-bit value to packets",
3296 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
3297 .next = NEXT(action_mark),
3300 [ACTION_MARK_ID] = {
3302 .help = "32-bit value to return with packets",
3303 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
3304 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
3305 .call = parse_vc_conf,
3309 .help = "flag packets",
3310 .priv = PRIV_ACTION(FLAG, 0),
3311 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3316 .help = "assign packets to a given queue index",
3317 .priv = PRIV_ACTION(QUEUE,
3318 sizeof(struct rte_flow_action_queue)),
3319 .next = NEXT(action_queue),
3322 [ACTION_QUEUE_INDEX] = {
3324 .help = "queue index to use",
3325 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
3326 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
3327 .call = parse_vc_conf,
3331 .help = "drop packets (note: passthru has priority)",
3332 .priv = PRIV_ACTION(DROP, 0),
3333 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3338 .help = "enable counters for this rule",
3339 .priv = PRIV_ACTION(COUNT,
3340 sizeof(struct rte_flow_action_count)),
3341 .next = NEXT(action_count),
3344 [ACTION_COUNT_ID] = {
3345 .name = "identifier",
3346 .help = "counter identifier to use",
3347 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
3348 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
3349 .call = parse_vc_conf,
3351 [ACTION_COUNT_SHARED] = {
3353 .help = "shared counter",
3354 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
3355 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
3357 .call = parse_vc_conf,
3361 .help = "spread packets among several queues",
3362 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
3363 .next = NEXT(action_rss),
3364 .call = parse_vc_action_rss,
3366 [ACTION_RSS_FUNC] = {
3368 .help = "RSS hash function to apply",
3369 .next = NEXT(action_rss,
3370 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
3371 ACTION_RSS_FUNC_TOEPLITZ,
3372 ACTION_RSS_FUNC_SIMPLE_XOR,
3373 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
3375 [ACTION_RSS_FUNC_DEFAULT] = {
3377 .help = "default hash function",
3378 .call = parse_vc_action_rss_func,
3380 [ACTION_RSS_FUNC_TOEPLITZ] = {
3382 .help = "Toeplitz hash function",
3383 .call = parse_vc_action_rss_func,
3385 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
3386 .name = "simple_xor",
3387 .help = "simple XOR hash function",
3388 .call = parse_vc_action_rss_func,
3390 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
3391 .name = "symmetric_toeplitz",
3392 .help = "Symmetric Toeplitz hash function",
3393 .call = parse_vc_action_rss_func,
3395 [ACTION_RSS_LEVEL] = {
3397 .help = "encapsulation level for \"types\"",
3398 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3399 .args = ARGS(ARGS_ENTRY_ARB
3400 (offsetof(struct action_rss_data, conf) +
3401 offsetof(struct rte_flow_action_rss, level),
3402 sizeof(((struct rte_flow_action_rss *)0)->
3405 [ACTION_RSS_TYPES] = {
3407 .help = "specific RSS hash types",
3408 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
3410 [ACTION_RSS_TYPE] = {
3412 .help = "RSS hash type",
3413 .call = parse_vc_action_rss_type,
3414 .comp = comp_vc_action_rss_type,
3416 [ACTION_RSS_KEY] = {
3418 .help = "RSS hash key",
3419 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
3420 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
3422 (offsetof(struct action_rss_data, conf) +
3423 offsetof(struct rte_flow_action_rss, key_len),
3424 sizeof(((struct rte_flow_action_rss *)0)->
3426 ARGS_ENTRY(struct action_rss_data, key)),
3428 [ACTION_RSS_KEY_LEN] = {
3430 .help = "RSS hash key length in bytes",
3431 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3432 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3433 (offsetof(struct action_rss_data, conf) +
3434 offsetof(struct rte_flow_action_rss, key_len),
3435 sizeof(((struct rte_flow_action_rss *)0)->
3438 RSS_HASH_KEY_LENGTH)),
3440 [ACTION_RSS_QUEUES] = {
3442 .help = "queue indices to use",
3443 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
3444 .call = parse_vc_conf,
3446 [ACTION_RSS_QUEUE] = {
3448 .help = "queue index",
3449 .call = parse_vc_action_rss_queue,
3450 .comp = comp_vc_action_rss_queue,
3454 .help = "direct traffic to physical function",
3455 .priv = PRIV_ACTION(PF, 0),
3456 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3461 .help = "direct traffic to a virtual function ID",
3462 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
3463 .next = NEXT(action_vf),
3466 [ACTION_VF_ORIGINAL] = {
3468 .help = "use original VF ID if possible",
3469 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
3470 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
3472 .call = parse_vc_conf,
3477 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
3478 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
3479 .call = parse_vc_conf,
3481 [ACTION_PHY_PORT] = {
3483 .help = "direct packets to physical port index",
3484 .priv = PRIV_ACTION(PHY_PORT,
3485 sizeof(struct rte_flow_action_phy_port)),
3486 .next = NEXT(action_phy_port),
3489 [ACTION_PHY_PORT_ORIGINAL] = {
3491 .help = "use original port index if possible",
3492 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
3493 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
3495 .call = parse_vc_conf,
3497 [ACTION_PHY_PORT_INDEX] = {
3499 .help = "physical port index",
3500 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
3501 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
3503 .call = parse_vc_conf,
3505 [ACTION_PORT_ID] = {
3507 .help = "direct matching traffic to a given DPDK port ID",
3508 .priv = PRIV_ACTION(PORT_ID,
3509 sizeof(struct rte_flow_action_port_id)),
3510 .next = NEXT(action_port_id),
3513 [ACTION_PORT_ID_ORIGINAL] = {
3515 .help = "use original DPDK port ID if possible",
3516 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
3517 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
3519 .call = parse_vc_conf,
3521 [ACTION_PORT_ID_ID] = {
3523 .help = "DPDK port ID",
3524 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
3525 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
3526 .call = parse_vc_conf,
3530 .help = "meter matched packets using the given meter id",
3531 .priv = PRIV_ACTION(METER,
3532 sizeof(struct rte_flow_action_meter)),
3533 .next = NEXT(action_meter),
3536 [ACTION_METER_ID] = {
3538 .help = "meter id to use",
3539 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
3540 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
3541 .call = parse_vc_conf,
3543 [ACTION_OF_SET_MPLS_TTL] = {
3544 .name = "of_set_mpls_ttl",
3545 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
3548 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
3549 .next = NEXT(action_of_set_mpls_ttl),
3552 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
3555 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
3556 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
3558 .call = parse_vc_conf,
3560 [ACTION_OF_DEC_MPLS_TTL] = {
3561 .name = "of_dec_mpls_ttl",
3562 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
3563 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
3564 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3567 [ACTION_OF_SET_NW_TTL] = {
3568 .name = "of_set_nw_ttl",
3569 .help = "OpenFlow's OFPAT_SET_NW_TTL",
3572 sizeof(struct rte_flow_action_of_set_nw_ttl)),
3573 .next = NEXT(action_of_set_nw_ttl),
3576 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
3579 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
3580 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
3582 .call = parse_vc_conf,
3584 [ACTION_OF_DEC_NW_TTL] = {
3585 .name = "of_dec_nw_ttl",
3586 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
3587 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
3588 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3591 [ACTION_OF_COPY_TTL_OUT] = {
3592 .name = "of_copy_ttl_out",
3593 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
3594 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
3595 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3598 [ACTION_OF_COPY_TTL_IN] = {
3599 .name = "of_copy_ttl_in",
3600 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
3601 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
3602 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3605 [ACTION_OF_POP_VLAN] = {
3606 .name = "of_pop_vlan",
3607 .help = "OpenFlow's OFPAT_POP_VLAN",
3608 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
3609 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3612 [ACTION_OF_PUSH_VLAN] = {
3613 .name = "of_push_vlan",
3614 .help = "OpenFlow's OFPAT_PUSH_VLAN",
3617 sizeof(struct rte_flow_action_of_push_vlan)),
3618 .next = NEXT(action_of_push_vlan),
3621 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
3622 .name = "ethertype",
3623 .help = "EtherType",
3624 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
3625 .args = ARGS(ARGS_ENTRY_HTON
3626 (struct rte_flow_action_of_push_vlan,
3628 .call = parse_vc_conf,
3630 [ACTION_OF_SET_VLAN_VID] = {
3631 .name = "of_set_vlan_vid",
3632 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
3635 sizeof(struct rte_flow_action_of_set_vlan_vid)),
3636 .next = NEXT(action_of_set_vlan_vid),
3639 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
3642 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
3643 .args = ARGS(ARGS_ENTRY_HTON
3644 (struct rte_flow_action_of_set_vlan_vid,
3646 .call = parse_vc_conf,
3648 [ACTION_OF_SET_VLAN_PCP] = {
3649 .name = "of_set_vlan_pcp",
3650 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3653 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3654 .next = NEXT(action_of_set_vlan_pcp),
3657 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3659 .help = "VLAN priority",
3660 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3661 .args = ARGS(ARGS_ENTRY_HTON
3662 (struct rte_flow_action_of_set_vlan_pcp,
3664 .call = parse_vc_conf,
3666 [ACTION_OF_POP_MPLS] = {
3667 .name = "of_pop_mpls",
3668 .help = "OpenFlow's OFPAT_POP_MPLS",
3669 .priv = PRIV_ACTION(OF_POP_MPLS,
3670 sizeof(struct rte_flow_action_of_pop_mpls)),
3671 .next = NEXT(action_of_pop_mpls),
3674 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3675 .name = "ethertype",
3676 .help = "EtherType",
3677 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3678 .args = ARGS(ARGS_ENTRY_HTON
3679 (struct rte_flow_action_of_pop_mpls,
3681 .call = parse_vc_conf,
3683 [ACTION_OF_PUSH_MPLS] = {
3684 .name = "of_push_mpls",
3685 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3688 sizeof(struct rte_flow_action_of_push_mpls)),
3689 .next = NEXT(action_of_push_mpls),
3692 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3693 .name = "ethertype",
3694 .help = "EtherType",
3695 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3696 .args = ARGS(ARGS_ENTRY_HTON
3697 (struct rte_flow_action_of_push_mpls,
3699 .call = parse_vc_conf,
3701 [ACTION_VXLAN_ENCAP] = {
3702 .name = "vxlan_encap",
3703 .help = "VXLAN encapsulation, uses configuration set by \"set"
3705 .priv = PRIV_ACTION(VXLAN_ENCAP,
3706 sizeof(struct action_vxlan_encap_data)),
3707 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3708 .call = parse_vc_action_vxlan_encap,
3710 [ACTION_VXLAN_DECAP] = {
3711 .name = "vxlan_decap",
3712 .help = "Performs a decapsulation action by stripping all"
3713 " headers of the VXLAN tunnel network overlay from the"
3715 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3716 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3719 [ACTION_NVGRE_ENCAP] = {
3720 .name = "nvgre_encap",
3721 .help = "NVGRE encapsulation, uses configuration set by \"set"
3723 .priv = PRIV_ACTION(NVGRE_ENCAP,
3724 sizeof(struct action_nvgre_encap_data)),
3725 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3726 .call = parse_vc_action_nvgre_encap,
3728 [ACTION_NVGRE_DECAP] = {
3729 .name = "nvgre_decap",
3730 .help = "Performs a decapsulation action by stripping all"
3731 " headers of the NVGRE tunnel network overlay from the"
3733 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3734 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3737 [ACTION_L2_ENCAP] = {
3739 .help = "l2 encap, uses configuration set by"
3740 " \"set l2_encap\"",
3741 .priv = PRIV_ACTION(RAW_ENCAP,
3742 sizeof(struct action_raw_encap_data)),
3743 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3744 .call = parse_vc_action_l2_encap,
3746 [ACTION_L2_DECAP] = {
3748 .help = "l2 decap, uses configuration set by"
3749 " \"set l2_decap\"",
3750 .priv = PRIV_ACTION(RAW_DECAP,
3751 sizeof(struct action_raw_decap_data)),
3752 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3753 .call = parse_vc_action_l2_decap,
3755 [ACTION_MPLSOGRE_ENCAP] = {
3756 .name = "mplsogre_encap",
3757 .help = "mplsogre encapsulation, uses configuration set by"
3758 " \"set mplsogre_encap\"",
3759 .priv = PRIV_ACTION(RAW_ENCAP,
3760 sizeof(struct action_raw_encap_data)),
3761 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3762 .call = parse_vc_action_mplsogre_encap,
3764 [ACTION_MPLSOGRE_DECAP] = {
3765 .name = "mplsogre_decap",
3766 .help = "mplsogre decapsulation, uses configuration set by"
3767 " \"set mplsogre_decap\"",
3768 .priv = PRIV_ACTION(RAW_DECAP,
3769 sizeof(struct action_raw_decap_data)),
3770 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3771 .call = parse_vc_action_mplsogre_decap,
3773 [ACTION_MPLSOUDP_ENCAP] = {
3774 .name = "mplsoudp_encap",
3775 .help = "mplsoudp encapsulation, uses configuration set by"
3776 " \"set mplsoudp_encap\"",
3777 .priv = PRIV_ACTION(RAW_ENCAP,
3778 sizeof(struct action_raw_encap_data)),
3779 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3780 .call = parse_vc_action_mplsoudp_encap,
3782 [ACTION_MPLSOUDP_DECAP] = {
3783 .name = "mplsoudp_decap",
3784 .help = "mplsoudp decapsulation, uses configuration set by"
3785 " \"set mplsoudp_decap\"",
3786 .priv = PRIV_ACTION(RAW_DECAP,
3787 sizeof(struct action_raw_decap_data)),
3788 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3789 .call = parse_vc_action_mplsoudp_decap,
3791 [ACTION_SET_IPV4_SRC] = {
3792 .name = "set_ipv4_src",
3793 .help = "Set a new IPv4 source address in the outermost"
3795 .priv = PRIV_ACTION(SET_IPV4_SRC,
3796 sizeof(struct rte_flow_action_set_ipv4)),
3797 .next = NEXT(action_set_ipv4_src),
3800 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3801 .name = "ipv4_addr",
3802 .help = "new IPv4 source address to set",
3803 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3804 .args = ARGS(ARGS_ENTRY_HTON
3805 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3806 .call = parse_vc_conf,
3808 [ACTION_SET_IPV4_DST] = {
3809 .name = "set_ipv4_dst",
3810 .help = "Set a new IPv4 destination address in the outermost"
3812 .priv = PRIV_ACTION(SET_IPV4_DST,
3813 sizeof(struct rte_flow_action_set_ipv4)),
3814 .next = NEXT(action_set_ipv4_dst),
3817 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3818 .name = "ipv4_addr",
3819 .help = "new IPv4 destination address to set",
3820 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3821 .args = ARGS(ARGS_ENTRY_HTON
3822 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3823 .call = parse_vc_conf,
3825 [ACTION_SET_IPV6_SRC] = {
3826 .name = "set_ipv6_src",
3827 .help = "Set a new IPv6 source address in the outermost"
3829 .priv = PRIV_ACTION(SET_IPV6_SRC,
3830 sizeof(struct rte_flow_action_set_ipv6)),
3831 .next = NEXT(action_set_ipv6_src),
3834 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3835 .name = "ipv6_addr",
3836 .help = "new IPv6 source address to set",
3837 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3838 .args = ARGS(ARGS_ENTRY_HTON
3839 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3840 .call = parse_vc_conf,
3842 [ACTION_SET_IPV6_DST] = {
3843 .name = "set_ipv6_dst",
3844 .help = "Set a new IPv6 destination address in the outermost"
3846 .priv = PRIV_ACTION(SET_IPV6_DST,
3847 sizeof(struct rte_flow_action_set_ipv6)),
3848 .next = NEXT(action_set_ipv6_dst),
3851 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3852 .name = "ipv6_addr",
3853 .help = "new IPv6 destination address to set",
3854 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3855 .args = ARGS(ARGS_ENTRY_HTON
3856 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3857 .call = parse_vc_conf,
3859 [ACTION_SET_TP_SRC] = {
3860 .name = "set_tp_src",
3861 .help = "set a new source port number in the outermost"
3863 .priv = PRIV_ACTION(SET_TP_SRC,
3864 sizeof(struct rte_flow_action_set_tp)),
3865 .next = NEXT(action_set_tp_src),
3868 [ACTION_SET_TP_SRC_TP_SRC] = {
3870 .help = "new source port number to set",
3871 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3872 .args = ARGS(ARGS_ENTRY_HTON
3873 (struct rte_flow_action_set_tp, port)),
3874 .call = parse_vc_conf,
3876 [ACTION_SET_TP_DST] = {
3877 .name = "set_tp_dst",
3878 .help = "set a new destination port number in the outermost"
3880 .priv = PRIV_ACTION(SET_TP_DST,
3881 sizeof(struct rte_flow_action_set_tp)),
3882 .next = NEXT(action_set_tp_dst),
3885 [ACTION_SET_TP_DST_TP_DST] = {
3887 .help = "new destination port number to set",
3888 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3889 .args = ARGS(ARGS_ENTRY_HTON
3890 (struct rte_flow_action_set_tp, port)),
3891 .call = parse_vc_conf,
3893 [ACTION_MAC_SWAP] = {
3895 .help = "Swap the source and destination MAC addresses"
3896 " in the outermost Ethernet header",
3897 .priv = PRIV_ACTION(MAC_SWAP, 0),
3898 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3901 [ACTION_DEC_TTL] = {
3903 .help = "decrease network TTL if available",
3904 .priv = PRIV_ACTION(DEC_TTL, 0),
3905 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3908 [ACTION_SET_TTL] = {
3910 .help = "set ttl value",
3911 .priv = PRIV_ACTION(SET_TTL,
3912 sizeof(struct rte_flow_action_set_ttl)),
3913 .next = NEXT(action_set_ttl),
3916 [ACTION_SET_TTL_TTL] = {
3917 .name = "ttl_value",
3918 .help = "new ttl value to set",
3919 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3920 .args = ARGS(ARGS_ENTRY_HTON
3921 (struct rte_flow_action_set_ttl, ttl_value)),
3922 .call = parse_vc_conf,
3924 [ACTION_SET_MAC_SRC] = {
3925 .name = "set_mac_src",
3926 .help = "set source mac address",
3927 .priv = PRIV_ACTION(SET_MAC_SRC,
3928 sizeof(struct rte_flow_action_set_mac)),
3929 .next = NEXT(action_set_mac_src),
3932 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3934 .help = "new source mac address",
3935 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3936 .args = ARGS(ARGS_ENTRY_HTON
3937 (struct rte_flow_action_set_mac, mac_addr)),
3938 .call = parse_vc_conf,
3940 [ACTION_SET_MAC_DST] = {
3941 .name = "set_mac_dst",
3942 .help = "set destination mac address",
3943 .priv = PRIV_ACTION(SET_MAC_DST,
3944 sizeof(struct rte_flow_action_set_mac)),
3945 .next = NEXT(action_set_mac_dst),
3948 [ACTION_SET_MAC_DST_MAC_DST] = {
3950 .help = "new destination mac address to set",
3951 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3952 .args = ARGS(ARGS_ENTRY_HTON
3953 (struct rte_flow_action_set_mac, mac_addr)),
3954 .call = parse_vc_conf,
3956 [ACTION_INC_TCP_SEQ] = {
3957 .name = "inc_tcp_seq",
3958 .help = "increase TCP sequence number",
3959 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3960 .next = NEXT(action_inc_tcp_seq),
3963 [ACTION_INC_TCP_SEQ_VALUE] = {
3965 .help = "the value to increase TCP sequence number by",
3966 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3967 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3968 .call = parse_vc_conf,
3970 [ACTION_DEC_TCP_SEQ] = {
3971 .name = "dec_tcp_seq",
3972 .help = "decrease TCP sequence number",
3973 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3974 .next = NEXT(action_dec_tcp_seq),
3977 [ACTION_DEC_TCP_SEQ_VALUE] = {
3979 .help = "the value to decrease TCP sequence number by",
3980 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3981 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3982 .call = parse_vc_conf,
3984 [ACTION_INC_TCP_ACK] = {
3985 .name = "inc_tcp_ack",
3986 .help = "increase TCP acknowledgment number",
3987 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3988 .next = NEXT(action_inc_tcp_ack),
3991 [ACTION_INC_TCP_ACK_VALUE] = {
3993 .help = "the value to increase TCP acknowledgment number by",
3994 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3995 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3996 .call = parse_vc_conf,
3998 [ACTION_DEC_TCP_ACK] = {
3999 .name = "dec_tcp_ack",
4000 .help = "decrease TCP acknowledgment number",
4001 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
4002 .next = NEXT(action_dec_tcp_ack),
4005 [ACTION_DEC_TCP_ACK_VALUE] = {
4007 .help = "the value to decrease TCP acknowledgment number by",
4008 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
4009 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
4010 .call = parse_vc_conf,
4012 [ACTION_RAW_ENCAP] = {
4013 .name = "raw_encap",
4014 .help = "encapsulation data, defined by set raw_encap",
4015 .priv = PRIV_ACTION(RAW_ENCAP,
4016 sizeof(struct action_raw_encap_data)),
4017 .next = NEXT(action_raw_encap),
4018 .call = parse_vc_action_raw_encap,
4020 [ACTION_RAW_ENCAP_INDEX] = {
4022 .help = "the index of raw_encap_confs",
4023 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
4025 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
4028 .help = "unsigned integer value",
4029 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4030 .call = parse_vc_action_raw_encap_index,
4031 .comp = comp_set_raw_index,
4033 [ACTION_RAW_DECAP] = {
4034 .name = "raw_decap",
4035 .help = "decapsulation data, defined by set raw_decap",
4036 .priv = PRIV_ACTION(RAW_DECAP,
4037 sizeof(struct action_raw_decap_data)),
4038 .next = NEXT(action_raw_decap),
4039 .call = parse_vc_action_raw_decap,
4041 [ACTION_RAW_DECAP_INDEX] = {
4043 .help = "the index of raw_decap_confs",
4044 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
4046 [ACTION_RAW_DECAP_INDEX_VALUE] = {
4049 .help = "unsigned integer value",
4050 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4051 .call = parse_vc_action_raw_decap_index,
4052 .comp = comp_set_raw_index,
4054 /* Top-level command. */
4057 .help = "set raw encap/decap/sample data",
4058 .type = "set raw_encap|raw_decap <index> <pattern>"
4059 " or set sample_actions <index> <action>",
4060 .next = NEXT(NEXT_ENTRY
4063 SET_SAMPLE_ACTIONS)),
4064 .call = parse_set_init,
4066 /* Sub-level commands. */
4068 .name = "raw_encap",
4069 .help = "set raw encap data",
4070 .next = NEXT(next_set_raw),
4071 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4072 (offsetof(struct buffer, port),
4073 sizeof(((struct buffer *)0)->port),
4074 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4075 .call = parse_set_raw_encap_decap,
4078 .name = "raw_decap",
4079 .help = "set raw decap data",
4080 .next = NEXT(next_set_raw),
4081 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4082 (offsetof(struct buffer, port),
4083 sizeof(((struct buffer *)0)->port),
4084 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4085 .call = parse_set_raw_encap_decap,
4090 .help = "index of raw_encap/raw_decap data",
4091 .next = NEXT(next_item),
4094 [SET_SAMPLE_INDEX] = {
4097 .help = "index of sample actions",
4098 .next = NEXT(next_action_sample),
4101 [SET_SAMPLE_ACTIONS] = {
4102 .name = "sample_actions",
4103 .help = "set sample actions list",
4104 .next = NEXT(NEXT_ENTRY(SET_SAMPLE_INDEX)),
4105 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4106 (offsetof(struct buffer, port),
4107 sizeof(((struct buffer *)0)->port),
4108 0, RAW_SAMPLE_CONFS_MAX_NUM - 1)),
4109 .call = parse_set_sample_action,
4111 [ACTION_SET_TAG] = {
4114 .priv = PRIV_ACTION(SET_TAG,
4115 sizeof(struct rte_flow_action_set_tag)),
4116 .next = NEXT(action_set_tag),
4119 [ACTION_SET_TAG_INDEX] = {
4121 .help = "index of tag array",
4122 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4123 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
4124 .call = parse_vc_conf,
4126 [ACTION_SET_TAG_DATA] = {
4128 .help = "tag value",
4129 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4130 .args = ARGS(ARGS_ENTRY
4131 (struct rte_flow_action_set_tag, data)),
4132 .call = parse_vc_conf,
4134 [ACTION_SET_TAG_MASK] = {
4136 .help = "mask for tag value",
4137 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4138 .args = ARGS(ARGS_ENTRY
4139 (struct rte_flow_action_set_tag, mask)),
4140 .call = parse_vc_conf,
4142 [ACTION_SET_META] = {
4144 .help = "set metadata",
4145 .priv = PRIV_ACTION(SET_META,
4146 sizeof(struct rte_flow_action_set_meta)),
4147 .next = NEXT(action_set_meta),
4148 .call = parse_vc_action_set_meta,
4150 [ACTION_SET_META_DATA] = {
4152 .help = "metadata value",
4153 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4154 .args = ARGS(ARGS_ENTRY
4155 (struct rte_flow_action_set_meta, data)),
4156 .call = parse_vc_conf,
4158 [ACTION_SET_META_MASK] = {
4160 .help = "mask for metadata value",
4161 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4162 .args = ARGS(ARGS_ENTRY
4163 (struct rte_flow_action_set_meta, mask)),
4164 .call = parse_vc_conf,
4166 [ACTION_SET_IPV4_DSCP] = {
4167 .name = "set_ipv4_dscp",
4168 .help = "set DSCP value",
4169 .priv = PRIV_ACTION(SET_IPV4_DSCP,
4170 sizeof(struct rte_flow_action_set_dscp)),
4171 .next = NEXT(action_set_ipv4_dscp),
4174 [ACTION_SET_IPV4_DSCP_VALUE] = {
4175 .name = "dscp_value",
4176 .help = "new IPv4 DSCP value to set",
4177 .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)),
4178 .args = ARGS(ARGS_ENTRY
4179 (struct rte_flow_action_set_dscp, dscp)),
4180 .call = parse_vc_conf,
4182 [ACTION_SET_IPV6_DSCP] = {
4183 .name = "set_ipv6_dscp",
4184 .help = "set DSCP value",
4185 .priv = PRIV_ACTION(SET_IPV6_DSCP,
4186 sizeof(struct rte_flow_action_set_dscp)),
4187 .next = NEXT(action_set_ipv6_dscp),
4190 [ACTION_SET_IPV6_DSCP_VALUE] = {
4191 .name = "dscp_value",
4192 .help = "new IPv6 DSCP value to set",
4193 .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)),
4194 .args = ARGS(ARGS_ENTRY
4195 (struct rte_flow_action_set_dscp, dscp)),
4196 .call = parse_vc_conf,
4200 .help = "enable flow aging with the given timeout",
4201 .next = NEXT(action_age),
4202 .priv = PRIV_ACTION(AGE,
4203 sizeof(struct rte_flow_action_age)),
4206 [ACTION_AGE_TIMEOUT] = {
4208 .help = "flow age timeout value",
4209 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_age,
4211 .next = NEXT(action_age, NEXT_ENTRY(UNSIGNED)),
4212 .call = parse_vc_conf,
4216 .help = "set a sample action",
4217 .next = NEXT(action_sample),
4218 .priv = PRIV_ACTION(SAMPLE,
4219 sizeof(struct action_sample_data)),
4220 .call = parse_vc_action_sample,
4222 [ACTION_SAMPLE_RATIO] = {
4224 .help = "flow sample ratio value",
4225 .next = NEXT(action_sample, NEXT_ENTRY(UNSIGNED)),
4226 .args = ARGS(ARGS_ENTRY_ARB
4227 (offsetof(struct action_sample_data, conf) +
4228 offsetof(struct rte_flow_action_sample, ratio),
4229 sizeof(((struct rte_flow_action_sample *)0)->
4232 [ACTION_SAMPLE_INDEX] = {
4234 .help = "the index of sample actions list",
4235 .next = NEXT(NEXT_ENTRY(ACTION_SAMPLE_INDEX_VALUE)),
4237 [ACTION_SAMPLE_INDEX_VALUE] = {
4240 .help = "unsigned integer value",
4241 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4242 .call = parse_vc_action_sample_index,
4243 .comp = comp_set_sample_index,
4245 /* Shared action destroy arguments. */
4246 [SHARED_ACTION_DESTROY_ID] = {
4247 .name = "action_id",
4248 .help = "specify a shared action id to destroy",
4249 .next = NEXT(next_sa_destroy_attr,
4250 NEXT_ENTRY(SHARED_ACTION_ID)),
4251 .args = ARGS(ARGS_ENTRY_PTR(struct buffer,
4252 args.sa_destroy.action_id)),
4253 .call = parse_sa_destroy,
4255 /* Shared action create arguments. */
4256 [SHARED_ACTION_CREATE_ID] = {
4257 .name = "action_id",
4258 .help = "specify a shared action id to create",
4259 .next = NEXT(next_sa_create_attr,
4260 NEXT_ENTRY(SHARED_ACTION_ID)),
4261 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
4265 .help = "apply shared action by id",
4266 .priv = PRIV_ACTION(SHARED, 0),
4267 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_ID2PTR)),
4268 .args = ARGS(ARGS_ENTRY_ARB(0, sizeof(uint32_t))),
4271 [SHARED_ACTION_ID2PTR] = {
4272 .name = "{action_id}",
4273 .type = "SHARED_ACTION_ID",
4274 .help = "shared action id",
4275 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4276 .call = parse_sa_id2ptr,
4279 [SHARED_ACTION_INGRESS] = {
4281 .help = "affect rule to ingress",
4282 .next = NEXT(next_sa_create_attr),
4285 [SHARED_ACTION_EGRESS] = {
4287 .help = "affect rule to egress",
4288 .next = NEXT(next_sa_create_attr),
4291 [SHARED_ACTION_TRANSFER] = {
4293 .help = "affect rule to transfer",
4294 .next = NEXT(next_sa_create_attr),
4297 [SHARED_ACTION_SPEC] = {
4299 .help = "specify action to share",
4300 .next = NEXT(next_action),
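/*
 * Each token entry above names the tokens allowed after it (.next) and
 * pushes storage descriptors (.args) for the values it expects; the
 * helpers below walk that table and maintain the small per-context
 * argument stack those descriptors travel on while a command line such
 * as "flow create 0 ingress pattern eth / end actions queue index 0 / end"
 * (syntax shown for illustration) is parsed token by token.
 */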
4304 /** Remove and return last entry from argument stack. */
4305 static const struct arg *
4306 pop_args(struct context *ctx)
4308 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
4311 /** Add entry on top of the argument stack. */
4313 push_args(struct context *ctx, const struct arg *arg)
4315 if (ctx->args_num == CTX_STACK_SIZE)
4317 ctx->args[ctx->args_num++] = arg;
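/*
 * Usage pattern for the stack: a token's .call handler pushes run-time
 * descriptors with push_args(), and the generic value parsers pop_args()
 * them to learn where and how to store the parsed value; parse_prefix()
 * and parse_vc_action_rss_queue() below both follow this sequence.
 */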
4321 /** Spread value into buffer according to bit-mask. */
4323 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
4325 uint32_t i = arg->size;
4333 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4342 unsigned int shift = 0;
4343 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
4345 for (shift = 0; arg->mask[i] >> shift; ++shift) {
4346 if (!(arg->mask[i] & (1 << shift)))
4351 *buf &= ~(1 << shift);
4352 *buf |= (val & 1) << shift;
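/*
 * Only the bit positions set in arg->mask are touched: each one receives
 * the next low-order bit of val, so passing -1 turns every masked bit on
 * (used to build object masks) while 0 clears them all.
 */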
4360 /** Compare a string with a partial one of a given length. */
4362 strcmp_partial(const char *full, const char *partial, size_t partial_len)
4364 int r = strncmp(full, partial, partial_len);
4368 if (strlen(full) <= partial_len)
4370 return full[partial_len];
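/*
 * "partial" is the current command-line token, of length partial_len and
 * not necessarily NUL-terminated; the result is zero only for an exact,
 * complete match, so abbreviations such as "ingr" for "ingress" are
 * rejected rather than matched as prefixes.
 */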
4374 * Parse a prefix length and generate a bit-mask.
4376 * Last argument (ctx->args) is retrieved to determine mask size, storage
4377 * location and whether the result must use network byte ordering.
4380 parse_prefix(struct context *ctx, const struct token *token,
4381 const char *str, unsigned int len,
4382 void *buf, unsigned int size)
4384 const struct arg *arg = pop_args(ctx);
4385 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
4392 /* Argument is expected. */
4396 u = strtoumax(str, &end, 0);
4397 if (errno || (size_t)(end - str) != len)
4402 extra = arg_entry_bf_fill(NULL, 0, arg);
4411 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
4412 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4419 if (bytes > size || bytes + !!extra > size)
4423 buf = (uint8_t *)ctx->object + arg->offset;
4424 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4426 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
4427 memset(buf, 0x00, size - bytes);
4429 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
4433 memset(buf, 0xff, bytes);
4434 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
4436 ((uint8_t *)buf)[bytes] = conv[extra];
4439 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4442 push_args(ctx, arg);
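/*
 * For a plain byte field the conversion is straightforward: a prefix of
 * 24 over a 4-byte IPv4 address produces the mask ff ff ff 00, and a
 * prefix of 26 produces ff ff ff c0, the partial byte being taken from
 * the conv[] table above.
 */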
4446 /** Default parsing function for token name matching. */
4448 parse_default(struct context *ctx, const struct token *token,
4449 const char *str, unsigned int len,
4450 void *buf, unsigned int size)
4455 if (strcmp_partial(token->name, str, len))
4460 /** Parse flow command, initialize output buffer for subsequent tokens. */
4462 parse_init(struct context *ctx, const struct token *token,
4463 const char *str, unsigned int len,
4464 void *buf, unsigned int size)
4466 struct buffer *out = buf;
4468 /* Token name must match. */
4469 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4471 /* Nothing else to do if there is no buffer. */
4474 /* Make sure buffer is large enough. */
4475 if (size < sizeof(*out))
4477 /* Initialize buffer. */
4478 memset(out, 0x00, sizeof(*out));
4479 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
4482 ctx->objmask = NULL;
4486 /** Parse tokens for shared action commands. */
4488 parse_sa(struct context *ctx, const struct token *token,
4489 const char *str, unsigned int len,
4490 void *buf, unsigned int size)
4492 struct buffer *out = buf;
4494 /* Token name must match. */
4495 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4497 /* Nothing else to do if there is no buffer. */
4500 if (!out->command) {
4501 if (ctx->curr != SHARED_ACTION)
4503 if (sizeof(*out) > size)
4505 out->command = ctx->curr;
4508 ctx->objmask = NULL;
4509 out->args.vc.data = (uint8_t *)out + size;
4512 switch (ctx->curr) {
4513 case SHARED_ACTION_CREATE:
4514 case SHARED_ACTION_UPDATE:
4515 out->args.vc.actions =
4516 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4518 out->args.vc.attr.group = UINT32_MAX;
4520 case SHARED_ACTION_QUERY:
4521 out->command = ctx->curr;
4524 ctx->objmask = NULL;
4526 case SHARED_ACTION_EGRESS:
4527 out->args.vc.attr.egress = 1;
4529 case SHARED_ACTION_INGRESS:
4530 out->args.vc.attr.ingress = 1;
4532 case SHARED_ACTION_TRANSFER:
4533 out->args.vc.attr.transfer = 1;
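/*
 * This handler backs the "flow shared_action ..." commands; a create
 * request such as "flow shared_action 0 create action_id 5 ingress
 * action rss / end" (illustrative syntax) goes through the
 * SHARED_ACTION_CREATE case above, which reuses the validate/create
 * action storage for the shared action specification.
 */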
4541 /** Parse tokens for shared action destroy command. */
4543 parse_sa_destroy(struct context *ctx, const struct token *token,
4544 const char *str, unsigned int len,
4545 void *buf, unsigned int size)
4547 struct buffer *out = buf;
4548 uint32_t *action_id;
4550 /* Token name must match. */
4551 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4553 /* Nothing else to do if there is no buffer. */
4556 if (!out->command || out->command == SHARED_ACTION) {
4557 if (ctx->curr != SHARED_ACTION_DESTROY)
4559 if (sizeof(*out) > size)
4561 out->command = ctx->curr;
4564 ctx->objmask = NULL;
4565 out->args.sa_destroy.action_id =
4566 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4570 action_id = out->args.sa_destroy.action_id
4571 + out->args.sa_destroy.action_id_n++;
4572 if ((uint8_t *)action_id > (uint8_t *)out + size)
4575 ctx->object = action_id;
4576 ctx->objmask = NULL;
4580 /** Parse tokens for validate/create commands. */
4582 parse_vc(struct context *ctx, const struct token *token,
4583 const char *str, unsigned int len,
4584 void *buf, unsigned int size)
4586 struct buffer *out = buf;
4590 /* Token name must match. */
4591 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4593 /* Nothing else to do if there is no buffer. */
4596 if (!out->command) {
4597 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
4599 if (sizeof(*out) > size)
4601 out->command = ctx->curr;
4604 ctx->objmask = NULL;
4605 out->args.vc.data = (uint8_t *)out + size;
4609 switch (ctx->curr) {
4611 ctx->object = &out->args.vc.attr;
4615 ctx->object = &out->args.vc.tunnel_ops;
4618 ctx->objmask = NULL;
4619 switch (ctx->curr) {
4624 out->args.vc.tunnel_ops.enabled = 1;
4625 out->args.vc.tunnel_ops.actions = 1;
4628 out->args.vc.tunnel_ops.enabled = 1;
4629 out->args.vc.tunnel_ops.items = 1;
4632 out->args.vc.attr.ingress = 1;
4635 out->args.vc.attr.egress = 1;
4638 out->args.vc.attr.transfer = 1;
4641 out->args.vc.pattern =
4642 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4644 ctx->object = out->args.vc.pattern;
4645 ctx->objmask = NULL;
4648 out->args.vc.actions =
4649 (void *)RTE_ALIGN_CEIL((uintptr_t)
4650 (out->args.vc.pattern +
4651 out->args.vc.pattern_n),
4653 ctx->object = out->args.vc.actions;
4654 ctx->objmask = NULL;
4661 if (!out->args.vc.actions) {
4662 const struct parse_item_priv *priv = token->priv;
4663 struct rte_flow_item *item =
4664 out->args.vc.pattern + out->args.vc.pattern_n;
4666 data_size = priv->size * 3; /* spec, last, mask */
4667 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4668 (out->args.vc.data - data_size),
4670 if ((uint8_t *)item + sizeof(*item) > data)
4672 *item = (struct rte_flow_item){
4675 ++out->args.vc.pattern_n;
4677 ctx->objmask = NULL;
4679 const struct parse_action_priv *priv = token->priv;
4680 struct rte_flow_action *action =
4681 out->args.vc.actions + out->args.vc.actions_n;
4683 data_size = priv->size; /* configuration */
4684 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4685 (out->args.vc.data - data_size),
4687 if ((uint8_t *)action + sizeof(*action) > data)
4689 *action = (struct rte_flow_action){
4691 .conf = data_size ? data : NULL,
4693 ++out->args.vc.actions_n;
4694 ctx->object = action;
4695 ctx->objmask = NULL;
4697 memset(data, 0, data_size);
4698 out->args.vc.data = data;
4699 ctx->objdata = data_size;
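/*
 * Buffer layout: pattern items and actions grow upward right after the
 * fixed-size header while their spec/last/mask or configuration storage
 * is carved downward from out->args.vc.data, which starts at the end of
 * the buffer; each new entry therefore checks that the two regions do
 * not collide before committing it.
 */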
4703 /** Parse pattern item parameter type. */
4705 parse_vc_spec(struct context *ctx, const struct token *token,
4706 const char *str, unsigned int len,
4707 void *buf, unsigned int size)
4709 struct buffer *out = buf;
4710 struct rte_flow_item *item;
4716 /* Token name must match. */
4717 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4719 /* Parse parameter types. */
4720 switch (ctx->curr) {
4721 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
4727 case ITEM_PARAM_SPEC:
4730 case ITEM_PARAM_LAST:
4733 case ITEM_PARAM_PREFIX:
4734 /* Modify next token to expect a prefix. */
4735 if (ctx->next_num < 2)
4737 ctx->next[ctx->next_num - 2] = prefix;
4739 case ITEM_PARAM_MASK:
4745 /* Nothing else to do if there is no buffer. */
4748 if (!out->args.vc.pattern_n)
4750 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4751 data_size = ctx->objdata / 3; /* spec, last, mask */
4752 /* Point to selected object. */
4753 ctx->object = out->args.vc.data + (data_size * index);
4755 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
4756 item->mask = ctx->objmask;
4758 ctx->objmask = NULL;
4759 /* Update relevant item pointer. */
4760 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
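/*
 * Note how the "prefix" parameter type above temporarily rewrites the
 * token expected next (ctx->next[ctx->next_num - 2]) so that the
 * following value is interpreted as a prefix length and converted by
 * parse_prefix() instead of being stored verbatim.
 */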
4765 /** Parse action configuration field. */
4767 parse_vc_conf(struct context *ctx, const struct token *token,
4768 const char *str, unsigned int len,
4769 void *buf, unsigned int size)
4771 struct buffer *out = buf;
4774 /* Token name must match. */
4775 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4777 /* Nothing else to do if there is no buffer. */
4780 /* Point to selected object. */
4781 ctx->object = out->args.vc.data;
4782 ctx->objmask = NULL;
4786 /** Parse eCPRI common header type field. */
4788 parse_vc_item_ecpri_type(struct context *ctx, const struct token *token,
4789 const char *str, unsigned int len,
4790 void *buf, unsigned int size)
4792 struct rte_flow_item_ecpri *ecpri;
4793 struct rte_flow_item_ecpri *ecpri_mask;
4794 struct rte_flow_item *item;
4797 struct buffer *out = buf;
4798 const struct arg *arg;
4801 /* Token name must match. */
4802 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4804 switch (ctx->curr) {
4805 case ITEM_ECPRI_COMMON_TYPE_IQ_DATA:
4806 msg_type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
4808 case ITEM_ECPRI_COMMON_TYPE_RTC_CTRL:
4809 msg_type = RTE_ECPRI_MSG_TYPE_RTC_CTRL;
4811 case ITEM_ECPRI_COMMON_TYPE_DLY_MSR:
4812 msg_type = RTE_ECPRI_MSG_TYPE_DLY_MSR;
4819 arg = pop_args(ctx);
4822 ecpri = (struct rte_flow_item_ecpri *)out->args.vc.data;
4823 ecpri->hdr.common.type = msg_type;
4824 data_size = ctx->objdata / 3; /* spec, last, mask */
4825 ecpri_mask = (struct rte_flow_item_ecpri *)(out->args.vc.data +
4827 ecpri_mask->hdr.common.type = 0xFF;
4829 ecpri->hdr.common.u32 = rte_cpu_to_be_32(ecpri->hdr.common.u32);
4830 ecpri_mask->hdr.common.u32 =
4831 rte_cpu_to_be_32(ecpri_mask->hdr.common.u32);
4833 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4835 item->mask = ecpri_mask;
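/*
 * The selected message type is written straight into the already
 * allocated eCPRI spec and its mask (type byte fully covered), so the
 * type is chosen by keyword on the command line, e.g. something like
 * "pattern ecpri common type rtc_ctrl ..." (syntax shown for
 * illustration), rather than by a raw numeric value.
 */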
4839 /** Parse RSS action. */
4841 parse_vc_action_rss(struct context *ctx, const struct token *token,
4842 const char *str, unsigned int len,
4843 void *buf, unsigned int size)
4845 struct buffer *out = buf;
4846 struct rte_flow_action *action;
4847 struct action_rss_data *action_rss_data;
4851 ret = parse_vc(ctx, token, str, len, buf, size);
4854 /* Nothing else to do if there is no buffer. */
4857 if (!out->args.vc.actions_n)
4859 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4860 /* Point to selected object. */
4861 ctx->object = out->args.vc.data;
4862 ctx->objmask = NULL;
4863 /* Set up default configuration. */
4864 action_rss_data = ctx->object;
4865 *action_rss_data = (struct action_rss_data){
4866 .conf = (struct rte_flow_action_rss){
4867 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4871 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
4873 .queue = action_rss_data->queue,
4877 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
4878 action_rss_data->queue[i] = i;
4879 action->conf = &action_rss_data->conf;
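/*
 * With these defaults a bare "actions rss / end" spreads traffic over
 * the first configured Rx queues; a fuller form such as "flow create 0
 * ingress pattern eth / end actions rss types ipv4-udp end queues 0 1
 * end / end" (illustrative) then overrides the hash types and queue
 * list through the tokens handled below.
 */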
4884 * Parse func field for RSS action.
4886 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
4887 * ACTION_RSS_FUNC_* index that called this function.
4890 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
4891 const char *str, unsigned int len,
4892 void *buf, unsigned int size)
4894 struct action_rss_data *action_rss_data;
4895 enum rte_eth_hash_function func;
4899 /* Token name must match. */
4900 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4902 switch (ctx->curr) {
4903 case ACTION_RSS_FUNC_DEFAULT:
4904 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4906 case ACTION_RSS_FUNC_TOEPLITZ:
4907 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
4909 case ACTION_RSS_FUNC_SIMPLE_XOR:
4910 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
4912 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
4913 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
4920 action_rss_data = ctx->object;
4921 action_rss_data->conf.func = func;
4926 * Parse type field for RSS action.
4928 * Valid tokens are type field names and the "end" token.
4931 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
4932 const char *str, unsigned int len,
4933 void *buf, unsigned int size)
4935 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
4936 struct action_rss_data *action_rss_data;
4942 if (ctx->curr != ACTION_RSS_TYPE)
4944 if (!(ctx->objdata >> 16) && ctx->object) {
4945 action_rss_data = ctx->object;
4946 action_rss_data->conf.types = 0;
4948 if (!strcmp_partial("end", str, len)) {
4949 ctx->objdata &= 0xffff;
4952 for (i = 0; rss_type_table[i].str; ++i)
4953 if (!strcmp_partial(rss_type_table[i].str, str, len))
4955 if (!rss_type_table[i].str)
4957 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
4959 if (ctx->next_num == RTE_DIM(ctx->next))
4961 ctx->next[ctx->next_num++] = next;
4964 action_rss_data = ctx->object;
4965 action_rss_data->conf.types |= rss_type_table[i].rss_type;
4970 * Parse queue field for RSS action.
4972 * Valid tokens are queue indices and the "end" token.
4975 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4976 const char *str, unsigned int len,
4977 void *buf, unsigned int size)
4979 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4980 struct action_rss_data *action_rss_data;
4981 const struct arg *arg;
4988 if (ctx->curr != ACTION_RSS_QUEUE)
4990 i = ctx->objdata >> 16;
4991 if (!strcmp_partial("end", str, len)) {
4992 ctx->objdata &= 0xffff;
4995 if (i >= ACTION_RSS_QUEUE_NUM)
4997 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4998 i * sizeof(action_rss_data->queue[i]),
4999 sizeof(action_rss_data->queue[i]));
5000 if (push_args(ctx, arg))
5002 ret = parse_int(ctx, token, str, len, NULL, 0);
5008 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
5010 if (ctx->next_num == RTE_DIM(ctx->next))
5012 ctx->next[ctx->next_num++] = next;
5016 action_rss_data = ctx->object;
5017 action_rss_data->conf.queue_num = i;
5018 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
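/*
 * ctx->objdata doubles as scratch state here: its upper 16 bits count
 * how many queue indices have been accepted so far while the lower 16
 * bits keep their usual meaning, and the "end" keyword freezes that
 * count into conf.queue_num.
 */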
5022 /** Parse VXLAN encap action. */
5024 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
5025 const char *str, unsigned int len,
5026 void *buf, unsigned int size)
5028 struct buffer *out = buf;
5029 struct rte_flow_action *action;
5030 struct action_vxlan_encap_data *action_vxlan_encap_data;
5033 ret = parse_vc(ctx, token, str, len, buf, size);
5036 /* Nothing else to do if there is no buffer. */
5039 if (!out->args.vc.actions_n)
5041 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5042 /* Point to selected object. */
5043 ctx->object = out->args.vc.data;
5044 ctx->objmask = NULL;
5045 /* Set up default configuration. */
5046 action_vxlan_encap_data = ctx->object;
5047 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
5048 .conf = (struct rte_flow_action_vxlan_encap){
5049 .definition = action_vxlan_encap_data->items,
5053 .type = RTE_FLOW_ITEM_TYPE_ETH,
5054 .spec = &action_vxlan_encap_data->item_eth,
5055 .mask = &rte_flow_item_eth_mask,
5058 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5059 .spec = &action_vxlan_encap_data->item_vlan,
5060 .mask = &rte_flow_item_vlan_mask,
5063 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5064 .spec = &action_vxlan_encap_data->item_ipv4,
5065 .mask = &rte_flow_item_ipv4_mask,
5068 .type = RTE_FLOW_ITEM_TYPE_UDP,
5069 .spec = &action_vxlan_encap_data->item_udp,
5070 .mask = &rte_flow_item_udp_mask,
5073 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
5074 .spec = &action_vxlan_encap_data->item_vxlan,
5075 .mask = &rte_flow_item_vxlan_mask,
5078 .type = RTE_FLOW_ITEM_TYPE_END,
5083 .tci = vxlan_encap_conf.vlan_tci,
5087 .src_addr = vxlan_encap_conf.ipv4_src,
5088 .dst_addr = vxlan_encap_conf.ipv4_dst,
5091 .src_port = vxlan_encap_conf.udp_src,
5092 .dst_port = vxlan_encap_conf.udp_dst,
5094 .item_vxlan.flags = 0,
5096 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
5097 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5098 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
5099 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5100 if (!vxlan_encap_conf.select_ipv4) {
5101 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
5102 &vxlan_encap_conf.ipv6_src,
5103 sizeof(vxlan_encap_conf.ipv6_src));
5104 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
5105 &vxlan_encap_conf.ipv6_dst,
5106 sizeof(vxlan_encap_conf.ipv6_dst));
5107 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
5108 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5109 .spec = &action_vxlan_encap_data->item_ipv6,
5110 .mask = &rte_flow_item_ipv6_mask,
5113 if (!vxlan_encap_conf.select_vlan)
5114 action_vxlan_encap_data->items[1].type =
5115 RTE_FLOW_ITEM_TYPE_VOID;
5116 if (vxlan_encap_conf.select_tos_ttl) {
5117 if (vxlan_encap_conf.select_ipv4) {
5118 static struct rte_flow_item_ipv4 ipv4_mask_tos;
5120 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
5121 sizeof(ipv4_mask_tos));
5122 ipv4_mask_tos.hdr.type_of_service = 0xff;
5123 ipv4_mask_tos.hdr.time_to_live = 0xff;
5124 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
5125 vxlan_encap_conf.ip_tos;
5126 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
5127 vxlan_encap_conf.ip_ttl;
5128 action_vxlan_encap_data->items[2].mask =
5131 static struct rte_flow_item_ipv6 ipv6_mask_tos;
5133 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
5134 sizeof(ipv6_mask_tos));
5135 ipv6_mask_tos.hdr.vtc_flow |=
5136 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
5137 ipv6_mask_tos.hdr.hop_limits = 0xff;
5138 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
5140 ((uint32_t)vxlan_encap_conf.ip_tos <<
5141 RTE_IPV6_HDR_TC_SHIFT);
5142 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
5143 vxlan_encap_conf.ip_ttl;
5144 action_vxlan_encap_data->items[2].mask =
5148 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
5149 RTE_DIM(vxlan_encap_conf.vni));
5150 action->conf = &action_vxlan_encap_data->conf;
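/*
 * Every encapsulation field above comes from the global vxlan_encap_conf
 * filled in by the "set vxlan ..." command, so the usual sequence is to
 * run "set vxlan ip-version ipv4 vni 4 udp-src ... eth-dst ..." first
 * and then attach "actions vxlan_encap / queue index 0 / end" to a flow
 * (command syntax shown for illustration).
 */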
5154 /** Parse NVGRE encap action. */
5156 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
5157 const char *str, unsigned int len,
5158 void *buf, unsigned int size)
5160 struct buffer *out = buf;
5161 struct rte_flow_action *action;
5162 struct action_nvgre_encap_data *action_nvgre_encap_data;
5165 ret = parse_vc(ctx, token, str, len, buf, size);
5168 /* Nothing else to do if there is no buffer. */
5171 if (!out->args.vc.actions_n)
5173 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5174 /* Point to selected object. */
5175 ctx->object = out->args.vc.data;
5176 ctx->objmask = NULL;
5177 /* Set up default configuration. */
5178 action_nvgre_encap_data = ctx->object;
5179 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
5180 .conf = (struct rte_flow_action_nvgre_encap){
5181 .definition = action_nvgre_encap_data->items,
5185 .type = RTE_FLOW_ITEM_TYPE_ETH,
5186 .spec = &action_nvgre_encap_data->item_eth,
5187 .mask = &rte_flow_item_eth_mask,
5190 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5191 .spec = &action_nvgre_encap_data->item_vlan,
5192 .mask = &rte_flow_item_vlan_mask,
5195 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5196 .spec = &action_nvgre_encap_data->item_ipv4,
5197 .mask = &rte_flow_item_ipv4_mask,
5200 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
5201 .spec = &action_nvgre_encap_data->item_nvgre,
5202 .mask = &rte_flow_item_nvgre_mask,
5205 .type = RTE_FLOW_ITEM_TYPE_END,
5210 .tci = nvgre_encap_conf.vlan_tci,
5214 .src_addr = nvgre_encap_conf.ipv4_src,
5215 .dst_addr = nvgre_encap_conf.ipv4_dst,
5217 .item_nvgre.flow_id = 0,
5219 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
5220 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5221 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
5222 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5223 if (!nvgre_encap_conf.select_ipv4) {
5224 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
5225 &nvgre_encap_conf.ipv6_src,
5226 sizeof(nvgre_encap_conf.ipv6_src));
5227 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
5228 &nvgre_encap_conf.ipv6_dst,
5229 sizeof(nvgre_encap_conf.ipv6_dst));
5230 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
5231 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5232 .spec = &action_nvgre_encap_data->item_ipv6,
5233 .mask = &rte_flow_item_ipv6_mask,
5236 if (!nvgre_encap_conf.select_vlan)
5237 action_nvgre_encap_data->items[1].type =
5238 RTE_FLOW_ITEM_TYPE_VOID;
5239 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
5240 RTE_DIM(nvgre_encap_conf.tni));
5241 action->conf = &action_nvgre_encap_data->conf;
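/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 * the NVGRE outer headers come from a prior "set nvgre" command, e.g.
 *   testpmd> set nvgre ip-version ipv4 tni 4 ip-src 127.0.0.1
 *            ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11
 *            eth-dst 22:22:22:22:22:22
 *   testpmd> flow create 0 ingress pattern eth / end
 *            actions nvgre_encap / queue index 0 / end
 */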
5245 /** Parse l2 encap action. */
5247 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
5248 const char *str, unsigned int len,
5249 void *buf, unsigned int size)
5251 struct buffer *out = buf;
5252 struct rte_flow_action *action;
5253 struct action_raw_encap_data *action_encap_data;
5254 struct rte_flow_item_eth eth = { .type = 0, };
5255 struct rte_flow_item_vlan vlan = {
5256 .tci = mplsoudp_encap_conf.vlan_tci,
5262 ret = parse_vc(ctx, token, str, len, buf, size);
5265 /* Nothing else to do if there is no buffer. */
5268 if (!out->args.vc.actions_n)
5270 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5271 /* Point to selected object. */
5272 ctx->object = out->args.vc.data;
5273 ctx->objmask = NULL;
5274 /* Copy the headers to the buffer. */
5275 action_encap_data = ctx->object;
5276 *action_encap_data = (struct action_raw_encap_data) {
5277 .conf = (struct rte_flow_action_raw_encap){
5278 .data = action_encap_data->data,
5282 header = action_encap_data->data;
5283 if (l2_encap_conf.select_vlan)
5284 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5285 else if (l2_encap_conf.select_ipv4)
5286 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5288 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5289 memcpy(eth.dst.addr_bytes,
5290 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5291 memcpy(eth.src.addr_bytes,
5292 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5293 memcpy(header, &eth, sizeof(eth));
5294 header += sizeof(eth);
5295 if (l2_encap_conf.select_vlan) {
5296 if (l2_encap_conf.select_ipv4)
5297 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5299 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5300 memcpy(header, &vlan, sizeof(vlan));
5301 header += sizeof(vlan);
5303 action_encap_data->conf.size = header -
5304 action_encap_data->data;
5305 action->conf = &action_encap_data->conf;
5309 /** Parse l2 decap action. */
5311 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
5312 const char *str, unsigned int len,
5313 void *buf, unsigned int size)
5315 struct buffer *out = buf;
5316 struct rte_flow_action *action;
5317 struct action_raw_decap_data *action_decap_data;
5318 struct rte_flow_item_eth eth = { .type = 0, };
5319 struct rte_flow_item_vlan vlan = {
5320 .tci = mplsoudp_encap_conf.vlan_tci,
5326 ret = parse_vc(ctx, token, str, len, buf, size);
5329 /* Nothing else to do if there is no buffer. */
5332 if (!out->args.vc.actions_n)
5334 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5335 /* Point to selected object. */
5336 ctx->object = out->args.vc.data;
5337 ctx->objmask = NULL;
5338 /* Copy the headers to the buffer. */
5339 action_decap_data = ctx->object;
5340 *action_decap_data = (struct action_raw_decap_data) {
5341 .conf = (struct rte_flow_action_raw_decap){
5342 .data = action_decap_data->data,
5346 header = action_decap_data->data;
5347 if (l2_decap_conf.select_vlan)
5348 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5349 memcpy(header, &eth, sizeof(eth));
5350 header += sizeof(eth);
5351 if (l2_decap_conf.select_vlan) {
5352 memcpy(header, &vlan, sizeof(vlan));
5353 header += sizeof(vlan);
5355 action_decap_data->conf.size = header -
5356 action_decap_data->data;
5357 action->conf = &action_decap_data->conf;
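/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 * l2_encap/l2_decap are emitted as raw encap/decap actions built from the
 * "set l2_encap" / "set l2_decap" configuration and are commonly paired
 * with the MPLSoGRE/MPLSoUDP actions below, e.g.
 *   testpmd> set l2_encap ip-version ipv4
 *            eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / mpls / end
 *            actions mplsoudp_decap / l2_encap / end
 */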
5361 #define ETHER_TYPE_MPLS_UNICAST 0x8847
5363 /** Parse MPLSOGRE encap action. */
5365 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
5366 const char *str, unsigned int len,
5367 void *buf, unsigned int size)
5369 struct buffer *out = buf;
5370 struct rte_flow_action *action;
5371 struct action_raw_encap_data *action_encap_data;
5372 struct rte_flow_item_eth eth = { .type = 0, };
5373 struct rte_flow_item_vlan vlan = {
5374 .tci = mplsogre_encap_conf.vlan_tci,
5377 struct rte_flow_item_ipv4 ipv4 = {
5379 .src_addr = mplsogre_encap_conf.ipv4_src,
5380 .dst_addr = mplsogre_encap_conf.ipv4_dst,
5381 .next_proto_id = IPPROTO_GRE,
5382 .version_ihl = RTE_IPV4_VHL_DEF,
5383 .time_to_live = IPDEFTTL,
5386 struct rte_flow_item_ipv6 ipv6 = {
5388 .proto = IPPROTO_GRE,
5389 .hop_limits = IPDEFTTL,
5392 struct rte_flow_item_gre gre = {
5393 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5395 struct rte_flow_item_mpls mpls = {
5401 ret = parse_vc(ctx, token, str, len, buf, size);
5404 /* Nothing else to do if there is no buffer. */
5407 if (!out->args.vc.actions_n)
5409 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5410 /* Point to selected object. */
5411 ctx->object = out->args.vc.data;
5412 ctx->objmask = NULL;
5413 /* Copy the headers to the buffer. */
5414 action_encap_data = ctx->object;
5415 *action_encap_data = (struct action_raw_encap_data) {
5416 .conf = (struct rte_flow_action_raw_encap){
5417 .data = action_encap_data->data,
5422 header = action_encap_data->data;
5423 if (mplsogre_encap_conf.select_vlan)
5424 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5425 else if (mplsogre_encap_conf.select_ipv4)
5426 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5428 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5429 memcpy(eth.dst.addr_bytes,
5430 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5431 memcpy(eth.src.addr_bytes,
5432 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5433 memcpy(header, &eth, sizeof(eth));
5434 header += sizeof(eth);
5435 if (mplsogre_encap_conf.select_vlan) {
5436 if (mplsogre_encap_conf.select_ipv4)
5437 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5439 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5440 memcpy(header, &vlan, sizeof(vlan));
5441 header += sizeof(vlan);
5443 if (mplsogre_encap_conf.select_ipv4) {
5444 memcpy(header, &ipv4, sizeof(ipv4));
5445 header += sizeof(ipv4);
5447 memcpy(&ipv6.hdr.src_addr,
5448 &mplsogre_encap_conf.ipv6_src,
5449 sizeof(mplsogre_encap_conf.ipv6_src));
5450 memcpy(&ipv6.hdr.dst_addr,
5451 &mplsogre_encap_conf.ipv6_dst,
5452 sizeof(mplsogre_encap_conf.ipv6_dst));
5453 memcpy(header, &ipv6, sizeof(ipv6));
5454 header += sizeof(ipv6);
5456 memcpy(header, &gre, sizeof(gre));
5457 header += sizeof(gre);
5458 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
5459 RTE_DIM(mplsogre_encap_conf.label));
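/* Set the MPLS Bottom-of-Stack (S) bit in the last label byte. */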
5460 mpls.label_tc_s[2] |= 0x1;
5461 memcpy(header, &mpls, sizeof(mpls));
5462 header += sizeof(mpls);
5463 action_encap_data->conf.size = header -
5464 action_encap_data->data;
5465 action->conf = &action_encap_data->conf;
5469 /** Parse MPLSOGRE decap action. */
5471 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
5472 const char *str, unsigned int len,
5473 void *buf, unsigned int size)
5475 struct buffer *out = buf;
5476 struct rte_flow_action *action;
5477 struct action_raw_decap_data *action_decap_data;
5478 struct rte_flow_item_eth eth = { .type = 0, };
5479 struct rte_flow_item_vlan vlan = {.tci = 0};
5480 struct rte_flow_item_ipv4 ipv4 = {
5482 .next_proto_id = IPPROTO_GRE,
5485 struct rte_flow_item_ipv6 ipv6 = {
5487 .proto = IPPROTO_GRE,
5490 struct rte_flow_item_gre gre = {
5491 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5493 struct rte_flow_item_mpls mpls;
5497 ret = parse_vc(ctx, token, str, len, buf, size);
5500 /* Nothing else to do if there is no buffer. */
5503 if (!out->args.vc.actions_n)
5505 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5506 /* Point to selected object. */
5507 ctx->object = out->args.vc.data;
5508 ctx->objmask = NULL;
5509 /* Copy the headers to the buffer. */
5510 action_decap_data = ctx->object;
5511 *action_decap_data = (struct action_raw_decap_data) {
5512 .conf = (struct rte_flow_action_raw_decap){
5513 .data = action_decap_data->data,
5517 header = action_decap_data->data;
5518 if (mplsogre_decap_conf.select_vlan)
5519 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5520 else if (mplsogre_encap_conf.select_ipv4)
5521 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5523 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5524 memcpy(eth.dst.addr_bytes,
5525 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5526 memcpy(eth.src.addr_bytes,
5527 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5528 memcpy(header, &eth, sizeof(eth));
5529 header += sizeof(eth);
5530 if (mplsogre_encap_conf.select_vlan) {
5531 if (mplsogre_encap_conf.select_ipv4)
5532 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5534 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5535 memcpy(header, &vlan, sizeof(vlan));
5536 header += sizeof(vlan);
5538 if (mplsogre_encap_conf.select_ipv4) {
5539 memcpy(header, &ipv4, sizeof(ipv4));
5540 header += sizeof(ipv4);
5542 memcpy(header, &ipv6, sizeof(ipv6));
5543 header += sizeof(ipv6);
5545 memcpy(header, &gre, sizeof(gre));
5546 header += sizeof(gre);
5547 memset(&mpls, 0, sizeof(mpls));
5548 memcpy(header, &mpls, sizeof(mpls));
5549 header += sizeof(mpls);
5550 action_decap_data->conf.size = header -
5551 action_decap_data->data;
5552 action->conf = &action_decap_data->conf;
5556 /** Parse MPLSOUDP encap action. */
5558 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
5559 const char *str, unsigned int len,
5560 void *buf, unsigned int size)
5562 struct buffer *out = buf;
5563 struct rte_flow_action *action;
5564 struct action_raw_encap_data *action_encap_data;
5565 struct rte_flow_item_eth eth = { .type = 0, };
5566 struct rte_flow_item_vlan vlan = {
5567 .tci = mplsoudp_encap_conf.vlan_tci,
5570 struct rte_flow_item_ipv4 ipv4 = {
5572 .src_addr = mplsoudp_encap_conf.ipv4_src,
5573 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
5574 .next_proto_id = IPPROTO_UDP,
5575 .version_ihl = RTE_IPV4_VHL_DEF,
5576 .time_to_live = IPDEFTTL,
5579 struct rte_flow_item_ipv6 ipv6 = {
5581 .proto = IPPROTO_UDP,
5582 .hop_limits = IPDEFTTL,
5585 struct rte_flow_item_udp udp = {
5587 .src_port = mplsoudp_encap_conf.udp_src,
5588 .dst_port = mplsoudp_encap_conf.udp_dst,
5591 struct rte_flow_item_mpls mpls;
5595 ret = parse_vc(ctx, token, str, len, buf, size);
5598 /* Nothing else to do if there is no buffer. */
5601 if (!out->args.vc.actions_n)
5603 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5604 /* Point to selected object. */
5605 ctx->object = out->args.vc.data;
5606 ctx->objmask = NULL;
5607 /* Copy the headers to the buffer. */
5608 action_encap_data = ctx->object;
5609 *action_encap_data = (struct action_raw_encap_data) {
5610 .conf = (struct rte_flow_action_raw_encap){
5611 .data = action_encap_data->data,
5616 header = action_encap_data->data;
5617 if (mplsoudp_encap_conf.select_vlan)
5618 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5619 else if (mplsoudp_encap_conf.select_ipv4)
5620 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5622 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5623 memcpy(eth.dst.addr_bytes,
5624 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5625 memcpy(eth.src.addr_bytes,
5626 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5627 memcpy(header, &eth, sizeof(eth));
5628 header += sizeof(eth);
5629 if (mplsoudp_encap_conf.select_vlan) {
5630 if (mplsoudp_encap_conf.select_ipv4)
5631 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5633 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5634 memcpy(header, &vlan, sizeof(vlan));
5635 header += sizeof(vlan);
5637 if (mplsoudp_encap_conf.select_ipv4) {
5638 memcpy(header, &ipv4, sizeof(ipv4));
5639 header += sizeof(ipv4);
5641 memcpy(&ipv6.hdr.src_addr,
5642 &mplsoudp_encap_conf.ipv6_src,
5643 sizeof(mplsoudp_encap_conf.ipv6_src));
5644 memcpy(&ipv6.hdr.dst_addr,
5645 &mplsoudp_encap_conf.ipv6_dst,
5646 sizeof(mplsoudp_encap_conf.ipv6_dst));
5647 memcpy(header, &ipv6, sizeof(ipv6));
5648 header += sizeof(ipv6);
5650 memcpy(header, &udp, sizeof(udp));
5651 header += sizeof(udp);
5652 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
5653 RTE_DIM(mplsoudp_encap_conf.label));
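/* Set the MPLS Bottom-of-Stack (S) bit in the last label byte. */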
5654 mpls.label_tc_s[2] |= 0x1;
5655 memcpy(header, &mpls, sizeof(mpls));
5656 header += sizeof(mpls);
5657 action_encap_data->conf.size = header -
5658 action_encap_data->data;
5659 action->conf = &action_encap_data->conf;
5663 /** Parse MPLSOUDP decap action. */
5665 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
5666 const char *str, unsigned int len,
5667 void *buf, unsigned int size)
5669 struct buffer *out = buf;
5670 struct rte_flow_action *action;
5671 struct action_raw_decap_data *action_decap_data;
5672 struct rte_flow_item_eth eth = { .type = 0, };
5673 struct rte_flow_item_vlan vlan = {.tci = 0};
5674 struct rte_flow_item_ipv4 ipv4 = {
5676 .next_proto_id = IPPROTO_UDP,
5679 struct rte_flow_item_ipv6 ipv6 = {
5681 .proto = IPPROTO_UDP,
5684 struct rte_flow_item_udp udp = {
5686 .dst_port = rte_cpu_to_be_16(6635),
5689 struct rte_flow_item_mpls mpls;
5693 ret = parse_vc(ctx, token, str, len, buf, size);
5696 /* Nothing else to do if there is no buffer. */
5699 if (!out->args.vc.actions_n)
5701 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5702 /* Point to selected object. */
5703 ctx->object = out->args.vc.data;
5704 ctx->objmask = NULL;
5705 /* Copy the headers to the buffer. */
5706 action_decap_data = ctx->object;
5707 *action_decap_data = (struct action_raw_decap_data) {
5708 .conf = (struct rte_flow_action_raw_decap){
5709 .data = action_decap_data->data,
5713 header = action_decap_data->data;
5714 if (mplsoudp_decap_conf.select_vlan)
5715 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5716 else if (mplsoudp_encap_conf.select_ipv4)
5717 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5719 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5720 memcpy(eth.dst.addr_bytes,
5721 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5722 memcpy(eth.src.addr_bytes,
5723 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5724 memcpy(header, &eth, sizeof(eth));
5725 header += sizeof(eth);
5726 if (mplsoudp_encap_conf.select_vlan) {
5727 if (mplsoudp_encap_conf.select_ipv4)
5728 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5730 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5731 memcpy(header, &vlan, sizeof(vlan));
5732 header += sizeof(vlan);
5734 if (mplsoudp_encap_conf.select_ipv4) {
5735 memcpy(header, &ipv4, sizeof(ipv4));
5736 header += sizeof(ipv4);
5738 memcpy(header, &ipv6, sizeof(ipv6));
5739 header += sizeof(ipv6);
5741 memcpy(header, &udp, sizeof(udp));
5742 header += sizeof(udp);
5743 memset(&mpls, 0, sizeof(mpls));
5744 memcpy(header, &mpls, sizeof(mpls));
5745 header += sizeof(mpls);
5746 action_decap_data->conf.size = header -
5747 action_decap_data->data;
5748 action->conf = &action_decap_data->conf;
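/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 *   testpmd> set mplsoudp_encap ip-version ipv4 label 4 udp-src 5 udp-dst 10
 *            ip-src 127.0.0.1 ip-dst 128.0.0.1
 *            eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   testpmd> flow create 0 egress pattern eth / end
 *            actions l2_decap / mplsoudp_encap / queue index 0 / end
 * The decap direction is typically "mplsoudp_decap / l2_encap".
 */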
5753 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
5754 const char *str, unsigned int len, void *buf,
5757 struct action_raw_decap_data *action_raw_decap_data;
5758 struct rte_flow_action *action;
5759 const struct arg *arg;
5760 struct buffer *out = buf;
5764 RTE_SET_USED(token);
5767 arg = ARGS_ENTRY_ARB_BOUNDED
5768 (offsetof(struct action_raw_decap_data, idx),
5769 sizeof(((struct action_raw_decap_data *)0)->idx),
5770 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5771 if (push_args(ctx, arg))
5773 ret = parse_int(ctx, token, str, len, NULL, 0);
5780 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5781 action_raw_decap_data = ctx->object;
5782 idx = action_raw_decap_data->idx;
5783 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
5784 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
5785 action->conf = &action_raw_decap_data->conf;
5791 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
5792 const char *str, unsigned int len, void *buf,
5795 struct action_raw_encap_data *action_raw_encap_data;
5796 struct rte_flow_action *action;
5797 const struct arg *arg;
5798 struct buffer *out = buf;
5802 RTE_SET_USED(token);
5805 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
5807 arg = ARGS_ENTRY_ARB_BOUNDED
5808 (offsetof(struct action_raw_encap_data, idx),
5809 sizeof(((struct action_raw_encap_data *)0)->idx),
5810 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5811 if (push_args(ctx, arg))
5813 ret = parse_int(ctx, token, str, len, NULL, 0);
5820 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5821 action_raw_encap_data = ctx->object;
5822 idx = action_raw_encap_data->idx;
5823 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
5824 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
5825 action_raw_encap_data->conf.preserve = NULL;
5826 action->conf = &action_raw_encap_data->conf;
5831 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
5832 const char *str, unsigned int len, void *buf,
5835 struct buffer *out = buf;
5836 struct rte_flow_action *action;
5837 struct action_raw_encap_data *action_raw_encap_data = NULL;
5840 ret = parse_vc(ctx, token, str, len, buf, size);
5843 /* Nothing else to do if there is no buffer. */
5846 if (!out->args.vc.actions_n)
5848 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5849 /* Point to selected object. */
5850 ctx->object = out->args.vc.data;
5851 ctx->objmask = NULL;
5852 /* Copy the headers to the buffer. */
5853 action_raw_encap_data = ctx->object;
5854 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
5855 action_raw_encap_data->conf.preserve = NULL;
5856 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
5857 action->conf = &action_raw_encap_data->conf;
5862 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
5863 const char *str, unsigned int len, void *buf,
5866 struct buffer *out = buf;
5867 struct rte_flow_action *action;
5868 struct action_raw_decap_data *action_raw_decap_data = NULL;
5871 ret = parse_vc(ctx, token, str, len, buf, size);
5874 /* Nothing else to do if there is no buffer. */
5877 if (!out->args.vc.actions_n)
5879 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5880 /* Point to selected object. */
5881 ctx->object = out->args.vc.data;
5882 ctx->objmask = NULL;
5883 /* Copy the headers to the buffer. */
5884 action_raw_decap_data = ctx->object;
5885 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
5886 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
5887 action->conf = &action_raw_decap_data->conf;
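/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 * the buffers referenced here are filled by "set raw_encap"/"set raw_decap"
 * (handled by cmd_set_raw_parsed() below), e.g.
 *   testpmd> flow create 0 ingress pattern eth / ipv4 / udp / end
 *            actions raw_decap index 0 / raw_encap index 1 / queue index 0 / end
 * Without an explicit "index", configuration slot 0 is used.
 */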
5892 parse_vc_action_set_meta(struct context *ctx, const struct token *token,
5893 const char *str, unsigned int len, void *buf,
5898 ret = parse_vc(ctx, token, str, len, buf, size);
5901 ret = rte_flow_dynf_metadata_register();
5908 parse_vc_action_sample(struct context *ctx, const struct token *token,
5909 const char *str, unsigned int len, void *buf,
5912 struct buffer *out = buf;
5913 struct rte_flow_action *action;
5914 struct action_sample_data *action_sample_data = NULL;
5915 static struct rte_flow_action end_action = {
5916 RTE_FLOW_ACTION_TYPE_END, 0
5920 ret = parse_vc(ctx, token, str, len, buf, size);
5923 /* Nothing else to do if there is no buffer. */
5926 if (!out->args.vc.actions_n)
5928 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5929 /* Point to selected object. */
5930 ctx->object = out->args.vc.data;
5931 ctx->objmask = NULL;
5932 /* Copy the headers to the buffer. */
5933 action_sample_data = ctx->object;
5934 action_sample_data->conf.actions = &end_action;
5935 action->conf = &action_sample_data->conf;
5940 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
5941 const char *str, unsigned int len, void *buf,
5944 struct action_sample_data *action_sample_data;
5945 struct rte_flow_action *action;
5946 const struct arg *arg;
5947 struct buffer *out = buf;
5951 RTE_SET_USED(token);
5954 if (ctx->curr != ACTION_SAMPLE_INDEX_VALUE)
5956 arg = ARGS_ENTRY_ARB_BOUNDED
5957 (offsetof(struct action_sample_data, idx),
5958 sizeof(((struct action_sample_data *)0)->idx),
5959 0, RAW_SAMPLE_CONFS_MAX_NUM - 1);
5960 if (push_args(ctx, arg))
5962 ret = parse_int(ctx, token, str, len, NULL, 0);
5969 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5970 action_sample_data = ctx->object;
5971 idx = action_sample_data->idx;
5972 action_sample_data->conf.actions = raw_sample_confs[idx].data;
5973 action->conf = &action_sample_data->conf;
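/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 *   testpmd> set sample_actions 0 mark id 0x8 / end
 *   testpmd> flow create 0 ingress pattern eth / end
 *            actions sample ratio 2 index 0 / queue index 1 / end
 */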
5977 /** Parse tokens for destroy command. */
5979 parse_destroy(struct context *ctx, const struct token *token,
5980 const char *str, unsigned int len,
5981 void *buf, unsigned int size)
5983 struct buffer *out = buf;
5985 /* Token name must match. */
5986 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5988 /* Nothing else to do if there is no buffer. */
5991 if (!out->command) {
5992 if (ctx->curr != DESTROY)
5994 if (sizeof(*out) > size)
5996 out->command = ctx->curr;
5999 ctx->objmask = NULL;
6000 out->args.destroy.rule =
6001 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6005 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
6006 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
6009 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
6010 ctx->objmask = NULL;
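/*
 * Several rule IDs may follow a single "flow destroy" command; each "rule"
 * token appends one entry to the array allocated right after the buffer,
 * e.g. (illustrative) "flow destroy 0 rule 0 rule 1".
 */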
6014 /** Parse tokens for flush command. */
6016 parse_flush(struct context *ctx, const struct token *token,
6017 const char *str, unsigned int len,
6018 void *buf, unsigned int size)
6020 struct buffer *out = buf;
6022 /* Token name must match. */
6023 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6025 /* Nothing else to do if there is no buffer. */
6028 if (!out->command) {
6029 if (ctx->curr != FLUSH)
6031 if (sizeof(*out) > size)
6033 out->command = ctx->curr;
6036 ctx->objmask = NULL;
6041 /** Parse tokens for dump command. */
6043 parse_dump(struct context *ctx, const struct token *token,
6044 const char *str, unsigned int len,
6045 void *buf, unsigned int size)
6047 struct buffer *out = buf;
6049 /* Token name must match. */
6050 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6052 /* Nothing else to do if there is no buffer. */
6055 if (!out->command) {
6056 if (ctx->curr != DUMP)
6058 if (sizeof(*out) > size)
6060 out->command = ctx->curr;
6063 ctx->objmask = NULL;
6068 /** Parse tokens for query command. */
6070 parse_query(struct context *ctx, const struct token *token,
6071 const char *str, unsigned int len,
6072 void *buf, unsigned int size)
6074 struct buffer *out = buf;
6076 /* Token name must match. */
6077 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6079 /* Nothing else to do if there is no buffer. */
6082 if (!out->command) {
6083 if (ctx->curr != QUERY)
6085 if (sizeof(*out) > size)
6087 out->command = ctx->curr;
6090 ctx->objmask = NULL;
6095 /** Parse action names. */
6097 parse_action(struct context *ctx, const struct token *token,
6098 const char *str, unsigned int len,
6099 void *buf, unsigned int size)
6101 struct buffer *out = buf;
6102 const struct arg *arg = pop_args(ctx);
6106 /* Argument is expected. */
6109 /* Parse action name. */
6110 for (i = 0; next_action[i]; ++i) {
6111 const struct parse_action_priv *priv;
6113 token = &token_list[next_action[i]];
6114 if (strcmp_partial(token->name, str, len))
6120 memcpy((uint8_t *)ctx->object + arg->offset,
6126 push_args(ctx, arg);
6130 /** Parse tokens for list command. */
6132 parse_list(struct context *ctx, const struct token *token,
6133 const char *str, unsigned int len,
6134 void *buf, unsigned int size)
6136 struct buffer *out = buf;
6138 /* Token name must match. */
6139 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6141 /* Nothing else to do if there is no buffer. */
6144 if (!out->command) {
6145 if (ctx->curr != LIST)
6147 if (sizeof(*out) > size)
6149 out->command = ctx->curr;
6152 ctx->objmask = NULL;
6153 out->args.list.group =
6154 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6158 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
6159 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
6162 ctx->object = out->args.list.group + out->args.list.group_n++;
6163 ctx->objmask = NULL;
6167 /** Parse tokens for list all aged flows command. */
6169 parse_aged(struct context *ctx, const struct token *token,
6170 const char *str, unsigned int len,
6171 void *buf, unsigned int size)
6173 struct buffer *out = buf;
6175 /* Token name must match. */
6176 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6178 /* Nothing else to do if there is no buffer. */
6181 if (!out->command) {
6182 if (ctx->curr != AGED)
6184 if (sizeof(*out) > size)
6186 out->command = ctx->curr;
6189 ctx->objmask = NULL;
6191 if (ctx->curr == AGED_DESTROY)
6192 out->args.aged.destroy = 1;
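/*
 * Illustrative usage: "flow aged 0" lists aged-out flows on port 0, while
 * "flow aged 0 destroy" also removes them (sets args.aged.destroy above).
 */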
6196 /** Parse tokens for isolate command. */
6198 parse_isolate(struct context *ctx, const struct token *token,
6199 const char *str, unsigned int len,
6200 void *buf, unsigned int size)
6202 struct buffer *out = buf;
6204 /* Token name must match. */
6205 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6207 /* Nothing else to do if there is no buffer. */
6210 if (!out->command) {
6211 if (ctx->curr != ISOLATE)
6213 if (sizeof(*out) > size)
6215 out->command = ctx->curr;
6218 ctx->objmask = NULL;
6224 parse_tunnel(struct context *ctx, const struct token *token,
6225 const char *str, unsigned int len,
6226 void *buf, unsigned int size)
6228 struct buffer *out = buf;
6230 /* Token name must match. */
6231 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6233 /* Nothing else to do if there is no buffer. */
6236 if (!out->command) {
6237 if (ctx->curr != TUNNEL)
6239 if (sizeof(*out) > size)
6241 out->command = ctx->curr;
6244 ctx->objmask = NULL;
6246 switch (ctx->curr) {
6250 case TUNNEL_DESTROY:
6252 out->command = ctx->curr;
6254 case TUNNEL_CREATE_TYPE:
6255 case TUNNEL_DESTROY_ID:
6256 ctx->object = &out->args.vc.tunnel_ops;
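/*
 * Illustrative usage (approximate syntax; see the testpmd documentation):
 *   testpmd> flow tunnel create 0 type vxlan
 *   testpmd> flow tunnel list 0
 *   testpmd> flow tunnel destroy 0 id 1
 */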
6265 * Parse signed/unsigned integers 8 to 64 bits long.
6267 * Last argument (ctx->args) is retrieved to determine integer type and
6271 parse_int(struct context *ctx, const struct token *token,
6272 const char *str, unsigned int len,
6273 void *buf, unsigned int size)
6275 const struct arg *arg = pop_args(ctx);
6280 /* Argument is expected. */
6285 (uintmax_t)strtoimax(str, &end, 0) :
6286 strtoumax(str, &end, 0);
6287 if (errno || (size_t)(end - str) != len)
6290 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
6291 (intmax_t)u > (intmax_t)arg->max)) ||
6292 (!arg->sign && (u < arg->min || u > arg->max))))
6297 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
6298 !arg_entry_bf_fill(ctx->objmask, -1, arg))
6302 buf = (uint8_t *)ctx->object + arg->offset;
6304 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
6308 case sizeof(uint8_t):
6309 *(uint8_t *)buf = u;
6311 case sizeof(uint16_t):
6312 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
6314 case sizeof(uint8_t [3]):
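/*
 * Three-byte fields (e.g. VXLAN VNI, MPLS label): stored LSB first only on
 * little-endian hosts when no network-order conversion is requested,
 * MSB first otherwise.
 */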
6315 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6317 ((uint8_t *)buf)[0] = u;
6318 ((uint8_t *)buf)[1] = u >> 8;
6319 ((uint8_t *)buf)[2] = u >> 16;
6323 ((uint8_t *)buf)[0] = u >> 16;
6324 ((uint8_t *)buf)[1] = u >> 8;
6325 ((uint8_t *)buf)[2] = u;
6327 case sizeof(uint32_t):
6328 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
6330 case sizeof(uint64_t):
6331 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
6336 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
6338 buf = (uint8_t *)ctx->objmask + arg->offset;
6343 push_args(ctx, arg);
6350 * Three arguments (ctx->args) are retrieved from the stack to store data,
6351 * its actual length and address (in that order).
6354 parse_string(struct context *ctx, const struct token *token,
6355 const char *str, unsigned int len,
6356 void *buf, unsigned int size)
6358 const struct arg *arg_data = pop_args(ctx);
6359 const struct arg *arg_len = pop_args(ctx);
6360 const struct arg *arg_addr = pop_args(ctx);
6361 char tmp[16]; /* Ought to be enough. */
6364 /* Arguments are expected. */
6368 push_args(ctx, arg_data);
6372 push_args(ctx, arg_len);
6373 push_args(ctx, arg_data);
6376 size = arg_data->size;
6377 /* Bit-mask fill is not supported. */
6378 if (arg_data->mask || size < len)
6382 /* Let parse_int() fill length information first. */
6383 ret = snprintf(tmp, sizeof(tmp), "%u", len);
6386 push_args(ctx, arg_len);
6387 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6392 buf = (uint8_t *)ctx->object + arg_data->offset;
6393 /* Output buffer is not necessarily NUL-terminated. */
6394 memcpy(buf, str, len);
6395 memset((uint8_t *)buf + len, 0x00, size - len);
6397 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6398 /* Save address if requested. */
6399 if (arg_addr->size) {
6400 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6402 (uint8_t *)ctx->object + arg_data->offset
6406 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6408 (uint8_t *)ctx->objmask + arg_data->offset
6414 push_args(ctx, arg_addr);
6415 push_args(ctx, arg_len);
6416 push_args(ctx, arg_data);
6421 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
6427 /* Check input parameters */
6428 if ((src == NULL) ||
6434 /* Convert chars to bytes */
6435 for (i = 0, len = 0; i < *size; i += 2) {
6436 snprintf(tmp, 3, "%s", src + i);
6437 dst[len++] = strtoul(tmp, &c, 16);
6452 parse_hex(struct context *ctx, const struct token *token,
6453 const char *str, unsigned int len,
6454 void *buf, unsigned int size)
6456 const struct arg *arg_data = pop_args(ctx);
6457 const struct arg *arg_len = pop_args(ctx);
6458 const struct arg *arg_addr = pop_args(ctx);
6459 char tmp[16]; /* Ought to be enough. */
6461 unsigned int hexlen = len;
6462 unsigned int length = 256;
6463 uint8_t hex_tmp[length];
6465 /* Arguments are expected. */
6469 push_args(ctx, arg_data);
6473 push_args(ctx, arg_len);
6474 push_args(ctx, arg_data);
6477 size = arg_data->size;
6478 /* Bit-mask fill is not supported. */
6484 /* Translate the hex string into a byte array. */
6485 if (str[0] == '0' && ((str[1] == 'x') ||
6490 if (hexlen > length)
6492 ret = parse_hex_string(str, hex_tmp, &hexlen);
6495 /* Let parse_int() fill length information first. */
6496 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
6499 push_args(ctx, arg_len);
6500 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6505 buf = (uint8_t *)ctx->object + arg_data->offset;
6506 /* Output buffer is not necessarily NUL-terminated. */
6507 memcpy(buf, hex_tmp, hexlen);
6508 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
6510 memset((uint8_t *)ctx->objmask + arg_data->offset,
6512 /* Save address if requested. */
6513 if (arg_addr->size) {
6514 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6516 (uint8_t *)ctx->object + arg_data->offset
6520 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6522 (uint8_t *)ctx->objmask + arg_data->offset
6528 push_args(ctx, arg_addr);
6529 push_args(ctx, arg_len);
6530 push_args(ctx, arg_data);
6536 * Parse a zero-terminated string.
6539 parse_string0(struct context *ctx, const struct token *token __rte_unused,
6540 const char *str, unsigned int len,
6541 void *buf, unsigned int size)
6543 const struct arg *arg_data = pop_args(ctx);
6545 /* Arguments are expected. */
6548 size = arg_data->size;
6549 /* Bit-mask fill is not supported. */
6550 if (arg_data->mask || size < len + 1)
6554 buf = (uint8_t *)ctx->object + arg_data->offset;
6555 strncpy(buf, str, len);
6557 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6560 push_args(ctx, arg_data);
6565 * Parse a MAC address.
6567 * Last argument (ctx->args) is retrieved to determine storage size and
6571 parse_mac_addr(struct context *ctx, const struct token *token,
6572 const char *str, unsigned int len,
6573 void *buf, unsigned int size)
6575 const struct arg *arg = pop_args(ctx);
6576 struct rte_ether_addr tmp;
6580 /* Argument is expected. */
6584 /* Bit-mask fill is not supported. */
6585 if (arg->mask || size != sizeof(tmp))
6587 /* Only network endian is supported. */
6590 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
6591 if (ret < 0 || (unsigned int)ret != len)
6595 buf = (uint8_t *)ctx->object + arg->offset;
6596 memcpy(buf, &tmp, size);
6598 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6601 push_args(ctx, arg);
6606 * Parse an IPv4 address.
6608 * Last argument (ctx->args) is retrieved to determine storage size and
6612 parse_ipv4_addr(struct context *ctx, const struct token *token,
6613 const char *str, unsigned int len,
6614 void *buf, unsigned int size)
6616 const struct arg *arg = pop_args(ctx);
6621 /* Argument is expected. */
6625 /* Bit-mask fill is not supported. */
6626 if (arg->mask || size != sizeof(tmp))
6628 /* Only network endian is supported. */
6631 memcpy(str2, str, len);
6633 ret = inet_pton(AF_INET, str2, &tmp);
6635 /* Attempt integer parsing. */
6636 push_args(ctx, arg);
6637 return parse_int(ctx, token, str, len, buf, size);
6641 buf = (uint8_t *)ctx->object + arg->offset;
6642 memcpy(buf, &tmp, size);
6644 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6647 push_args(ctx, arg);
6652 * Parse an IPv6 address.
6654 * Last argument (ctx->args) is retrieved to determine storage size and
6658 parse_ipv6_addr(struct context *ctx, const struct token *token,
6659 const char *str, unsigned int len,
6660 void *buf, unsigned int size)
6662 const struct arg *arg = pop_args(ctx);
6664 struct in6_addr tmp;
6668 /* Argument is expected. */
6672 /* Bit-mask fill is not supported. */
6673 if (arg->mask || size != sizeof(tmp))
6675 /* Only network endian is supported. */
6678 memcpy(str2, str, len);
6680 ret = inet_pton(AF_INET6, str2, &tmp);
6685 buf = (uint8_t *)ctx->object + arg->offset;
6686 memcpy(buf, &tmp, size);
6688 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6691 push_args(ctx, arg);
6695 /** Boolean values (even indices stand for false). */
6696 static const char *const boolean_name[] = {
6706 * Parse a boolean value.
6708 * Last argument (ctx->args) is retrieved to determine storage size and
6712 parse_boolean(struct context *ctx, const struct token *token,
6713 const char *str, unsigned int len,
6714 void *buf, unsigned int size)
6716 const struct arg *arg = pop_args(ctx);
6720 /* Argument is expected. */
6723 for (i = 0; boolean_name[i]; ++i)
6724 if (!strcmp_partial(boolean_name[i], str, len))
6726 /* Process token as integer. */
6727 if (boolean_name[i])
6728 str = i & 1 ? "1" : "0";
6729 push_args(ctx, arg);
6730 ret = parse_int(ctx, token, str, strlen(str), buf, size);
6731 return ret > 0 ? (int)len : ret;
6734 /** Parse port and update context. */
6736 parse_port(struct context *ctx, const struct token *token,
6737 const char *str, unsigned int len,
6738 void *buf, unsigned int size)
6740 struct buffer *out = &(struct buffer){ .port = 0 };
6748 ctx->objmask = NULL;
6749 size = sizeof(*out);
6751 ret = parse_int(ctx, token, str, len, out, size);
6753 ctx->port = out->port;
6760 parse_sa_id2ptr(struct context *ctx, const struct token *token,
6761 const char *str, unsigned int len,
6762 void *buf, unsigned int size)
6764 struct rte_flow_action *action = ctx->object;
6772 ctx->objmask = NULL;
6773 ret = parse_int(ctx, token, str, len, ctx->object, sizeof(id));
6774 ctx->object = action;
6775 if (ret != (int)len)
6777 /* set shared action */
6779 action->conf = port_shared_action_get_by_id(ctx->port, id);
6780 ret = (action->conf) ? ret : -1;
6785 /** Parse set command, initialize output buffer for subsequent tokens. */
6787 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
6788 const char *str, unsigned int len,
6789 void *buf, unsigned int size)
6791 struct buffer *out = buf;
6793 /* Token name must match. */
6794 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6796 /* Nothing else to do if there is no buffer. */
6799 /* Make sure buffer is large enough. */
6800 if (size < sizeof(*out))
6803 ctx->objmask = NULL;
6807 out->command = ctx->curr;
6808 /* For encap/decap we need the pattern. */
6809 out->args.vc.pattern = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6814 /** Parse set command, initialize output buffer for subsequent tokens. */
6816 parse_set_sample_action(struct context *ctx, const struct token *token,
6817 const char *str, unsigned int len,
6818 void *buf, unsigned int size)
6820 struct buffer *out = buf;
6822 /* Token name must match. */
6823 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6825 /* Nothing else to do if there is no buffer. */
6828 /* Make sure buffer is large enough. */
6829 if (size < sizeof(*out))
6832 ctx->objmask = NULL;
6836 out->command = ctx->curr;
6837 /* For sampler we need the actions. */
6838 out->args.vc.actions = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6844 * Parse set raw_encap/raw_decap command,
6845 * initialize output buffer for subsequent tokens.
6848 parse_set_init(struct context *ctx, const struct token *token,
6849 const char *str, unsigned int len,
6850 void *buf, unsigned int size)
6852 struct buffer *out = buf;
6854 /* Token name must match. */
6855 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6857 /* Nothing else to do if there is no buffer. */
6860 /* Make sure buffer is large enough. */
6861 if (size < sizeof(*out))
6863 /* Initialize buffer. */
6864 memset(out, 0x00, sizeof(*out));
6865 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
6868 ctx->objmask = NULL;
6869 if (!out->command) {
6870 if (ctx->curr != SET)
6872 if (sizeof(*out) > size)
6874 out->command = ctx->curr;
6875 out->args.vc.data = (uint8_t *)out + size;
6876 ctx->object = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6882 /** No completion. */
6884 comp_none(struct context *ctx, const struct token *token,
6885 unsigned int ent, char *buf, unsigned int size)
6895 /** Complete boolean values. */
6897 comp_boolean(struct context *ctx, const struct token *token,
6898 unsigned int ent, char *buf, unsigned int size)
6904 for (i = 0; boolean_name[i]; ++i)
6905 if (buf && i == ent)
6906 return strlcpy(buf, boolean_name[i], size);
6912 /** Complete action names. */
6914 comp_action(struct context *ctx, const struct token *token,
6915 unsigned int ent, char *buf, unsigned int size)
6921 for (i = 0; next_action[i]; ++i)
6922 if (buf && i == ent)
6923 return strlcpy(buf, token_list[next_action[i]].name,
6930 /** Complete available ports. */
6932 comp_port(struct context *ctx, const struct token *token,
6933 unsigned int ent, char *buf, unsigned int size)
6940 RTE_ETH_FOREACH_DEV(p) {
6941 if (buf && i == ent)
6942 return snprintf(buf, size, "%u", p);
6950 /** Complete available rule IDs. */
6952 comp_rule_id(struct context *ctx, const struct token *token,
6953 unsigned int ent, char *buf, unsigned int size)
6956 struct rte_port *port;
6957 struct port_flow *pf;
6960 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
6961 ctx->port == (portid_t)RTE_PORT_ALL)
6963 port = &ports[ctx->port];
6964 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
6965 if (buf && i == ent)
6966 return snprintf(buf, size, "%u", pf->id);
6974 /** Complete type field for RSS action. */
6976 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
6977 unsigned int ent, char *buf, unsigned int size)
6983 for (i = 0; rss_type_table[i].str; ++i)
6988 return strlcpy(buf, rss_type_table[ent].str, size);
6990 return snprintf(buf, size, "end");
6994 /** Complete queue field for RSS action. */
6996 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
6997 unsigned int ent, char *buf, unsigned int size)
7004 return snprintf(buf, size, "%u", ent);
7006 return snprintf(buf, size, "end");
7010 /** Complete index number for set raw_encap/raw_decap commands. */
7012 comp_set_raw_index(struct context *ctx, const struct token *token,
7013 unsigned int ent, char *buf, unsigned int size)
7019 RTE_SET_USED(token);
7020 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
7021 if (buf && idx == ent)
7022 return snprintf(buf, size, "%u", idx);
7028 /** Complete index number for the set sample_actions command. */
7030 comp_set_sample_index(struct context *ctx, const struct token *token,
7031 unsigned int ent, char *buf, unsigned int size)
7037 RTE_SET_USED(token);
7038 for (idx = 0; idx < RAW_SAMPLE_CONFS_MAX_NUM; ++idx) {
7039 if (buf && idx == ent)
7040 return snprintf(buf, size, "%u", idx);
7046 /** Internal context. */
7047 static struct context cmd_flow_context;
7049 /** Global parser instances (cmdline API). */
7050 cmdline_parse_inst_t cmd_flow;
7051 cmdline_parse_inst_t cmd_set_raw;
7053 /** Initialize context. */
7055 cmd_flow_context_init(struct context *ctx)
7057 /* A full memset() is not necessary. */
7067 ctx->objmask = NULL;
7070 /** Parse a token (cmdline API). */
7072 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
7075 struct context *ctx = &cmd_flow_context;
7076 const struct token *token;
7077 const enum index *list;
7082 token = &token_list[ctx->curr];
7083 /* Check argument length. */
7086 for (len = 0; src[len]; ++len)
7087 if (src[len] == '#' || isspace(src[len]))
7091 /* Last argument and EOL detection. */
7092 for (i = len; src[i]; ++i)
7093 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
7095 else if (!isspace(src[i])) {
7100 if (src[i] == '\r' || src[i] == '\n') {
7104 /* Initialize context if necessary. */
7105 if (!ctx->next_num) {
7108 ctx->next[ctx->next_num++] = token->next[0];
7110 /* Process argument through candidates. */
7111 ctx->prev = ctx->curr;
7112 list = ctx->next[ctx->next_num - 1];
7113 for (i = 0; list[i]; ++i) {
7114 const struct token *next = &token_list[list[i]];
7117 ctx->curr = list[i];
7119 tmp = next->call(ctx, next, src, len, result, size);
7121 tmp = parse_default(ctx, next, src, len, result, size);
7122 if (tmp == -1 || tmp != len)
7130 /* Push subsequent tokens if any. */
7132 for (i = 0; token->next[i]; ++i) {
7133 if (ctx->next_num == RTE_DIM(ctx->next))
7135 ctx->next[ctx->next_num++] = token->next[i];
7137 /* Push arguments if any. */
7139 for (i = 0; token->args[i]; ++i) {
7140 if (ctx->args_num == RTE_DIM(ctx->args))
7142 ctx->args[ctx->args_num++] = token->args[i];
7147 /** Return number of completion entries (cmdline API). */
7149 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
7151 struct context *ctx = &cmd_flow_context;
7152 const struct token *token = &token_list[ctx->curr];
7153 const enum index *list;
7157 /* Count number of tokens in current list. */
7159 list = ctx->next[ctx->next_num - 1];
7161 list = token->next[0];
7162 for (i = 0; list[i]; ++i)
7167 * If there is a single token, use its completion callback, otherwise
7168 * return the number of entries.
7170 token = &token_list[list[0]];
7171 if (i == 1 && token->comp) {
7172 /* Save index for cmd_flow_get_help(). */
7173 ctx->prev = list[0];
7174 return token->comp(ctx, token, 0, NULL, 0);
7179 /** Return a completion entry (cmdline API). */
7181 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
7182 char *dst, unsigned int size)
7184 struct context *ctx = &cmd_flow_context;
7185 const struct token *token = &token_list[ctx->curr];
7186 const enum index *list;
7190 /* Count number of tokens in current list. */
7192 list = ctx->next[ctx->next_num - 1];
7194 list = token->next[0];
7195 for (i = 0; list[i]; ++i)
7199 /* If there is a single token, use its completion callback. */
7200 token = &token_list[list[0]];
7201 if (i == 1 && token->comp) {
7202 /* Save index for cmd_flow_get_help(). */
7203 ctx->prev = list[0];
7204 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
7206 /* Otherwise make sure the index is valid and use defaults. */
7209 token = &token_list[list[index]];
7210 strlcpy(dst, token->name, size);
7211 /* Save index for cmd_flow_get_help(). */
7212 ctx->prev = list[index];
7216 /** Populate help strings for current token (cmdline API). */
7218 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
7220 struct context *ctx = &cmd_flow_context;
7221 const struct token *token = &token_list[ctx->prev];
7226 /* Set token type and update global help with details. */
7227 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
7229 cmd_flow.help_str = token->help;
7231 cmd_flow.help_str = token->name;
7235 /** Token definition template (cmdline API). */
7236 static struct cmdline_token_hdr cmd_flow_token_hdr = {
7237 .ops = &(struct cmdline_token_ops){
7238 .parse = cmd_flow_parse,
7239 .complete_get_nb = cmd_flow_complete_get_nb,
7240 .complete_get_elt = cmd_flow_complete_get_elt,
7241 .get_help = cmd_flow_get_help,
7246 /** Populate the next dynamic token. */
7248 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
7249 cmdline_parse_token_hdr_t **hdr_inst)
7251 struct context *ctx = &cmd_flow_context;
7253 /* Always reinitialize context before requesting the first token. */
7254 if (!(hdr_inst - cmd_flow.tokens))
7255 cmd_flow_context_init(ctx);
7256 /* Return NULL when no more tokens are expected. */
7257 if (!ctx->next_num && ctx->curr) {
7261 /* Determine if command should end here. */
7262 if (ctx->eol && ctx->last && ctx->next_num) {
7263 const enum index *list = ctx->next[ctx->next_num - 1];
7266 for (i = 0; list[i]; ++i) {
7273 *hdr = &cmd_flow_token_hdr;
7276 /** Dispatch parsed buffer to function calls. */
7278 cmd_flow_parsed(const struct buffer *in)
7280 switch (in->command) {
7281 case SHARED_ACTION_CREATE:
7282 port_shared_action_create(
7283 in->port, in->args.vc.attr.group,
7284 &((const struct rte_flow_shared_action_conf) {
7285 .ingress = in->args.vc.attr.ingress,
7286 .egress = in->args.vc.attr.egress,
7287 .transfer = in->args.vc.attr.transfer,
7289 in->args.vc.actions);
7291 case SHARED_ACTION_DESTROY:
7292 port_shared_action_destroy(in->port,
7293 in->args.sa_destroy.action_id_n,
7294 in->args.sa_destroy.action_id);
7296 case SHARED_ACTION_UPDATE:
7297 port_shared_action_update(in->port, in->args.vc.attr.group,
7298 in->args.vc.actions);
7300 case SHARED_ACTION_QUERY:
7301 port_shared_action_query(in->port, in->args.sa.action_id);
7304 port_flow_validate(in->port, &in->args.vc.attr,
7305 in->args.vc.pattern, in->args.vc.actions,
7306 &in->args.vc.tunnel_ops);
7309 port_flow_create(in->port, &in->args.vc.attr,
7310 in->args.vc.pattern, in->args.vc.actions,
7311 &in->args.vc.tunnel_ops);
7314 port_flow_destroy(in->port, in->args.destroy.rule_n,
7315 in->args.destroy.rule);
7318 port_flow_flush(in->port);
7321 port_flow_dump(in->port, in->args.dump.file);
7324 port_flow_query(in->port, in->args.query.rule,
7325 &in->args.query.action);
7328 port_flow_list(in->port, in->args.list.group_n,
7329 in->args.list.group);
7332 port_flow_isolate(in->port, in->args.isolate.set);
7335 port_flow_aged(in->port, in->args.aged.destroy);
7338 port_flow_tunnel_create(in->port, &in->args.vc.tunnel_ops);
7340 case TUNNEL_DESTROY:
7341 port_flow_tunnel_destroy(in->port, in->args.vc.tunnel_ops.id);
7344 port_flow_tunnel_list(in->port);
7351 /** Token generator and output processing callback (cmdline API). */
7353 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
7356 cmd_flow_tok(arg0, arg2);
7358 cmd_flow_parsed(arg0);
7361 /** Global parser instance (cmdline API). */
7362 cmdline_parse_inst_t cmd_flow = {
7364 .data = NULL, /**< Unused. */
7365 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
7368 }, /**< Tokens are returned by cmd_flow_tok(). */
7371 /** "set" command facility; reuses the flow command infrastructure as much as possible. */
7374 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
7376 struct rte_flow_item_ipv4 *ipv4;
7377 struct rte_flow_item_eth *eth;
7378 struct rte_flow_item_ipv6 *ipv6;
7379 struct rte_flow_item_vxlan *vxlan;
7380 struct rte_flow_item_vxlan_gpe *gpe;
7381 struct rte_flow_item_nvgre *nvgre;
7382 uint32_t ipv6_vtc_flow;
7384 switch (item->type) {
7385 case RTE_FLOW_ITEM_TYPE_ETH:
7386 eth = (struct rte_flow_item_eth *)buf;
7388 eth->type = rte_cpu_to_be_16(next_proto);
7390 case RTE_FLOW_ITEM_TYPE_IPV4:
7391 ipv4 = (struct rte_flow_item_ipv4 *)buf;
7392 ipv4->hdr.version_ihl = 0x45;
7393 if (next_proto && ipv4->hdr.next_proto_id == 0)
7394 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
7396 case RTE_FLOW_ITEM_TYPE_IPV6:
7397 ipv6 = (struct rte_flow_item_ipv6 *)buf;
7398 if (next_proto && ipv6->hdr.proto == 0)
7399 ipv6->hdr.proto = (uint8_t)next_proto;
7400 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
7401 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
7402 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
7403 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
7405 case RTE_FLOW_ITEM_TYPE_VXLAN:
7406 vxlan = (struct rte_flow_item_vxlan *)buf;
7407 vxlan->flags = 0x08;
7409 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7410 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
7413 case RTE_FLOW_ITEM_TYPE_NVGRE:
7414 nvgre = (struct rte_flow_item_nvgre *)buf;
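/*
 * 0x6558 is the EtherType for Transparent Ethernet Bridging carried by
 * NVGRE; 0x2000 sets the GRE key-present (K) bit required by NVGRE.
 */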
7415 nvgre->protocol = rte_cpu_to_be_16(0x6558);
7416 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
7423 /** Helper to get an item's default mask. */
7425 flow_item_default_mask(const struct rte_flow_item *item)
7427 const void *mask = NULL;
7428 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7430 switch (item->type) {
7431 case RTE_FLOW_ITEM_TYPE_ANY:
7432 mask = &rte_flow_item_any_mask;
7434 case RTE_FLOW_ITEM_TYPE_VF:
7435 mask = &rte_flow_item_vf_mask;
7437 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7438 mask = &rte_flow_item_port_id_mask;
7440 case RTE_FLOW_ITEM_TYPE_RAW:
7441 mask = &rte_flow_item_raw_mask;
7443 case RTE_FLOW_ITEM_TYPE_ETH:
7444 mask = &rte_flow_item_eth_mask;
7446 case RTE_FLOW_ITEM_TYPE_VLAN:
7447 mask = &rte_flow_item_vlan_mask;
7449 case RTE_FLOW_ITEM_TYPE_IPV4:
7450 mask = &rte_flow_item_ipv4_mask;
7452 case RTE_FLOW_ITEM_TYPE_IPV6:
7453 mask = &rte_flow_item_ipv6_mask;
7455 case RTE_FLOW_ITEM_TYPE_ICMP:
7456 mask = &rte_flow_item_icmp_mask;
7458 case RTE_FLOW_ITEM_TYPE_UDP:
7459 mask = &rte_flow_item_udp_mask;
7461 case RTE_FLOW_ITEM_TYPE_TCP:
7462 mask = &rte_flow_item_tcp_mask;
7464 case RTE_FLOW_ITEM_TYPE_SCTP:
7465 mask = &rte_flow_item_sctp_mask;
7467 case RTE_FLOW_ITEM_TYPE_VXLAN:
7468 mask = &rte_flow_item_vxlan_mask;
7470 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7471 mask = &rte_flow_item_vxlan_gpe_mask;
7473 case RTE_FLOW_ITEM_TYPE_E_TAG:
7474 mask = &rte_flow_item_e_tag_mask;
7476 case RTE_FLOW_ITEM_TYPE_NVGRE:
7477 mask = &rte_flow_item_nvgre_mask;
7479 case RTE_FLOW_ITEM_TYPE_MPLS:
7480 mask = &rte_flow_item_mpls_mask;
7482 case RTE_FLOW_ITEM_TYPE_GRE:
7483 mask = &rte_flow_item_gre_mask;
7485 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7486 mask = &gre_key_default_mask;
7488 case RTE_FLOW_ITEM_TYPE_META:
7489 mask = &rte_flow_item_meta_mask;
7491 case RTE_FLOW_ITEM_TYPE_FUZZY:
7492 mask = &rte_flow_item_fuzzy_mask;
7494 case RTE_FLOW_ITEM_TYPE_GTP:
7495 mask = &rte_flow_item_gtp_mask;
7497 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7498 mask = &rte_flow_item_gtp_psc_mask;
7500 case RTE_FLOW_ITEM_TYPE_GENEVE:
7501 mask = &rte_flow_item_geneve_mask;
7503 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
7504 mask = &rte_flow_item_pppoe_proto_id_mask;
7506 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
7507 mask = &rte_flow_item_l2tpv3oip_mask;
7509 case RTE_FLOW_ITEM_TYPE_ESP:
7510 mask = &rte_flow_item_esp_mask;
7512 case RTE_FLOW_ITEM_TYPE_AH:
7513 mask = &rte_flow_item_ah_mask;
7515 case RTE_FLOW_ITEM_TYPE_PFCP:
7516 mask = &rte_flow_item_pfcp_mask;
7524 /** Store parsed sample actions into the selected sample configuration. */
7526 cmd_set_raw_parsed_sample(const struct buffer *in)
7528 uint32_t n = in->args.vc.actions_n;
7530 struct rte_flow_action *action = NULL;
7531 struct rte_flow_action *data = NULL;
7533 uint16_t idx = in->port; /* We borrow port field as index */
7534 uint32_t max_size = sizeof(struct rte_flow_action) *
7535 ACTION_SAMPLE_ACTIONS_NUM;
7537 RTE_ASSERT(in->command == SET_SAMPLE_ACTIONS);
7538 data = (struct rte_flow_action *)&raw_sample_confs[idx].data;
7539 memset(data, 0x00, max_size);
7540 for (; i <= n - 1; i++) {
7541 action = in->args.vc.actions + i;
7542 if (action->type == RTE_FLOW_ACTION_TYPE_END)
7544 switch (action->type) {
7545 case RTE_FLOW_ACTION_TYPE_MARK:
7546 size = sizeof(struct rte_flow_action_mark);
7547 rte_memcpy(&sample_mark[idx],
7548 (const void *)action->conf, size);
7549 action->conf = &sample_mark[idx];
7551 case RTE_FLOW_ACTION_TYPE_COUNT:
7552 size = sizeof(struct rte_flow_action_count);
7553 rte_memcpy(&sample_count[idx],
7554 (const void *)action->conf, size);
7555 action->conf = &sample_count[idx];
7557 case RTE_FLOW_ACTION_TYPE_QUEUE:
7558 size = sizeof(struct rte_flow_action_queue);
7559 rte_memcpy(&sample_queue[idx],
7560 (const void *)action->conf, size);
7561 action->conf = &sample_queue[idx];
7563 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7564 size = sizeof(struct rte_flow_action_raw_encap);
7565 rte_memcpy(&sample_encap[idx],
7566 (const void *)action->conf, size);
7567 action->conf = &sample_encap[idx];
7569 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7570 size = sizeof(struct rte_flow_action_port_id);
7571 rte_memcpy(&sample_port_id[idx],
7572 (const void *)action->conf, size);
7573 action->conf = &sample_port_id[idx];
7576 printf("Error - Unsupported action\n");
7579 rte_memcpy(data, action, sizeof(struct rte_flow_action));
7584 /** Build the raw encap/decap buffer from the parsed pattern. */
7586 cmd_set_raw_parsed(const struct buffer *in)
7588 uint32_t n = in->args.vc.pattern_n;
7590 struct rte_flow_item *item = NULL;
7592 uint8_t *data = NULL;
7593 uint8_t *data_tail = NULL;
7594 size_t *total_size = NULL;
7595 uint16_t upper_layer = 0;
7597 uint16_t idx = in->port; /* We borrow port field as index */
7599 if (in->command == SET_SAMPLE_ACTIONS)
7600 return cmd_set_raw_parsed_sample(in);
7601 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
7602 in->command == SET_RAW_DECAP);
7603 if (in->command == SET_RAW_ENCAP) {
7604 total_size = &raw_encap_confs[idx].size;
7605 data = (uint8_t *)&raw_encap_confs[idx].data;
7607 total_size = &raw_decap_confs[idx].size;
7608 data = (uint8_t *)&raw_decap_confs[idx].data;
7611 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
7612 /* Process headers from the upper layers to the lower ones (L3/L4 -> L2). */
7613 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
        for (i = n - 1 ; i >= 0; --i) {
                item = in->args.vc.pattern + i;
                if (item->spec == NULL)
                        item->spec = flow_item_default_mask(item);
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        size = sizeof(struct rte_flow_item_eth);
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        size = sizeof(struct rte_flow_item_vlan);
                        proto = RTE_ETHER_TYPE_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        size = sizeof(struct rte_flow_item_ipv4);
                        proto = RTE_ETHER_TYPE_IPV4;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        size = sizeof(struct rte_flow_item_ipv6);
                        proto = RTE_ETHER_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        size = sizeof(struct rte_flow_item_udp);
                        proto = 0x11;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        size = sizeof(struct rte_flow_item_tcp);
                        proto = 0x06;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        size = sizeof(struct rte_flow_item_vxlan);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        size = sizeof(struct rte_flow_item_vxlan_gpe);
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                        size = sizeof(struct rte_flow_item_gre);
                        proto = 0x2F;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                        size = sizeof(rte_be32_t);
                        proto = 0x0;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        size = sizeof(struct rte_flow_item_mpls);
                        proto = 0x0;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        size = sizeof(struct rte_flow_item_nvgre);
                        proto = 0x2F;
                        break;
                case RTE_FLOW_ITEM_TYPE_GENEVE:
                        size = sizeof(struct rte_flow_item_geneve);
                        break;
                case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
                        size = sizeof(struct rte_flow_item_l2tpv3oip);
                        proto = 0x73;
                        break;
                case RTE_FLOW_ITEM_TYPE_ESP:
                        size = sizeof(struct rte_flow_item_esp);
                        proto = 0x32;
                        break;
                case RTE_FLOW_ITEM_TYPE_AH:
                        size = sizeof(struct rte_flow_item_ah);
                        proto = 0x33;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP:
                        size = sizeof(struct rte_flow_item_gtp);
                        break;
                case RTE_FLOW_ITEM_TYPE_PFCP:
                        size = sizeof(struct rte_flow_item_pfcp);
                        break;
                default:
                        printf("Error - Not supported item\n");
                        *total_size = 0;
                        memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
                        return;
                }
                *total_size += size;
                rte_memcpy(data_tail - (*total_size), item->spec, size);
                /* update some fields which cannot be set by cmdline */
                update_fields((data_tail - (*total_size)), item,
                              upper_layer);
                upper_layer = proto;
        }
        if (verbose_level & 0x1)
                printf("total data size is %zu\n", (*total_size));
        RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
        memmove(data, (data_tail - (*total_size)), *total_size);
}
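
/*
 * Illustrative usage only (not part of the original source): assuming the
 * documented testpmd syntax, a raw_encap buffer can be prepared and then
 * referenced by index from a flow rule, e.g.:
 *
 *   testpmd> set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1
 *            inner_type is 0x0800 / ipv4 / udp dst is 4789 /
 *            vxlan vni is 2 / end_set
 *   testpmd> flow create 0 egress pattern eth / ipv4 / end
 *            actions raw_encap index 4 / end
 *
 * Items are written back-to-front into raw_encap_confs[4].data, so the
 * resulting buffer starts with the outermost (L2) header.
 */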
/** Populate help strings for current token (cmdline API). */
static int
cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
                     unsigned int size)
{
        struct context *ctx = &cmd_flow_context;
        const struct token *token = &token_list[ctx->prev];

        (void)hdr;
        if (!size)
                return -1;
        /* Set token type and update global help with details. */
        snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
        if (token->type)
                cmd_set_raw.help_str = token->help;
        else
                cmd_set_raw.help_str = token->name;
        return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
        .ops = &(struct cmdline_token_ops){
                .parse = cmd_flow_parse,
                .complete_get_nb = cmd_flow_complete_get_nb,
                .complete_get_elt = cmd_flow_complete_get_elt,
                .get_help = cmd_set_raw_get_help,
        },
        .offset = 0,
};

/** Populate the next dynamic token. */
static void
cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
                cmdline_parse_token_hdr_t **hdr_inst)
{
        struct context *ctx = &cmd_flow_context;

        /* Always reinitialize context before requesting the first token. */
        if (!(hdr_inst - cmd_set_raw.tokens)) {
                cmd_flow_context_init(ctx);
                ctx->curr = START_SET;
        }
        /* Return NULL when no more tokens are expected. */
        if (!ctx->next_num && (ctx->curr != START_SET)) {
                *hdr = NULL;
                return;
        }
        /* Determine if command should end here. */
        if (ctx->eol && ctx->last && ctx->next_num) {
                const enum index *list = ctx->next[ctx->next_num - 1];
                int i;

                for (i = 0; list[i]; ++i) {
                        if (list[i] != END)
                                continue;
                        *hdr = NULL;
                        return;
                }
        }
        *hdr = &cmd_set_raw_token_hdr;
}

/** Token generator and output processing callback (cmdline API). */
static void
cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
{
        if (cl == NULL)
                cmd_set_raw_tok(arg0, arg2);
        else
                cmd_set_raw_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_set_raw = {
        .f = cmd_set_raw_cb,
        .data = NULL, /**< Unused. */
        .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
        .tokens = {
                NULL,
        }, /**< Tokens are returned by cmd_flow_tok(). */
};

/* *** display raw_encap/raw_decap buf */
struct cmd_show_set_raw_result {
        cmdline_fixed_string_t cmd_show;
        cmdline_fixed_string_t cmd_what;
        cmdline_fixed_string_t cmd_all;
        uint16_t cmd_index;
};

static void
cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
{
        struct cmd_show_set_raw_result *res = parsed_result;
        uint16_t index = res->cmd_index;
        uint8_t all = 0;
        uint8_t *raw_data = NULL;
        size_t raw_size = 0;
        char title[16] = {0};

        RTE_SET_USED(cl);
        RTE_SET_USED(data);
        if (!strcmp(res->cmd_all, "all")) {
                all = 1;
                index = 0;
        } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
                printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
                return;
        }
        do {
                if (!strcmp(res->cmd_what, "raw_encap")) {
                        raw_data = (uint8_t *)&raw_encap_confs[index].data;
                        raw_size = raw_encap_confs[index].size;
                        snprintf(title, 16, "\nindex: %u", index);
                        rte_hexdump(stdout, title, raw_data, raw_size);
                } else {
                        raw_data = (uint8_t *)&raw_decap_confs[index].data;
                        raw_size = raw_decap_confs[index].size;
                        snprintf(title, 16, "\nindex: %u", index);
                        rte_hexdump(stdout, title, raw_data, raw_size);
                }
        } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
}
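
/*
 * Illustrative usage only (not part of the original source): the two parser
 * instances defined below expose this handler as, e.g.:
 *
 *   testpmd> show raw_encap 4
 *   testpmd> show raw_decap all
 *
 * Each configured buffer is printed with rte_hexdump() under an
 * "index: <n>" title.
 */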

cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
        TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
                        cmd_show, "show");
cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
        TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
                        cmd_what, "raw_encap#raw_decap");
cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
        TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
                        cmd_index, RTE_UINT16);
cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
        TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
                        cmd_all, "all");
cmdline_parse_inst_t cmd_show_set_raw = {
        .f = cmd_show_set_raw_parsed,
        .data = NULL,
        .help_str = "show <raw_encap|raw_decap> <index>",
        .tokens = {
                (void *)&cmd_show_set_raw_cmd_show,
                (void *)&cmd_show_set_raw_cmd_what,
                (void *)&cmd_show_set_raw_cmd_index,
                NULL,
        },
};
cmdline_parse_inst_t cmd_show_set_raw_all = {
        .f = cmd_show_set_raw_parsed,
        .data = NULL,
        .help_str = "show <raw_encap|raw_decap> all",
        .tokens = {
                (void *)&cmd_show_set_raw_cmd_show,
                (void *)&cmd_show_set_raw_cmd_what,
                (void *)&cmd_show_set_raw_cmd_all,