1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
26 #include <rte_vxlan.h>
30 #include <rte_geneve.h>
34 /** Parser token indices. */
59 /* Top-level command. */
61 /* Sub-level commands. */
68 /* Top-level command. */
70 /* Sub-level commands. */
83 /* Tunnel arguments. */
90 /* Destroy arguments. */
93 /* Query arguments. */
99 /* Destroy aged flow arguments. */
102 /* Validate/create arguments. */
111 /* Shared action arguments */
112 SHARED_ACTION_CREATE,
113 SHARED_ACTION_UPDATE,
114 SHARED_ACTION_DESTROY,
117 /* Shared action create arguments */
118 SHARED_ACTION_CREATE_ID,
119 SHARED_ACTION_INGRESS,
120 SHARED_ACTION_EGRESS,
121 SHARED_ACTION_TRANSFER,
124 /* Shared action destroy arguments */
125 SHARED_ACTION_DESTROY_ID,
127 /* Validate/create pattern. */
165 ITEM_VLAN_INNER_TYPE,
166 ITEM_VLAN_HAS_MORE_VLAN,
169 ITEM_IPV4_FRAGMENT_OFFSET,
181 ITEM_IPV6_HAS_FRAG_EXT,
202 ITEM_E_TAG_GRP_ECID_B,
211 ITEM_GRE_C_RSVD0_VER,
229 ITEM_ARP_ETH_IPV4_SHA,
230 ITEM_ARP_ETH_IPV4_SPA,
231 ITEM_ARP_ETH_IPV4_THA,
232 ITEM_ARP_ETH_IPV4_TPA,
234 ITEM_IPV6_EXT_NEXT_HDR,
236 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
237 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
242 ITEM_ICMP6_ND_NS_TARGET_ADDR,
244 ITEM_ICMP6_ND_NA_TARGET_ADDR,
246 ITEM_ICMP6_ND_OPT_TYPE,
247 ITEM_ICMP6_ND_OPT_SLA_ETH,
248 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
249 ITEM_ICMP6_ND_OPT_TLA_ETH,
250 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
263 ITEM_HIGIG2_CLASSIFICATION,
269 ITEM_L2TPV3OIP_SESSION_ID,
279 ITEM_ECPRI_COMMON_TYPE,
280 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
281 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
282 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
283 ITEM_ECPRI_MSG_IQ_DATA_PCID,
284 ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
285 ITEM_ECPRI_MSG_DLY_MSR_MSRID,
287 /* Validate/create actions. */
307 ACTION_RSS_FUNC_DEFAULT,
308 ACTION_RSS_FUNC_TOEPLITZ,
309 ACTION_RSS_FUNC_SIMPLE_XOR,
310 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
322 ACTION_PHY_PORT_ORIGINAL,
323 ACTION_PHY_PORT_INDEX,
325 ACTION_PORT_ID_ORIGINAL,
329 ACTION_OF_SET_MPLS_TTL,
330 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
331 ACTION_OF_DEC_MPLS_TTL,
332 ACTION_OF_SET_NW_TTL,
333 ACTION_OF_SET_NW_TTL_NW_TTL,
334 ACTION_OF_DEC_NW_TTL,
335 ACTION_OF_COPY_TTL_OUT,
336 ACTION_OF_COPY_TTL_IN,
339 ACTION_OF_PUSH_VLAN_ETHERTYPE,
340 ACTION_OF_SET_VLAN_VID,
341 ACTION_OF_SET_VLAN_VID_VLAN_VID,
342 ACTION_OF_SET_VLAN_PCP,
343 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
345 ACTION_OF_POP_MPLS_ETHERTYPE,
347 ACTION_OF_PUSH_MPLS_ETHERTYPE,
354 ACTION_MPLSOGRE_ENCAP,
355 ACTION_MPLSOGRE_DECAP,
356 ACTION_MPLSOUDP_ENCAP,
357 ACTION_MPLSOUDP_DECAP,
359 ACTION_SET_IPV4_SRC_IPV4_SRC,
361 ACTION_SET_IPV4_DST_IPV4_DST,
363 ACTION_SET_IPV6_SRC_IPV6_SRC,
365 ACTION_SET_IPV6_DST_IPV6_DST,
367 ACTION_SET_TP_SRC_TP_SRC,
369 ACTION_SET_TP_DST_TP_DST,
375 ACTION_SET_MAC_SRC_MAC_SRC,
377 ACTION_SET_MAC_DST_MAC_DST,
379 ACTION_INC_TCP_SEQ_VALUE,
381 ACTION_DEC_TCP_SEQ_VALUE,
383 ACTION_INC_TCP_ACK_VALUE,
385 ACTION_DEC_TCP_ACK_VALUE,
388 ACTION_RAW_ENCAP_INDEX,
389 ACTION_RAW_ENCAP_INDEX_VALUE,
390 ACTION_RAW_DECAP_INDEX,
391 ACTION_RAW_DECAP_INDEX_VALUE,
394 ACTION_SET_TAG_INDEX,
397 ACTION_SET_META_DATA,
398 ACTION_SET_META_MASK,
399 ACTION_SET_IPV4_DSCP,
400 ACTION_SET_IPV4_DSCP_VALUE,
401 ACTION_SET_IPV6_DSCP,
402 ACTION_SET_IPV6_DSCP_VALUE,
408 ACTION_SAMPLE_INDEX_VALUE,
410 SHARED_ACTION_ID2PTR,
413 /** Maximum size for pattern in struct rte_flow_item_raw. */
414 #define ITEM_RAW_PATTERN_SIZE 40
416 /** Storage size for struct rte_flow_item_raw including pattern. */
417 #define ITEM_RAW_SIZE \
418 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
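/*
 * The extra ITEM_RAW_PATTERN_SIZE bytes hold the byte string entered on
 * the command line; it is stored right after the structure and the item's
 * pattern pointer is made to point at it (see the raw item in token_list).
 */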
420 /** Maximum number of queue indices in struct rte_flow_action_rss. */
421 #define ACTION_RSS_QUEUE_NUM 128
423 /** Storage for struct rte_flow_action_rss including external data. */
424 struct action_rss_data {
425 struct rte_flow_action_rss conf;
426 uint8_t key[RSS_HASH_KEY_LENGTH];
427 uint16_t queue[ACTION_RSS_QUEUE_NUM];
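/*
 * conf.key and conf.queue are pointed at the key[] and queue[] arrays
 * above by parse_vc_action_rss(), so a complete RSS action configuration
 * fits in a single block of storage.
 */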
430 /** Maximum data size in struct rte_flow_action_raw_encap. */
431 #define ACTION_RAW_ENCAP_MAX_DATA 128
432 #define RAW_ENCAP_CONFS_MAX_NUM 8
434 /** Storage for struct rte_flow_action_raw_encap. */
435 struct raw_encap_conf {
436 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
437 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
441 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
443 /** Storage for struct rte_flow_action_raw_encap including external data. */
444 struct action_raw_encap_data {
445 struct rte_flow_action_raw_encap conf;
446 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
447 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
451 /** Storage for struct rte_flow_action_raw_decap. */
452 struct raw_decap_conf {
453 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
457 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
459 /** Storage for struct rte_flow_action_raw_decap including external data. */
460 struct action_raw_decap_data {
461 struct rte_flow_action_raw_decap conf;
462 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
466 struct vxlan_encap_conf vxlan_encap_conf = {
470 .vni = "\x00\x00\x00",
472 .udp_dst = RTE_BE16(RTE_VXLAN_DEFAULT_PORT),
473 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
474 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
475 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
476 "\x00\x00\x00\x00\x00\x00\x00\x01",
477 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
478 "\x00\x00\x00\x00\x00\x00\x11\x11",
482 .eth_src = "\x00\x00\x00\x00\x00\x00",
483 .eth_dst = "\xff\xff\xff\xff\xff\xff",
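/*
 * Placeholder defaults; they are normally overridden at run time through
 * the testpmd "set vxlan" commands before using the vxlan_encap action.
 */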
486 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
487 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
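/* Ethernet, VLAN, IPv4 or IPv6, UDP, VXLAN and the final END item. */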
489 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
490 struct action_vxlan_encap_data {
491 struct rte_flow_action_vxlan_encap conf;
492 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
493 struct rte_flow_item_eth item_eth;
494 struct rte_flow_item_vlan item_vlan;
496 struct rte_flow_item_ipv4 item_ipv4;
497 struct rte_flow_item_ipv6 item_ipv6;
499 struct rte_flow_item_udp item_udp;
500 struct rte_flow_item_vxlan item_vxlan;
503 struct nvgre_encap_conf nvgre_encap_conf = {
506 .tni = "\x00\x00\x00",
507 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
508 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
509 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
510 "\x00\x00\x00\x00\x00\x00\x00\x01",
511 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
512 "\x00\x00\x00\x00\x00\x00\x11\x11",
514 .eth_src = "\x00\x00\x00\x00\x00\x00",
515 .eth_dst = "\xff\xff\xff\xff\xff\xff",
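/*
 * Placeholder defaults; like vxlan_encap_conf, they are normally
 * overridden through the testpmd "set nvgre" commands.
 */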
518 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
519 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
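/* Ethernet, VLAN, IPv4 or IPv6, NVGRE and the final END item. */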
521 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
522 struct action_nvgre_encap_data {
523 struct rte_flow_action_nvgre_encap conf;
524 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
525 struct rte_flow_item_eth item_eth;
526 struct rte_flow_item_vlan item_vlan;
528 struct rte_flow_item_ipv4 item_ipv4;
529 struct rte_flow_item_ipv6 item_ipv6;
531 struct rte_flow_item_nvgre item_nvgre;
534 struct l2_encap_conf l2_encap_conf;
536 struct l2_decap_conf l2_decap_conf;
538 struct mplsogre_encap_conf mplsogre_encap_conf;
540 struct mplsogre_decap_conf mplsogre_decap_conf;
542 struct mplsoudp_encap_conf mplsoudp_encap_conf;
544 struct mplsoudp_decap_conf mplsoudp_decap_conf;
546 #define ACTION_SAMPLE_ACTIONS_NUM 10
547 #define RAW_SAMPLE_CONFS_MAX_NUM 8
548 /** Storage for struct rte_flow_action_sample including external data. */
549 struct action_sample_data {
550 struct rte_flow_action_sample conf;
553 /** Storage for struct rte_flow_action_sample. */
554 struct raw_sample_conf {
555 struct rte_flow_action data[ACTION_SAMPLE_ACTIONS_NUM];
557 struct raw_sample_conf raw_sample_confs[RAW_SAMPLE_CONFS_MAX_NUM];
558 struct rte_flow_action_mark sample_mark[RAW_SAMPLE_CONFS_MAX_NUM];
559 struct rte_flow_action_queue sample_queue[RAW_SAMPLE_CONFS_MAX_NUM];
560 struct rte_flow_action_count sample_count[RAW_SAMPLE_CONFS_MAX_NUM];
561 struct rte_flow_action_port_id sample_port_id[RAW_SAMPLE_CONFS_MAX_NUM];
562 struct rte_flow_action_raw_encap sample_encap[RAW_SAMPLE_CONFS_MAX_NUM];
564 /** Maximum number of subsequent tokens and arguments on the stack. */
565 #define CTX_STACK_SIZE 16
567 /** Parser context. */
569 /** Stack of subsequent token lists to process. */
570 const enum index *next[CTX_STACK_SIZE];
571 /** Arguments for stacked tokens. */
572 const void *args[CTX_STACK_SIZE];
573 enum index curr; /**< Current token index. */
574 enum index prev; /**< Index of the last token seen. */
575 int next_num; /**< Number of entries in next[]. */
576 int args_num; /**< Number of entries in args[]. */
577 uint32_t eol:1; /**< EOL has been detected. */
578 uint32_t last:1; /**< No more arguments. */
579 portid_t port; /**< Current port ID (for completions). */
580 uint32_t objdata; /**< Object-specific data. */
581 void *object; /**< Address of current object for relative offsets. */
582 void *objmask; /**< Object to which a full mask must be written. */
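/*
 * The parser keeps a single such context per command line; it is reset
 * before parsing starts and updated as each token is processed.
 */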
585 /** Token argument. */
587 uint32_t hton:1; /**< Use network byte ordering. */
588 uint32_t sign:1; /**< Value is signed. */
589 uint32_t bounded:1; /**< Value is bounded. */
590 uintmax_t min; /**< Minimum value if bounded. */
591 uintmax_t max; /**< Maximum value if bounded. */
592 uint32_t offset; /**< Relative offset from ctx->object. */
593 uint32_t size; /**< Field size. */
594 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
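/*
 * When mask is non-NULL, the parsed value is written only to the bits set
 * in that mask (see ARGS_ENTRY_BF() and ARGS_ENTRY_MASK() below).
 */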
597 /** Parser token definition. */
599 /** Type displayed during completion (defaults to "TOKEN"). */
601 /** Help displayed during completion (defaults to token name). */
603 /** Private data used by parser functions. */
606 * Lists of subsequent tokens to push on the stack. Each call to the
607 * parser consumes the last entry of that stack.
609 const enum index *const *next;
610 /** Arguments stack for subsequent tokens that need them. */
611 const struct arg *const *args;
613 * Token-processing callback, returns -1 in case of error, the
614 * length of the matched string otherwise. If NULL, attempts to
615 * match the token name.
617 * If buf is not NULL, the result should be stored in it according
618 * to context. An error is returned if not large enough.
620 int (*call)(struct context *ctx, const struct token *token,
621 const char *str, unsigned int len,
622 void *buf, unsigned int size);
624 * Callback that provides possible values for this token, used for
625 * completion. Returns -1 in case of error, the number of possible
626 * values otherwise. If NULL, the token name is used.
628 * If buf is not NULL, entry index ent is written to buf and the
629 * full length of the entry is returned (same behavior as
632 int (*comp)(struct context *ctx, const struct token *token,
633 unsigned int ent, char *buf, unsigned int size);
634 /** Mandatory token name, no default value. */
638 /** Static initializer for the next field. */
639 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
641 /** Static initializer for a NEXT() entry. */
642 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
644 /** Static initializer for the args field. */
645 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
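/*
 * Example taken from the token list below: "flow validate" uses
 *   .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
 *   .args = ARGS(ARGS_ENTRY(struct buffer, port)),
 * i.e. the token must be followed by a port ID (stored in the "port"
 * field of struct buffer), then by any token from next_vc_attr; NEXT()
 * lists are consumed from the last entry to the first.
 */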
647 /** Static initializer for ARGS() to target a field. */
648 #define ARGS_ENTRY(s, f) \
649 (&(const struct arg){ \
650 .offset = offsetof(s, f), \
651 .size = sizeof(((s *)0)->f), \
654 /** Static initializer for ARGS() to target a bit-field. */
655 #define ARGS_ENTRY_BF(s, f, b) \
656 (&(const struct arg){ \
658 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
661 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
662 #define ARGS_ENTRY_MASK(s, f, m) \
663 (&(const struct arg){ \
664 .offset = offsetof(s, f), \
665 .size = sizeof(((s *)0)->f), \
666 .mask = (const void *)(m), \
669 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
670 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
671 (&(const struct arg){ \
673 .offset = offsetof(s, f), \
674 .size = sizeof(((s *)0)->f), \
675 .mask = (const void *)(m), \
678 /** Static initializer for ARGS() to target a pointer. */
679 #define ARGS_ENTRY_PTR(s, f) \
680 (&(const struct arg){ \
681 .size = sizeof(*((s *)0)->f), \
684 /** Static initializer for ARGS() with arbitrary offset and size. */
685 #define ARGS_ENTRY_ARB(o, s) \
686 (&(const struct arg){ \
691 /** Same as ARGS_ENTRY_ARB() with bounded values. */
692 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
693 (&(const struct arg){ \
701 /** Same as ARGS_ENTRY() using network byte ordering. */
702 #define ARGS_ENTRY_HTON(s, f) \
703 (&(const struct arg){ \
705 .offset = offsetof(s, f), \
706 .size = sizeof(((s *)0)->f), \
709 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
710 #define ARG_ENTRY_HTON(s) \
711 (&(const struct arg){ \
717 /** Parser output buffer layout expected by cmd_flow_parsed(). */
719 enum index command; /**< Flow command. */
720 portid_t port; /**< Affected port ID. */
724 uint32_t action_id_n;
725 } sa_destroy; /**< Shared action destroy arguments. */
728 } sa; /**< Shared action query arguments. */
730 struct rte_flow_attr attr;
731 struct tunnel_ops tunnel_ops;
732 struct rte_flow_item *pattern;
733 struct rte_flow_action *actions;
737 } vc; /**< Validate/create arguments. */
741 } destroy; /**< Destroy arguments. */
744 } dump; /**< Dump arguments. */
747 struct rte_flow_action action;
748 } query; /**< Query arguments. */
752 } list; /**< List arguments. */
755 } isolate; /**< Isolated mode arguments. */
758 } aged; /**< Aged arguments. */
759 } args; /**< Command arguments. */
762 /** Private data for pattern items. */
763 struct parse_item_priv {
764 enum rte_flow_item_type type; /**< Item type. */
765 uint32_t size; /**< Size of item specification structure. */
768 #define PRIV_ITEM(t, s) \
769 (&(const struct parse_item_priv){ \
770 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
774 /** Private data for actions. */
775 struct parse_action_priv {
776 enum rte_flow_action_type type; /**< Action type. */
777 uint32_t size; /**< Size of action configuration structure. */
780 #define PRIV_ACTION(t, s) \
781 (&(const struct parse_action_priv){ \
782 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
786 static const enum index next_sa_create_attr[] = {
787 SHARED_ACTION_CREATE_ID,
788 SHARED_ACTION_INGRESS,
789 SHARED_ACTION_EGRESS,
790 SHARED_ACTION_TRANSFER,
795 static const enum index next_sa_subcmd[] = {
796 SHARED_ACTION_CREATE,
797 SHARED_ACTION_UPDATE,
798 SHARED_ACTION_DESTROY,
803 static const enum index next_vc_attr[] = {
815 static const enum index next_destroy_attr[] = {
821 static const enum index next_dump_attr[] = {
827 static const enum index next_list_attr[] = {
833 static const enum index next_aged_attr[] = {
839 static const enum index next_sa_destroy_attr[] = {
840 SHARED_ACTION_DESTROY_ID,
845 static const enum index item_param[] = {
854 static const enum index next_item[] = {
891 ITEM_ICMP6_ND_OPT_SLA_ETH,
892 ITEM_ICMP6_ND_OPT_TLA_ETH,
910 static const enum index item_fuzzy[] = {
916 static const enum index item_any[] = {
922 static const enum index item_vf[] = {
928 static const enum index item_phy_port[] = {
934 static const enum index item_port_id[] = {
940 static const enum index item_mark[] = {
946 static const enum index item_raw[] = {
956 static const enum index item_eth[] = {
965 static const enum index item_vlan[] = {
970 ITEM_VLAN_INNER_TYPE,
971 ITEM_VLAN_HAS_MORE_VLAN,
976 static const enum index item_ipv4[] = {
978 ITEM_IPV4_FRAGMENT_OFFSET,
987 static const enum index item_ipv6[] = {
994 ITEM_IPV6_HAS_FRAG_EXT,
999 static const enum index item_icmp[] = {
1008 static const enum index item_udp[] = {
1015 static const enum index item_tcp[] = {
1023 static const enum index item_sctp[] = {
1032 static const enum index item_vxlan[] = {
1038 static const enum index item_e_tag[] = {
1039 ITEM_E_TAG_GRP_ECID_B,
1044 static const enum index item_nvgre[] = {
1050 static const enum index item_mpls[] = {
1058 static const enum index item_gre[] = {
1060 ITEM_GRE_C_RSVD0_VER,
1068 static const enum index item_gre_key[] = {
1074 static const enum index item_gtp[] = {
1082 static const enum index item_geneve[] = {
1089 static const enum index item_vxlan_gpe[] = {
1095 static const enum index item_arp_eth_ipv4[] = {
1096 ITEM_ARP_ETH_IPV4_SHA,
1097 ITEM_ARP_ETH_IPV4_SPA,
1098 ITEM_ARP_ETH_IPV4_THA,
1099 ITEM_ARP_ETH_IPV4_TPA,
1104 static const enum index item_ipv6_ext[] = {
1105 ITEM_IPV6_EXT_NEXT_HDR,
1110 static const enum index item_ipv6_frag_ext[] = {
1111 ITEM_IPV6_FRAG_EXT_NEXT_HDR,
1112 ITEM_IPV6_FRAG_EXT_FRAG_DATA,
1117 static const enum index item_icmp6[] = {
1124 static const enum index item_icmp6_nd_ns[] = {
1125 ITEM_ICMP6_ND_NS_TARGET_ADDR,
1130 static const enum index item_icmp6_nd_na[] = {
1131 ITEM_ICMP6_ND_NA_TARGET_ADDR,
1136 static const enum index item_icmp6_nd_opt[] = {
1137 ITEM_ICMP6_ND_OPT_TYPE,
1142 static const enum index item_icmp6_nd_opt_sla_eth[] = {
1143 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
1148 static const enum index item_icmp6_nd_opt_tla_eth[] = {
1149 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
1154 static const enum index item_meta[] = {
1160 static const enum index item_gtp_psc[] = {
1167 static const enum index item_pppoed[] = {
1173 static const enum index item_pppoes[] = {
1179 static const enum index item_pppoe_proto_id[] = {
1184 static const enum index item_higig2[] = {
1185 ITEM_HIGIG2_CLASSIFICATION,
1191 static const enum index item_esp[] = {
1197 static const enum index item_ah[] = {
1203 static const enum index item_pfcp[] = {
1210 static const enum index next_set_raw[] = {
1216 static const enum index item_tag[] = {
1223 static const enum index item_l2tpv3oip[] = {
1224 ITEM_L2TPV3OIP_SESSION_ID,
1229 static const enum index item_ecpri[] = {
1235 static const enum index item_ecpri_common[] = {
1236 ITEM_ECPRI_COMMON_TYPE,
1240 static const enum index item_ecpri_common_type[] = {
1241 ITEM_ECPRI_COMMON_TYPE_IQ_DATA,
1242 ITEM_ECPRI_COMMON_TYPE_RTC_CTRL,
1243 ITEM_ECPRI_COMMON_TYPE_DLY_MSR,
1247 static const enum index next_action[] = {
1263 ACTION_OF_SET_MPLS_TTL,
1264 ACTION_OF_DEC_MPLS_TTL,
1265 ACTION_OF_SET_NW_TTL,
1266 ACTION_OF_DEC_NW_TTL,
1267 ACTION_OF_COPY_TTL_OUT,
1268 ACTION_OF_COPY_TTL_IN,
1270 ACTION_OF_PUSH_VLAN,
1271 ACTION_OF_SET_VLAN_VID,
1272 ACTION_OF_SET_VLAN_PCP,
1274 ACTION_OF_PUSH_MPLS,
1281 ACTION_MPLSOGRE_ENCAP,
1282 ACTION_MPLSOGRE_DECAP,
1283 ACTION_MPLSOUDP_ENCAP,
1284 ACTION_MPLSOUDP_DECAP,
1285 ACTION_SET_IPV4_SRC,
1286 ACTION_SET_IPV4_DST,
1287 ACTION_SET_IPV6_SRC,
1288 ACTION_SET_IPV6_DST,
1304 ACTION_SET_IPV4_DSCP,
1305 ACTION_SET_IPV6_DSCP,
1312 static const enum index action_mark[] = {
1318 static const enum index action_queue[] = {
1324 static const enum index action_count[] = {
1326 ACTION_COUNT_SHARED,
1331 static const enum index action_rss[] = {
1342 static const enum index action_vf[] = {
1349 static const enum index action_phy_port[] = {
1350 ACTION_PHY_PORT_ORIGINAL,
1351 ACTION_PHY_PORT_INDEX,
1356 static const enum index action_port_id[] = {
1357 ACTION_PORT_ID_ORIGINAL,
1363 static const enum index action_meter[] = {
1369 static const enum index action_of_set_mpls_ttl[] = {
1370 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1375 static const enum index action_of_set_nw_ttl[] = {
1376 ACTION_OF_SET_NW_TTL_NW_TTL,
1381 static const enum index action_of_push_vlan[] = {
1382 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1387 static const enum index action_of_set_vlan_vid[] = {
1388 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1393 static const enum index action_of_set_vlan_pcp[] = {
1394 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1399 static const enum index action_of_pop_mpls[] = {
1400 ACTION_OF_POP_MPLS_ETHERTYPE,
1405 static const enum index action_of_push_mpls[] = {
1406 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1411 static const enum index action_set_ipv4_src[] = {
1412 ACTION_SET_IPV4_SRC_IPV4_SRC,
1417 static const enum index action_set_mac_src[] = {
1418 ACTION_SET_MAC_SRC_MAC_SRC,
1423 static const enum index action_set_ipv4_dst[] = {
1424 ACTION_SET_IPV4_DST_IPV4_DST,
1429 static const enum index action_set_ipv6_src[] = {
1430 ACTION_SET_IPV6_SRC_IPV6_SRC,
1435 static const enum index action_set_ipv6_dst[] = {
1436 ACTION_SET_IPV6_DST_IPV6_DST,
1441 static const enum index action_set_tp_src[] = {
1442 ACTION_SET_TP_SRC_TP_SRC,
1447 static const enum index action_set_tp_dst[] = {
1448 ACTION_SET_TP_DST_TP_DST,
1453 static const enum index action_set_ttl[] = {
1459 static const enum index action_jump[] = {
1465 static const enum index action_set_mac_dst[] = {
1466 ACTION_SET_MAC_DST_MAC_DST,
1471 static const enum index action_inc_tcp_seq[] = {
1472 ACTION_INC_TCP_SEQ_VALUE,
1477 static const enum index action_dec_tcp_seq[] = {
1478 ACTION_DEC_TCP_SEQ_VALUE,
1483 static const enum index action_inc_tcp_ack[] = {
1484 ACTION_INC_TCP_ACK_VALUE,
1489 static const enum index action_dec_tcp_ack[] = {
1490 ACTION_DEC_TCP_ACK_VALUE,
1495 static const enum index action_raw_encap[] = {
1496 ACTION_RAW_ENCAP_INDEX,
1501 static const enum index action_raw_decap[] = {
1502 ACTION_RAW_DECAP_INDEX,
1507 static const enum index action_set_tag[] = {
1508 ACTION_SET_TAG_DATA,
1509 ACTION_SET_TAG_INDEX,
1510 ACTION_SET_TAG_MASK,
1515 static const enum index action_set_meta[] = {
1516 ACTION_SET_META_DATA,
1517 ACTION_SET_META_MASK,
1522 static const enum index action_set_ipv4_dscp[] = {
1523 ACTION_SET_IPV4_DSCP_VALUE,
1528 static const enum index action_set_ipv6_dscp[] = {
1529 ACTION_SET_IPV6_DSCP_VALUE,
1534 static const enum index action_age[] = {
1541 static const enum index action_sample[] = {
1543 ACTION_SAMPLE_RATIO,
1544 ACTION_SAMPLE_INDEX,
1549 static const enum index next_action_sample[] = {
1559 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1560 const char *, unsigned int,
1561 void *, unsigned int);
1562 static int parse_set_sample_action(struct context *, const struct token *,
1563 const char *, unsigned int,
1564 void *, unsigned int);
1565 static int parse_set_init(struct context *, const struct token *,
1566 const char *, unsigned int,
1567 void *, unsigned int);
1568 static int parse_init(struct context *, const struct token *,
1569 const char *, unsigned int,
1570 void *, unsigned int);
1571 static int parse_vc(struct context *, const struct token *,
1572 const char *, unsigned int,
1573 void *, unsigned int);
1574 static int parse_vc_spec(struct context *, const struct token *,
1575 const char *, unsigned int, void *, unsigned int);
1576 static int parse_vc_conf(struct context *, const struct token *,
1577 const char *, unsigned int, void *, unsigned int);
1578 static int parse_vc_item_ecpri_type(struct context *, const struct token *,
1579 const char *, unsigned int,
1580 void *, unsigned int);
1581 static int parse_vc_action_rss(struct context *, const struct token *,
1582 const char *, unsigned int, void *,
1584 static int parse_vc_action_rss_func(struct context *, const struct token *,
1585 const char *, unsigned int, void *,
1587 static int parse_vc_action_rss_type(struct context *, const struct token *,
1588 const char *, unsigned int, void *,
1590 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1591 const char *, unsigned int, void *,
1593 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1594 const char *, unsigned int, void *,
1596 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1597 const char *, unsigned int, void *,
1599 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1600 const char *, unsigned int, void *,
1602 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1603 const char *, unsigned int, void *,
1605 static int parse_vc_action_mplsogre_encap(struct context *,
1606 const struct token *, const char *,
1607 unsigned int, void *, unsigned int);
1608 static int parse_vc_action_mplsogre_decap(struct context *,
1609 const struct token *, const char *,
1610 unsigned int, void *, unsigned int);
1611 static int parse_vc_action_mplsoudp_encap(struct context *,
1612 const struct token *, const char *,
1613 unsigned int, void *, unsigned int);
1614 static int parse_vc_action_mplsoudp_decap(struct context *,
1615 const struct token *, const char *,
1616 unsigned int, void *, unsigned int);
1617 static int parse_vc_action_raw_encap(struct context *,
1618 const struct token *, const char *,
1619 unsigned int, void *, unsigned int);
1620 static int parse_vc_action_raw_decap(struct context *,
1621 const struct token *, const char *,
1622 unsigned int, void *, unsigned int);
1623 static int parse_vc_action_raw_encap_index(struct context *,
1624 const struct token *, const char *,
1625 unsigned int, void *, unsigned int);
1626 static int parse_vc_action_raw_decap_index(struct context *,
1627 const struct token *, const char *,
1628 unsigned int, void *, unsigned int);
1629 static int parse_vc_action_set_meta(struct context *ctx,
1630 const struct token *token, const char *str,
1631 unsigned int len, void *buf,
1633 static int parse_vc_action_sample(struct context *ctx,
1634 const struct token *token, const char *str,
1635 unsigned int len, void *buf,
1638 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
1639 const char *str, unsigned int len, void *buf,
1641 static int parse_destroy(struct context *, const struct token *,
1642 const char *, unsigned int,
1643 void *, unsigned int);
1644 static int parse_flush(struct context *, const struct token *,
1645 const char *, unsigned int,
1646 void *, unsigned int);
1647 static int parse_dump(struct context *, const struct token *,
1648 const char *, unsigned int,
1649 void *, unsigned int);
1650 static int parse_query(struct context *, const struct token *,
1651 const char *, unsigned int,
1652 void *, unsigned int);
1653 static int parse_action(struct context *, const struct token *,
1654 const char *, unsigned int,
1655 void *, unsigned int);
1656 static int parse_list(struct context *, const struct token *,
1657 const char *, unsigned int,
1658 void *, unsigned int);
1659 static int parse_aged(struct context *, const struct token *,
1660 const char *, unsigned int,
1661 void *, unsigned int);
1662 static int parse_isolate(struct context *, const struct token *,
1663 const char *, unsigned int,
1664 void *, unsigned int);
1665 static int parse_tunnel(struct context *, const struct token *,
1666 const char *, unsigned int,
1667 void *, unsigned int);
1668 static int parse_int(struct context *, const struct token *,
1669 const char *, unsigned int,
1670 void *, unsigned int);
1671 static int parse_prefix(struct context *, const struct token *,
1672 const char *, unsigned int,
1673 void *, unsigned int);
1674 static int parse_boolean(struct context *, const struct token *,
1675 const char *, unsigned int,
1676 void *, unsigned int);
1677 static int parse_string(struct context *, const struct token *,
1678 const char *, unsigned int,
1679 void *, unsigned int);
1680 static int parse_hex(struct context *ctx, const struct token *token,
1681 const char *str, unsigned int len,
1682 void *buf, unsigned int size);
1683 static int parse_string0(struct context *, const struct token *,
1684 const char *, unsigned int,
1685 void *, unsigned int);
1686 static int parse_mac_addr(struct context *, const struct token *,
1687 const char *, unsigned int,
1688 void *, unsigned int);
1689 static int parse_ipv4_addr(struct context *, const struct token *,
1690 const char *, unsigned int,
1691 void *, unsigned int);
1692 static int parse_ipv6_addr(struct context *, const struct token *,
1693 const char *, unsigned int,
1694 void *, unsigned int);
1695 static int parse_port(struct context *, const struct token *,
1696 const char *, unsigned int,
1697 void *, unsigned int);
1698 static int parse_sa(struct context *, const struct token *,
1699 const char *, unsigned int,
1700 void *, unsigned int);
1701 static int parse_sa_destroy(struct context *ctx, const struct token *token,
1702 const char *str, unsigned int len,
1703 void *buf, unsigned int size);
1704 static int parse_sa_id2ptr(struct context *ctx, const struct token *token,
1705 const char *str, unsigned int len, void *buf,
1707 static int comp_none(struct context *, const struct token *,
1708 unsigned int, char *, unsigned int);
1709 static int comp_boolean(struct context *, const struct token *,
1710 unsigned int, char *, unsigned int);
1711 static int comp_action(struct context *, const struct token *,
1712 unsigned int, char *, unsigned int);
1713 static int comp_port(struct context *, const struct token *,
1714 unsigned int, char *, unsigned int);
1715 static int comp_rule_id(struct context *, const struct token *,
1716 unsigned int, char *, unsigned int);
1717 static int comp_vc_action_rss_type(struct context *, const struct token *,
1718 unsigned int, char *, unsigned int);
1719 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1720 unsigned int, char *, unsigned int);
1721 static int comp_set_raw_index(struct context *, const struct token *,
1722 unsigned int, char *, unsigned int);
1723 static int comp_set_sample_index(struct context *, const struct token *,
1724 unsigned int, char *, unsigned int);
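/*
 * The token graph below implements command lines such as (illustrative):
 *   flow create 0 ingress pattern eth / ipv4 / end actions queue index 1 / end
 *   flow flush 0
 *   flow query 0 0 count
 */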
1726 /** Token definitions. */
1727 static const struct token token_list[] = {
1728 /* Special tokens. */
1731 .help = "null entry, abused as the entry point",
1732 .next = NEXT(NEXT_ENTRY(FLOW)),
1737 .help = "command may end here",
1740 .name = "START_SET",
1741 .help = "null entry, abused as the entry point for set",
1742 .next = NEXT(NEXT_ENTRY(SET)),
1747 .help = "set command may end here",
1749 /* Common tokens. */
1753 .help = "integer value",
1758 .name = "{unsigned}",
1760 .help = "unsigned integer value",
1767 .help = "prefix length for bit-mask",
1768 .call = parse_prefix,
1772 .name = "{boolean}",
1774 .help = "any boolean value",
1775 .call = parse_boolean,
1776 .comp = comp_boolean,
1781 .help = "fixed string",
1782 .call = parse_string,
1788 .help = "fixed string",
1792 .name = "{file path}",
1794 .help = "file path",
1795 .call = parse_string0,
1799 .name = "{MAC address}",
1801 .help = "standard MAC address notation",
1802 .call = parse_mac_addr,
1806 .name = "{IPv4 address}",
1807 .type = "IPV4 ADDRESS",
1808 .help = "standard IPv4 address notation",
1809 .call = parse_ipv4_addr,
1813 .name = "{IPv6 address}",
1814 .type = "IPV6 ADDRESS",
1815 .help = "standard IPv6 address notation",
1816 .call = parse_ipv6_addr,
1820 .name = "{rule id}",
1822 .help = "rule identifier",
1824 .comp = comp_rule_id,
1827 .name = "{port_id}",
1829 .help = "port identifier",
1834 .name = "{group_id}",
1836 .help = "group identifier",
1840 [PRIORITY_LEVEL] = {
1843 .help = "priority level",
1847 [SHARED_ACTION_ID] = {
1848 .name = "{shared_action_id}",
1849 .type = "SHARED_ACTION_ID",
1850 .help = "shared action id",
1854 /* Top-level command. */
1857 .type = "{command} {port_id} [{arg} [...]]",
1858 .help = "manage ingress/egress flow rules",
1859 .next = NEXT(NEXT_ENTRY
1873 /* Top-level command. */
1875 .name = "shared_action",
1876 .type = "{command} {port_id} [{arg} [...]]",
1877 .help = "manage shared actions",
1878 .next = NEXT(next_sa_subcmd, NEXT_ENTRY(PORT_ID)),
1879 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1882 /* Sub-level commands. */
1883 [SHARED_ACTION_CREATE] = {
1885 .help = "create shared action",
1886 .next = NEXT(next_sa_create_attr),
1889 [SHARED_ACTION_UPDATE] = {
1891 .help = "update shared action",
1892 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_SPEC),
1893 NEXT_ENTRY(SHARED_ACTION_ID)),
1894 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
1897 [SHARED_ACTION_DESTROY] = {
1899 .help = "destroy shared action",
1900 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_DESTROY_ID)),
1901 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1902 .call = parse_sa_destroy,
1904 [SHARED_ACTION_QUERY] = {
1906 .help = "query shared action",
1907 .next = NEXT(NEXT_ENTRY(END), NEXT_ENTRY(SHARED_ACTION_ID)),
1908 .args = ARGS(ARGS_ENTRY(struct buffer, args.sa.action_id)),
1913 .help = "check whether a flow rule can be created",
1914 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1915 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1920 .help = "create a flow rule",
1921 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1922 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1927 .help = "destroy specific flow rules",
1928 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1929 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1930 .call = parse_destroy,
1934 .help = "destroy all flow rules",
1935 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1936 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1937 .call = parse_flush,
1941 .help = "dump all flow rules to file",
1942 .next = NEXT(next_dump_attr, NEXT_ENTRY(PORT_ID)),
1943 .args = ARGS(ARGS_ENTRY(struct buffer, args.dump.file),
1944 ARGS_ENTRY(struct buffer, port)),
1949 .help = "query an existing flow rule",
1950 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1951 NEXT_ENTRY(RULE_ID),
1952 NEXT_ENTRY(PORT_ID)),
1953 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1954 ARGS_ENTRY(struct buffer, args.query.rule),
1955 ARGS_ENTRY(struct buffer, port)),
1956 .call = parse_query,
1960 .help = "list existing flow rules",
1961 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1962 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1967 .help = "list and destroy aged flows",
1968 .next = NEXT(next_aged_attr, NEXT_ENTRY(PORT_ID)),
1969 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1974 .help = "restrict ingress traffic to the defined flow rules",
1975 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1976 NEXT_ENTRY(PORT_ID)),
1977 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1978 ARGS_ENTRY(struct buffer, port)),
1979 .call = parse_isolate,
1983 .help = "new tunnel API",
1984 .next = NEXT(NEXT_ENTRY
1985 (TUNNEL_CREATE, TUNNEL_LIST, TUNNEL_DESTROY)),
1986 .call = parse_tunnel,
1988 /* Tunnel arguments. */
1991 .help = "create new tunnel object",
1992 .next = NEXT(NEXT_ENTRY(TUNNEL_CREATE_TYPE),
1993 NEXT_ENTRY(PORT_ID)),
1994 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1995 .call = parse_tunnel,
1997 [TUNNEL_CREATE_TYPE] = {
1999 .help = "tunnel type",
2000 .next = NEXT(NEXT_ENTRY(FILE_PATH)),
2001 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, type)),
2002 .call = parse_tunnel,
2004 [TUNNEL_DESTROY] = {
2006 .help = "destroy tunnel",
2007 .next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
2008 NEXT_ENTRY(PORT_ID)),
2009 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2010 .call = parse_tunnel,
2012 [TUNNEL_DESTROY_ID] = {
2014 .help = "tunnel identifier to destroy",
2015 .next = NEXT(NEXT_ENTRY(UNSIGNED)),
2016 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2017 .call = parse_tunnel,
2021 .help = "list existing tunnels",
2022 .next = NEXT(NEXT_ENTRY(PORT_ID)),
2023 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
2024 .call = parse_tunnel,
2026 /* Destroy arguments. */
2029 .help = "specify a rule identifier",
2030 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
2031 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
2032 .call = parse_destroy,
2034 /* Query arguments. */
2038 .help = "action to query, must be part of the rule",
2039 .call = parse_action,
2040 .comp = comp_action,
2042 /* List arguments. */
2045 .help = "specify a group",
2046 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
2047 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
2052 .help = "specify that aged flows should be destroyed",
2056 /* Validate/create attributes. */
2059 .help = "specify a group",
2060 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
2061 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
2066 .help = "specify a priority level",
2067 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
2068 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
2073 .help = "apply rule to ingress traffic",
2074 .next = NEXT(next_vc_attr),
2079 .help = "apply rule to egress traffic",
2080 .next = NEXT(next_vc_attr),
2085 .help = "apply rule directly to endpoints found in pattern",
2086 .next = NEXT(next_vc_attr),
2090 .name = "tunnel_set",
2091 .help = "tunnel steer rule",
2092 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2093 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2097 .name = "tunnel_match",
2098 .help = "tunnel match rule",
2099 .next = NEXT(next_vc_attr, NEXT_ENTRY(UNSIGNED)),
2100 .args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
2103 /* Validate/create pattern. */
2106 .help = "submit a list of pattern items",
2107 .next = NEXT(next_item),
2112 .help = "match value perfectly (with full bit-mask)",
2113 .call = parse_vc_spec,
2115 [ITEM_PARAM_SPEC] = {
2117 .help = "match value according to configured bit-mask",
2118 .call = parse_vc_spec,
2120 [ITEM_PARAM_LAST] = {
2122 .help = "specify upper bound to establish a range",
2123 .call = parse_vc_spec,
2125 [ITEM_PARAM_MASK] = {
2127 .help = "specify bit-mask with relevant bits set to one",
2128 .call = parse_vc_spec,
2130 [ITEM_PARAM_PREFIX] = {
2132 .help = "generate bit-mask from a prefix length",
2133 .call = parse_vc_spec,
2137 .help = "specify next pattern item",
2138 .next = NEXT(next_item),
2142 .help = "end list of pattern items",
2143 .priv = PRIV_ITEM(END, 0),
2144 .next = NEXT(NEXT_ENTRY(ACTIONS)),
2149 .help = "no-op pattern item",
2150 .priv = PRIV_ITEM(VOID, 0),
2151 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2156 .help = "perform actions when pattern does not match",
2157 .priv = PRIV_ITEM(INVERT, 0),
2158 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2163 .help = "match any protocol for the current layer",
2164 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
2165 .next = NEXT(item_any),
2170 .help = "number of layers covered",
2171 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
2172 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
2176 .help = "match traffic from/to the physical function",
2177 .priv = PRIV_ITEM(PF, 0),
2178 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
2183 .help = "match traffic from/to a virtual function ID",
2184 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
2185 .next = NEXT(item_vf),
2191 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
2192 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
2196 .help = "match traffic from/to a specific physical port",
2197 .priv = PRIV_ITEM(PHY_PORT,
2198 sizeof(struct rte_flow_item_phy_port)),
2199 .next = NEXT(item_phy_port),
2202 [ITEM_PHY_PORT_INDEX] = {
2204 .help = "physical port index",
2205 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
2206 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
2210 .help = "match traffic from/to a given DPDK port ID",
2211 .priv = PRIV_ITEM(PORT_ID,
2212 sizeof(struct rte_flow_item_port_id)),
2213 .next = NEXT(item_port_id),
2216 [ITEM_PORT_ID_ID] = {
2218 .help = "DPDK port ID",
2219 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
2220 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
2224 .help = "match traffic against value set in previously matched rule",
2225 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
2226 .next = NEXT(item_mark),
2231 .help = "integer value to match against",
2232 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
2233 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
2237 .help = "match an arbitrary byte string",
2238 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
2239 .next = NEXT(item_raw),
2242 [ITEM_RAW_RELATIVE] = {
2244 .help = "look for pattern after the previous item",
2245 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2246 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2249 [ITEM_RAW_SEARCH] = {
2251 .help = "search pattern from offset (see also limit)",
2252 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
2253 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
2256 [ITEM_RAW_OFFSET] = {
2258 .help = "absolute or relative offset for pattern",
2259 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
2260 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
2262 [ITEM_RAW_LIMIT] = {
2264 .help = "search area limit for start of pattern",
2265 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
2266 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
2268 [ITEM_RAW_PATTERN] = {
2270 .help = "byte string to look for",
2271 .next = NEXT(item_raw,
2273 NEXT_ENTRY(ITEM_PARAM_IS,
2276 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
2277 ARGS_ENTRY(struct rte_flow_item_raw, length),
2278 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
2279 ITEM_RAW_PATTERN_SIZE)),
2283 .help = "match Ethernet header",
2284 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
2285 .next = NEXT(item_eth),
2290 .help = "destination MAC",
2291 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2292 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
2296 .help = "source MAC",
2297 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
2298 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
2302 .help = "EtherType",
2303 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2304 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
2306 [ITEM_ETH_HAS_VLAN] = {
2308 .help = "packet header contains VLAN",
2309 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
2310 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_eth,
2315 .help = "match 802.1Q/ad VLAN tag",
2316 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
2317 .next = NEXT(item_vlan),
2322 .help = "tag control information",
2323 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2324 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
2328 .help = "priority code point",
2329 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2330 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2335 .help = "drop eligible indicator",
2336 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2337 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2342 .help = "VLAN identifier",
2343 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2344 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2347 [ITEM_VLAN_INNER_TYPE] = {
2348 .name = "inner_type",
2349 .help = "inner EtherType",
2350 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2351 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
2354 [ITEM_VLAN_HAS_MORE_VLAN] = {
2355 .name = "has_more_vlan",
2356 .help = "packet header contains another VLAN",
2357 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2358 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_vlan,
2363 .help = "match IPv4 header",
2364 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
2365 .next = NEXT(item_ipv4),
2370 .help = "type of service",
2371 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2372 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2373 hdr.type_of_service)),
2375 [ITEM_IPV4_FRAGMENT_OFFSET] = {
2376 .name = "fragment_offset",
2377 .help = "fragmentation flags and fragment offset",
2378 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2379 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2380 hdr.fragment_offset)),
2384 .help = "time to live",
2385 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2386 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2389 [ITEM_IPV4_PROTO] = {
2391 .help = "next protocol ID",
2392 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2393 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2394 hdr.next_proto_id)),
2398 .help = "source address",
2399 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2400 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2405 .help = "destination address",
2406 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2407 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2412 .help = "match IPv6 header",
2413 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
2414 .next = NEXT(item_ipv6),
2419 .help = "traffic class",
2420 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2421 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2423 "\x0f\xf0\x00\x00")),
2425 [ITEM_IPV6_FLOW] = {
2427 .help = "flow label",
2428 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2429 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2431 "\x00\x0f\xff\xff")),
2433 [ITEM_IPV6_PROTO] = {
2435 .help = "protocol (next header)",
2436 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2437 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2442 .help = "hop limit",
2443 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2444 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2449 .help = "source address",
2450 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2451 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2456 .help = "destination address",
2457 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2458 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2461 [ITEM_IPV6_HAS_FRAG_EXT] = {
2462 .name = "has_frag_ext",
2463 .help = "fragment packet attribute",
2464 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2465 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_ipv6,
2470 .help = "match ICMP header",
2471 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2472 .next = NEXT(item_icmp),
2475 [ITEM_ICMP_TYPE] = {
2477 .help = "ICMP packet type",
2478 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2479 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2482 [ITEM_ICMP_CODE] = {
2484 .help = "ICMP packet code",
2485 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2486 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2489 [ITEM_ICMP_IDENT] = {
2491 .help = "ICMP packet identifier",
2492 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2493 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2498 .help = "ICMP packet sequence number",
2499 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2500 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2505 .help = "match UDP header",
2506 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2507 .next = NEXT(item_udp),
2512 .help = "UDP source port",
2513 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2514 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2519 .help = "UDP destination port",
2520 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2521 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2526 .help = "match TCP header",
2527 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2528 .next = NEXT(item_tcp),
2533 .help = "TCP source port",
2534 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2535 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2540 .help = "TCP destination port",
2541 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2542 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2545 [ITEM_TCP_FLAGS] = {
2547 .help = "TCP flags",
2548 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2549 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2554 .help = "match SCTP header",
2555 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2556 .next = NEXT(item_sctp),
2561 .help = "SCTP source port",
2562 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2563 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2568 .help = "SCTP destination port",
2569 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2570 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2575 .help = "validation tag",
2576 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2577 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2580 [ITEM_SCTP_CKSUM] = {
2583 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2584 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2589 .help = "match VXLAN header",
2590 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2591 .next = NEXT(item_vxlan),
2594 [ITEM_VXLAN_VNI] = {
2596 .help = "VXLAN identifier",
2597 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2598 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2602 .help = "match E-Tag header",
2603 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2604 .next = NEXT(item_e_tag),
2607 [ITEM_E_TAG_GRP_ECID_B] = {
2608 .name = "grp_ecid_b",
2609 .help = "GRP and E-CID base",
2610 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2611 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2617 .help = "match NVGRE header",
2618 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2619 .next = NEXT(item_nvgre),
2622 [ITEM_NVGRE_TNI] = {
2624 .help = "virtual subnet ID",
2625 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2626 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2630 .help = "match MPLS header",
2631 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2632 .next = NEXT(item_mpls),
2635 [ITEM_MPLS_LABEL] = {
2637 .help = "MPLS label",
2638 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2639 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2645 .help = "MPLS Traffic Class",
2646 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2647 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2653 .help = "MPLS Bottom-of-Stack",
2654 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2655 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2661 .help = "match GRE header",
2662 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2663 .next = NEXT(item_gre),
2666 [ITEM_GRE_PROTO] = {
2668 .help = "GRE protocol type",
2669 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2670 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2673 [ITEM_GRE_C_RSVD0_VER] = {
2674 .name = "c_rsvd0_ver",
2676 "checksum (1b), undefined (1b), key bit (1b),"
2677 " sequence number (1b), reserved 0 (9b),"
2679 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2680 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2683 [ITEM_GRE_C_BIT] = {
2685 .help = "checksum bit (C)",
2686 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2687 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2689 "\x80\x00\x00\x00")),
2691 [ITEM_GRE_S_BIT] = {
2693 .help = "sequence number bit (S)",
2694 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2695 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2697 "\x10\x00\x00\x00")),
2699 [ITEM_GRE_K_BIT] = {
2701 .help = "key bit (K)",
2702 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2703 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2705 "\x20\x00\x00\x00")),
2709 .help = "fuzzy pattern match, expected to be faster than default",
2710 .priv = PRIV_ITEM(FUZZY,
2711 sizeof(struct rte_flow_item_fuzzy)),
2712 .next = NEXT(item_fuzzy),
2715 [ITEM_FUZZY_THRESH] = {
2717 .help = "match accuracy threshold",
2718 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2719 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2724 .help = "match GTP header",
2725 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2726 .next = NEXT(item_gtp),
2729 [ITEM_GTP_FLAGS] = {
2730 .name = "v_pt_rsv_flags",
2731 .help = "GTP flags",
2732 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2733 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp,
2736 [ITEM_GTP_MSG_TYPE] = {
2738 .help = "GTP message type",
2739 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2740 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, msg_type)),
2744 .help = "tunnel endpoint identifier",
2745 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2746 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2750 .help = "match GTP-C header",
2751 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2752 .next = NEXT(item_gtp),
2757 .help = "match GTP-U header",
2758 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2759 .next = NEXT(item_gtp),
2764 .help = "match GENEVE header",
2765 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2766 .next = NEXT(item_geneve),
2769 [ITEM_GENEVE_VNI] = {
2771 .help = "virtual network identifier",
2772 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2773 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2775 [ITEM_GENEVE_PROTO] = {
2777 .help = "GENEVE protocol type",
2778 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2779 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2782 [ITEM_VXLAN_GPE] = {
2783 .name = "vxlan-gpe",
2784 .help = "match VXLAN-GPE header",
2785 .priv = PRIV_ITEM(VXLAN_GPE,
2786 sizeof(struct rte_flow_item_vxlan_gpe)),
2787 .next = NEXT(item_vxlan_gpe),
2790 [ITEM_VXLAN_GPE_VNI] = {
2792 .help = "VXLAN-GPE identifier",
2793 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2794 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2797 [ITEM_ARP_ETH_IPV4] = {
2798 .name = "arp_eth_ipv4",
2799 .help = "match ARP header for Ethernet/IPv4",
2800 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2801 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2802 .next = NEXT(item_arp_eth_ipv4),
2805 [ITEM_ARP_ETH_IPV4_SHA] = {
2807 .help = "sender hardware address",
2808 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2810 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2813 [ITEM_ARP_ETH_IPV4_SPA] = {
2815 .help = "sender IPv4 address",
2816 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2818 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2821 [ITEM_ARP_ETH_IPV4_THA] = {
2823 .help = "target hardware address",
2824 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2826 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2829 [ITEM_ARP_ETH_IPV4_TPA] = {
2831 .help = "target IPv4 address",
2832 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2834 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2839 .help = "match presence of any IPv6 extension header",
2840 .priv = PRIV_ITEM(IPV6_EXT,
2841 sizeof(struct rte_flow_item_ipv6_ext)),
2842 .next = NEXT(item_ipv6_ext),
2845 [ITEM_IPV6_EXT_NEXT_HDR] = {
2847 .help = "next header",
2848 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2849 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2852 [ITEM_IPV6_FRAG_EXT] = {
2853 .name = "ipv6_frag_ext",
2854 .help = "match presence of IPv6 fragment extension header",
2855 .priv = PRIV_ITEM(IPV6_FRAG_EXT,
2856 sizeof(struct rte_flow_item_ipv6_frag_ext)),
2857 .next = NEXT(item_ipv6_frag_ext),
2860 [ITEM_IPV6_FRAG_EXT_NEXT_HDR] = {
2862 .help = "next header",
2863 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2865 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_ipv6_frag_ext,
2868 [ITEM_IPV6_FRAG_EXT_FRAG_DATA] = {
2869 .name = "frag_data",
2870 .help = "fragment flags and offset",
2871 .next = NEXT(item_ipv6_frag_ext, NEXT_ENTRY(UNSIGNED),
2873 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_frag_ext,
2878 .help = "match any ICMPv6 header",
2879 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2880 .next = NEXT(item_icmp6),
2883 [ITEM_ICMP6_TYPE] = {
2885 .help = "ICMPv6 type",
2886 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2887 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2890 [ITEM_ICMP6_CODE] = {
2892 .help = "ICMPv6 code",
2893 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2894 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2897 [ITEM_ICMP6_ND_NS] = {
2898 .name = "icmp6_nd_ns",
2899 .help = "match ICMPv6 neighbor discovery solicitation",
2900 .priv = PRIV_ITEM(ICMP6_ND_NS,
2901 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2902 .next = NEXT(item_icmp6_nd_ns),
2905 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2906 .name = "target_addr",
2907 .help = "target address",
2908 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2910 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2913 [ITEM_ICMP6_ND_NA] = {
2914 .name = "icmp6_nd_na",
2915 .help = "match ICMPv6 neighbor discovery advertisement",
2916 .priv = PRIV_ITEM(ICMP6_ND_NA,
2917 sizeof(struct rte_flow_item_icmp6_nd_na)),
2918 .next = NEXT(item_icmp6_nd_na),
2921 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2922 .name = "target_addr",
2923 .help = "target address",
2924 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2926 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2929 [ITEM_ICMP6_ND_OPT] = {
2930 .name = "icmp6_nd_opt",
2931 .help = "match presence of any ICMPv6 neighbor discovery"
2933 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2934 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2935 .next = NEXT(item_icmp6_nd_opt),
2938 [ITEM_ICMP6_ND_OPT_TYPE] = {
2940 .help = "ND option type",
2941 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2943 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2946 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2947 .name = "icmp6_nd_opt_sla_eth",
2948 .help = "match ICMPv6 neighbor discovery source Ethernet"
2949 " link-layer address option",
2951 (ICMP6_ND_OPT_SLA_ETH,
2952 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2953 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2956 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2958 .help = "source Ethernet LLA",
2959 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2961 .args = ARGS(ARGS_ENTRY_HTON
2962 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2964 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2965 .name = "icmp6_nd_opt_tla_eth",
2966 .help = "match ICMPv6 neighbor discovery target Ethernet"
2967 " link-layer address option",
2969 (ICMP6_ND_OPT_TLA_ETH,
2970 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2971 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2974 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2976 .help = "target Ethernet LLA",
2977 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2979 .args = ARGS(ARGS_ENTRY_HTON
2980 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2984 .help = "match metadata header",
2985 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2986 .next = NEXT(item_meta),
2989 [ITEM_META_DATA] = {
2991 .help = "metadata value",
2992 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2993 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
2994 data, "\xff\xff\xff\xff")),
2998 .help = "match GRE key",
2999 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
3000 .next = NEXT(item_gre_key),
3003 [ITEM_GRE_KEY_VALUE] = {
3005 .help = "key value",
3006 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
3007 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3011 .help = "match GTP extension header with type 0x85",
3012 .priv = PRIV_ITEM(GTP_PSC,
3013 sizeof(struct rte_flow_item_gtp_psc)),
3014 .next = NEXT(item_gtp_psc),
3017 [ITEM_GTP_PSC_QFI] = {
3019 .help = "QoS flow identifier",
3020 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3021 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3024 [ITEM_GTP_PSC_PDU_T] = {
3027 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
3028 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
3033 .help = "match PPPoE session header",
3034 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
3035 .next = NEXT(item_pppoes),
3040 .help = "match PPPoE discovery header",
3041 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
3042 .next = NEXT(item_pppoed),
3045 [ITEM_PPPOE_SEID] = {
3047 .help = "session identifier",
3048 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
3049 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
3052 [ITEM_PPPOE_PROTO_ID] = {
3053 .name = "pppoe_proto_id",
3054 .help = "match PPPoE session protocol identifier",
3055 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
3056 sizeof(struct rte_flow_item_pppoe_proto_id)),
3057 .next = NEXT(item_pppoe_proto_id, NEXT_ENTRY(UNSIGNED),
3059 .args = ARGS(ARGS_ENTRY_HTON
3060 (struct rte_flow_item_pppoe_proto_id, proto_id)),
3065 .help = "matches higig2 header",
3066 .priv = PRIV_ITEM(HIGIG2,
3067 sizeof(struct rte_flow_item_higig2_hdr)),
3068 .next = NEXT(item_higig2),
3071 [ITEM_HIGIG2_CLASSIFICATION] = {
3072 .name = "classification",
3073 .help = "matches classification of higig2 header",
3074 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3075 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3076 hdr.ppt1.classification)),
3078 [ITEM_HIGIG2_VID] = {
3080 .help = "matches vid of higig2 header",
3081 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
3082 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
3087 .help = "match tag value",
3088 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
3089 .next = NEXT(item_tag),
3094 .help = "tag value to match",
3095 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
3096 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
3098 [ITEM_TAG_INDEX] = {
3100 .help = "index of tag array to match",
3101 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
3102 NEXT_ENTRY(ITEM_PARAM_IS)),
3103 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
3105 [ITEM_L2TPV3OIP] = {
3106 .name = "l2tpv3oip",
3107 .help = "match L2TPv3 over IP header",
3108 .priv = PRIV_ITEM(L2TPV3OIP,
3109 sizeof(struct rte_flow_item_l2tpv3oip)),
3110 .next = NEXT(item_l2tpv3oip),
3113 [ITEM_L2TPV3OIP_SESSION_ID] = {
3114 .name = "session_id",
3115 .help = "session identifier",
3116 .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param),
3117 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip,
3122 .help = "match ESP header",
3123 .priv = PRIV_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
3124 .next = NEXT(item_esp),
3129 .help = "security parameters index",
3130 .next = NEXT(item_esp, NEXT_ENTRY(UNSIGNED), item_param),
3131 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_esp,
3136 .help = "match AH header",
3137 .priv = PRIV_ITEM(AH, sizeof(struct rte_flow_item_ah)),
3138 .next = NEXT(item_ah),
3143 .help = "security parameters index",
3144 .next = NEXT(item_ah, NEXT_ENTRY(UNSIGNED), item_param),
3145 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ah, spi)),
3149 .help = "match PFCP header",
3150 .priv = PRIV_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
3151 .next = NEXT(item_pfcp),
3154 [ITEM_PFCP_S_FIELD] = {
3157 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp,
3161 [ITEM_PFCP_SEID] = {
3163 .help = "session endpoint identifier",
3164 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
3165 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, seid)),
3169 .help = "match eCPRI header",
3170 .priv = PRIV_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
3171 .next = NEXT(item_ecpri),
3174 [ITEM_ECPRI_COMMON] = {
3176 .help = "eCPRI common header",
3177 .next = NEXT(item_ecpri_common),
3179 [ITEM_ECPRI_COMMON_TYPE] = {
3181 .help = "type of common header",
3182 .next = NEXT(item_ecpri_common_type),
3183 .args = ARGS(ARG_ENTRY_HTON(struct rte_flow_item_ecpri)),
3185 [ITEM_ECPRI_COMMON_TYPE_IQ_DATA] = {
3187 .help = "Type #0: IQ Data",
3188 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
3190 .call = parse_vc_item_ecpri_type,
3192 [ITEM_ECPRI_MSG_IQ_DATA_PCID] = {
3194 .help = "Physical Channel ID",
3195 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_IQ_DATA_PCID,
3196 ITEM_ECPRI_COMMON, ITEM_NEXT),
3197 NEXT_ENTRY(UNSIGNED), item_param),
3198 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3201 [ITEM_ECPRI_COMMON_TYPE_RTC_CTRL] = {
3203 .help = "Type #2: Real-Time Control Data",
3204 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
3206 .call = parse_vc_item_ecpri_type,
3208 [ITEM_ECPRI_MSG_RTC_CTRL_RTCID] = {
3210 .help = "Real-Time Control Data ID",
3211 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_RTC_CTRL_RTCID,
3212 ITEM_ECPRI_COMMON, ITEM_NEXT),
3213 NEXT_ENTRY(UNSIGNED), item_param),
3214 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3217 [ITEM_ECPRI_COMMON_TYPE_DLY_MSR] = {
3218 .name = "delay_measure",
3219 .help = "Type #5: One-Way Delay Measurement",
3220 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
3222 .call = parse_vc_item_ecpri_type,
3224 [ITEM_ECPRI_MSG_DLY_MSR_MSRID] = {
3226 .help = "Measurement ID",
3227 .next = NEXT(NEXT_ENTRY(ITEM_ECPRI_MSG_DLY_MSR_MSRID,
3228 ITEM_ECPRI_COMMON, ITEM_NEXT),
3229 NEXT_ENTRY(UNSIGNED), item_param),
3230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ecpri,
3233 /* Validate/create actions. */
3236 .help = "submit a list of associated actions",
3237 .next = NEXT(next_action),
3242 .help = "specify next action",
3243 .next = NEXT(next_action),
3247 .help = "end list of actions",
3248 .priv = PRIV_ACTION(END, 0),
3253 .help = "no-op action",
3254 .priv = PRIV_ACTION(VOID, 0),
3255 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3258 [ACTION_PASSTHRU] = {
3260 .help = "let subsequent rule process matched packets",
3261 .priv = PRIV_ACTION(PASSTHRU, 0),
3262 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3267 .help = "redirect traffic to a given group",
3268 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
3269 .next = NEXT(action_jump),
3272 [ACTION_JUMP_GROUP] = {
3274 .help = "group to redirect traffic to",
3275 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
3276 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
3277 .call = parse_vc_conf,
3281 .help = "attach 32 bit value to packets",
3282 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
3283 .next = NEXT(action_mark),
3286 [ACTION_MARK_ID] = {
3288 .help = "32 bit value to return with packets",
3289 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
3290 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
3291 .call = parse_vc_conf,
3295 .help = "flag packets",
3296 .priv = PRIV_ACTION(FLAG, 0),
3297 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3302 .help = "assign packets to a given queue index",
3303 .priv = PRIV_ACTION(QUEUE,
3304 sizeof(struct rte_flow_action_queue)),
3305 .next = NEXT(action_queue),
3308 [ACTION_QUEUE_INDEX] = {
3310 .help = "queue index to use",
3311 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
3312 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
3313 .call = parse_vc_conf,
3317 .help = "drop packets (note: passthru has priority)",
3318 .priv = PRIV_ACTION(DROP, 0),
3319 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3324 .help = "enable counters for this rule",
3325 .priv = PRIV_ACTION(COUNT,
3326 sizeof(struct rte_flow_action_count)),
3327 .next = NEXT(action_count),
3330 [ACTION_COUNT_ID] = {
3331 .name = "identifier",
3332 .help = "counter identifier to use",
3333 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
3334 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
3335 .call = parse_vc_conf,
3337 [ACTION_COUNT_SHARED] = {
3339 .help = "shared counter",
3340 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
3341 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
3343 .call = parse_vc_conf,
3347 .help = "spread packets among several queues",
3348 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
3349 .next = NEXT(action_rss),
3350 .call = parse_vc_action_rss,
3352 [ACTION_RSS_FUNC] = {
3354 .help = "RSS hash function to apply",
3355 .next = NEXT(action_rss,
3356 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
3357 ACTION_RSS_FUNC_TOEPLITZ,
3358 ACTION_RSS_FUNC_SIMPLE_XOR,
3359 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
3361 [ACTION_RSS_FUNC_DEFAULT] = {
3363 .help = "default hash function",
3364 .call = parse_vc_action_rss_func,
3366 [ACTION_RSS_FUNC_TOEPLITZ] = {
3368 .help = "Toeplitz hash function",
3369 .call = parse_vc_action_rss_func,
3371 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
3372 .name = "simple_xor",
3373 .help = "simple XOR hash function",
3374 .call = parse_vc_action_rss_func,
3376 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
3377 .name = "symmetric_toeplitz",
3378 .help = "Symmetric Toeplitz hash function",
3379 .call = parse_vc_action_rss_func,
3381 [ACTION_RSS_LEVEL] = {
3383 .help = "encapsulation level for \"types\"",
3384 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3385 .args = ARGS(ARGS_ENTRY_ARB
3386 (offsetof(struct action_rss_data, conf) +
3387 offsetof(struct rte_flow_action_rss, level),
3388 sizeof(((struct rte_flow_action_rss *)0)->
3391 [ACTION_RSS_TYPES] = {
3393 .help = "specific RSS hash types",
3394 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
3396 [ACTION_RSS_TYPE] = {
3398 .help = "RSS hash type",
3399 .call = parse_vc_action_rss_type,
3400 .comp = comp_vc_action_rss_type,
3402 [ACTION_RSS_KEY] = {
3404 .help = "RSS hash key",
3405 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
3406 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
3408 (offsetof(struct action_rss_data, conf) +
3409 offsetof(struct rte_flow_action_rss, key_len),
3410 sizeof(((struct rte_flow_action_rss *)0)->
3412 ARGS_ENTRY(struct action_rss_data, key)),
3414 [ACTION_RSS_KEY_LEN] = {
3416 .help = "RSS hash key length in bytes",
3417 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
3418 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3419 (offsetof(struct action_rss_data, conf) +
3420 offsetof(struct rte_flow_action_rss, key_len),
3421 sizeof(((struct rte_flow_action_rss *)0)->
3424 RSS_HASH_KEY_LENGTH)),
3426 [ACTION_RSS_QUEUES] = {
3428 .help = "queue indices to use",
3429 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
3430 .call = parse_vc_conf,
3432 [ACTION_RSS_QUEUE] = {
3434 .help = "queue index",
3435 .call = parse_vc_action_rss_queue,
3436 .comp = comp_vc_action_rss_queue,
3440 .help = "direct traffic to physical function",
3441 .priv = PRIV_ACTION(PF, 0),
3442 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3447 .help = "direct traffic to a virtual function ID",
3448 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
3449 .next = NEXT(action_vf),
3452 [ACTION_VF_ORIGINAL] = {
3454 .help = "use original VF ID if possible",
3455 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
3456 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
3458 .call = parse_vc_conf,
3463 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
3464 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
3465 .call = parse_vc_conf,
3467 [ACTION_PHY_PORT] = {
3469 .help = "direct packets to physical port index",
3470 .priv = PRIV_ACTION(PHY_PORT,
3471 sizeof(struct rte_flow_action_phy_port)),
3472 .next = NEXT(action_phy_port),
3475 [ACTION_PHY_PORT_ORIGINAL] = {
3477 .help = "use original port index if possible",
3478 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
3479 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
3481 .call = parse_vc_conf,
3483 [ACTION_PHY_PORT_INDEX] = {
3485 .help = "physical port index",
3486 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
3487 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
3489 .call = parse_vc_conf,
3491 [ACTION_PORT_ID] = {
3493 .help = "direct matching traffic to a given DPDK port ID",
3494 .priv = PRIV_ACTION(PORT_ID,
3495 sizeof(struct rte_flow_action_port_id)),
3496 .next = NEXT(action_port_id),
3499 [ACTION_PORT_ID_ORIGINAL] = {
3501 .help = "use original DPDK port ID if possible",
3502 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
3503 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
3505 .call = parse_vc_conf,
3507 [ACTION_PORT_ID_ID] = {
3509 .help = "DPDK port ID",
3510 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
3511 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
3512 .call = parse_vc_conf,
3516 .help = "meter the directed packets at given id",
3517 .priv = PRIV_ACTION(METER,
3518 sizeof(struct rte_flow_action_meter)),
3519 .next = NEXT(action_meter),
3522 [ACTION_METER_ID] = {
3524 .help = "meter id to use",
3525 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
3526 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
3527 .call = parse_vc_conf,
3529 [ACTION_OF_SET_MPLS_TTL] = {
3530 .name = "of_set_mpls_ttl",
3531 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
3534 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
3535 .next = NEXT(action_of_set_mpls_ttl),
3538 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
3541 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
3542 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
3544 .call = parse_vc_conf,
3546 [ACTION_OF_DEC_MPLS_TTL] = {
3547 .name = "of_dec_mpls_ttl",
3548 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
3549 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
3550 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3553 [ACTION_OF_SET_NW_TTL] = {
3554 .name = "of_set_nw_ttl",
3555 .help = "OpenFlow's OFPAT_SET_NW_TTL",
3558 sizeof(struct rte_flow_action_of_set_nw_ttl)),
3559 .next = NEXT(action_of_set_nw_ttl),
3562 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
3565 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
3566 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
3568 .call = parse_vc_conf,
3570 [ACTION_OF_DEC_NW_TTL] = {
3571 .name = "of_dec_nw_ttl",
3572 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
3573 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
3574 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3577 [ACTION_OF_COPY_TTL_OUT] = {
3578 .name = "of_copy_ttl_out",
3579 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
3580 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
3581 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3584 [ACTION_OF_COPY_TTL_IN] = {
3585 .name = "of_copy_ttl_in",
3586 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
3587 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
3588 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3591 [ACTION_OF_POP_VLAN] = {
3592 .name = "of_pop_vlan",
3593 .help = "OpenFlow's OFPAT_POP_VLAN",
3594 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
3595 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3598 [ACTION_OF_PUSH_VLAN] = {
3599 .name = "of_push_vlan",
3600 .help = "OpenFlow's OFPAT_PUSH_VLAN",
3603 sizeof(struct rte_flow_action_of_push_vlan)),
3604 .next = NEXT(action_of_push_vlan),
3607 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
3608 .name = "ethertype",
3609 .help = "EtherType",
3610 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
3611 .args = ARGS(ARGS_ENTRY_HTON
3612 (struct rte_flow_action_of_push_vlan,
3614 .call = parse_vc_conf,
3616 [ACTION_OF_SET_VLAN_VID] = {
3617 .name = "of_set_vlan_vid",
3618 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
3621 sizeof(struct rte_flow_action_of_set_vlan_vid)),
3622 .next = NEXT(action_of_set_vlan_vid),
3625 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
3628 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
3629 .args = ARGS(ARGS_ENTRY_HTON
3630 (struct rte_flow_action_of_set_vlan_vid,
3632 .call = parse_vc_conf,
3634 [ACTION_OF_SET_VLAN_PCP] = {
3635 .name = "of_set_vlan_pcp",
3636 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3639 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3640 .next = NEXT(action_of_set_vlan_pcp),
3643 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3645 .help = "VLAN priority",
3646 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3647 .args = ARGS(ARGS_ENTRY_HTON
3648 (struct rte_flow_action_of_set_vlan_pcp,
3650 .call = parse_vc_conf,
3652 [ACTION_OF_POP_MPLS] = {
3653 .name = "of_pop_mpls",
3654 .help = "OpenFlow's OFPAT_POP_MPLS",
3655 .priv = PRIV_ACTION(OF_POP_MPLS,
3656 sizeof(struct rte_flow_action_of_pop_mpls)),
3657 .next = NEXT(action_of_pop_mpls),
3660 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3661 .name = "ethertype",
3662 .help = "EtherType",
3663 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3664 .args = ARGS(ARGS_ENTRY_HTON
3665 (struct rte_flow_action_of_pop_mpls,
3667 .call = parse_vc_conf,
3669 [ACTION_OF_PUSH_MPLS] = {
3670 .name = "of_push_mpls",
3671 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3674 sizeof(struct rte_flow_action_of_push_mpls)),
3675 .next = NEXT(action_of_push_mpls),
3678 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3679 .name = "ethertype",
3680 .help = "EtherType",
3681 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3682 .args = ARGS(ARGS_ENTRY_HTON
3683 (struct rte_flow_action_of_push_mpls,
3685 .call = parse_vc_conf,
3687 [ACTION_VXLAN_ENCAP] = {
3688 .name = "vxlan_encap",
3689 .help = "VXLAN encapsulation, uses configuration set by \"set"
3691 .priv = PRIV_ACTION(VXLAN_ENCAP,
3692 sizeof(struct action_vxlan_encap_data)),
3693 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3694 .call = parse_vc_action_vxlan_encap,
3696 [ACTION_VXLAN_DECAP] = {
3697 .name = "vxlan_decap",
3698 .help = "Performs a decapsulation action by stripping all"
3699 " headers of the VXLAN tunnel network overlay from the"
3701 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3702 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3705 [ACTION_NVGRE_ENCAP] = {
3706 .name = "nvgre_encap",
3707 .help = "NVGRE encapsulation, uses configuration set by \"set"
3709 .priv = PRIV_ACTION(NVGRE_ENCAP,
3710 sizeof(struct action_nvgre_encap_data)),
3711 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3712 .call = parse_vc_action_nvgre_encap,
3714 [ACTION_NVGRE_DECAP] = {
3715 .name = "nvgre_decap",
3716 .help = "Performs a decapsulation action by stripping all"
3717 " headers of the NVGRE tunnel network overlay from the"
3719 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3720 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3723 [ACTION_L2_ENCAP] = {
3725 .help = "l2 encap, uses configuration set by"
3726 " \"set l2_encap\"",
3727 .priv = PRIV_ACTION(RAW_ENCAP,
3728 sizeof(struct action_raw_encap_data)),
3729 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3730 .call = parse_vc_action_l2_encap,
3732 [ACTION_L2_DECAP] = {
3734 .help = "l2 decap, uses configuration set by"
3735 " \"set l2_decap\"",
3736 .priv = PRIV_ACTION(RAW_DECAP,
3737 sizeof(struct action_raw_decap_data)),
3738 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3739 .call = parse_vc_action_l2_decap,
3741 [ACTION_MPLSOGRE_ENCAP] = {
3742 .name = "mplsogre_encap",
3743 .help = "mplsogre encapsulation, uses configuration set by"
3744 " \"set mplsogre_encap\"",
3745 .priv = PRIV_ACTION(RAW_ENCAP,
3746 sizeof(struct action_raw_encap_data)),
3747 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3748 .call = parse_vc_action_mplsogre_encap,
3750 [ACTION_MPLSOGRE_DECAP] = {
3751 .name = "mplsogre_decap",
3752 .help = "mplsogre decapsulation, uses configuration set by"
3753 " \"set mplsogre_decap\"",
3754 .priv = PRIV_ACTION(RAW_DECAP,
3755 sizeof(struct action_raw_decap_data)),
3756 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3757 .call = parse_vc_action_mplsogre_decap,
3759 [ACTION_MPLSOUDP_ENCAP] = {
3760 .name = "mplsoudp_encap",
3761 .help = "mplsoudp encapsulation, uses configuration set by"
3762 " \"set mplsoudp_encap\"",
3763 .priv = PRIV_ACTION(RAW_ENCAP,
3764 sizeof(struct action_raw_encap_data)),
3765 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3766 .call = parse_vc_action_mplsoudp_encap,
3768 [ACTION_MPLSOUDP_DECAP] = {
3769 .name = "mplsoudp_decap",
3770 .help = "mplsoudp decapsulation, uses configuration set by"
3771 " \"set mplsoudp_decap\"",
3772 .priv = PRIV_ACTION(RAW_DECAP,
3773 sizeof(struct action_raw_decap_data)),
3774 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3775 .call = parse_vc_action_mplsoudp_decap,
3777 [ACTION_SET_IPV4_SRC] = {
3778 .name = "set_ipv4_src",
3779 .help = "Set a new IPv4 source address in the outermost"
3781 .priv = PRIV_ACTION(SET_IPV4_SRC,
3782 sizeof(struct rte_flow_action_set_ipv4)),
3783 .next = NEXT(action_set_ipv4_src),
3786 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3787 .name = "ipv4_addr",
3788 .help = "new IPv4 source address to set",
3789 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3790 .args = ARGS(ARGS_ENTRY_HTON
3791 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3792 .call = parse_vc_conf,
3794 [ACTION_SET_IPV4_DST] = {
3795 .name = "set_ipv4_dst",
3796 .help = "Set a new IPv4 destination address in the outermost"
3798 .priv = PRIV_ACTION(SET_IPV4_DST,
3799 sizeof(struct rte_flow_action_set_ipv4)),
3800 .next = NEXT(action_set_ipv4_dst),
3803 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3804 .name = "ipv4_addr",
3805 .help = "new IPv4 destination address to set",
3806 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3807 .args = ARGS(ARGS_ENTRY_HTON
3808 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3809 .call = parse_vc_conf,
3811 [ACTION_SET_IPV6_SRC] = {
3812 .name = "set_ipv6_src",
3813 .help = "Set a new IPv6 source address in the outermost"
3815 .priv = PRIV_ACTION(SET_IPV6_SRC,
3816 sizeof(struct rte_flow_action_set_ipv6)),
3817 .next = NEXT(action_set_ipv6_src),
3820 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3821 .name = "ipv6_addr",
3822 .help = "new IPv6 source address to set",
3823 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3824 .args = ARGS(ARGS_ENTRY_HTON
3825 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3826 .call = parse_vc_conf,
3828 [ACTION_SET_IPV6_DST] = {
3829 .name = "set_ipv6_dst",
3830 .help = "Set a new IPv6 destination address in the outermost"
3832 .priv = PRIV_ACTION(SET_IPV6_DST,
3833 sizeof(struct rte_flow_action_set_ipv6)),
3834 .next = NEXT(action_set_ipv6_dst),
3837 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3838 .name = "ipv6_addr",
3839 .help = "new IPv6 destination address to set",
3840 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3841 .args = ARGS(ARGS_ENTRY_HTON
3842 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3843 .call = parse_vc_conf,
3845 [ACTION_SET_TP_SRC] = {
3846 .name = "set_tp_src",
3847 .help = "set a new source port number in the outermost"
3849 .priv = PRIV_ACTION(SET_TP_SRC,
3850 sizeof(struct rte_flow_action_set_tp)),
3851 .next = NEXT(action_set_tp_src),
3854 [ACTION_SET_TP_SRC_TP_SRC] = {
3856 .help = "new source port number to set",
3857 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3858 .args = ARGS(ARGS_ENTRY_HTON
3859 (struct rte_flow_action_set_tp, port)),
3860 .call = parse_vc_conf,
3862 [ACTION_SET_TP_DST] = {
3863 .name = "set_tp_dst",
3864 .help = "set a new destination port number in the outermost"
3866 .priv = PRIV_ACTION(SET_TP_DST,
3867 sizeof(struct rte_flow_action_set_tp)),
3868 .next = NEXT(action_set_tp_dst),
3871 [ACTION_SET_TP_DST_TP_DST] = {
3873 .help = "new destination port number to set",
3874 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3875 .args = ARGS(ARGS_ENTRY_HTON
3876 (struct rte_flow_action_set_tp, port)),
3877 .call = parse_vc_conf,
3879 [ACTION_MAC_SWAP] = {
3881 .help = "Swap the source and destination MAC addresses"
3882 " in the outermost Ethernet header",
3883 .priv = PRIV_ACTION(MAC_SWAP, 0),
3884 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3887 [ACTION_DEC_TTL] = {
3889 .help = "decrease network TTL if available",
3890 .priv = PRIV_ACTION(DEC_TTL, 0),
3891 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3894 [ACTION_SET_TTL] = {
3896 .help = "set ttl value",
3897 .priv = PRIV_ACTION(SET_TTL,
3898 sizeof(struct rte_flow_action_set_ttl)),
3899 .next = NEXT(action_set_ttl),
3902 [ACTION_SET_TTL_TTL] = {
3903 .name = "ttl_value",
3904 .help = "new ttl value to set",
3905 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3906 .args = ARGS(ARGS_ENTRY_HTON
3907 (struct rte_flow_action_set_ttl, ttl_value)),
3908 .call = parse_vc_conf,
3910 [ACTION_SET_MAC_SRC] = {
3911 .name = "set_mac_src",
3912 .help = "set source mac address",
3913 .priv = PRIV_ACTION(SET_MAC_SRC,
3914 sizeof(struct rte_flow_action_set_mac)),
3915 .next = NEXT(action_set_mac_src),
3918 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3920 .help = "new source mac address",
3921 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3922 .args = ARGS(ARGS_ENTRY_HTON
3923 (struct rte_flow_action_set_mac, mac_addr)),
3924 .call = parse_vc_conf,
3926 [ACTION_SET_MAC_DST] = {
3927 .name = "set_mac_dst",
3928 .help = "set destination mac address",
3929 .priv = PRIV_ACTION(SET_MAC_DST,
3930 sizeof(struct rte_flow_action_set_mac)),
3931 .next = NEXT(action_set_mac_dst),
3934 [ACTION_SET_MAC_DST_MAC_DST] = {
3936 .help = "new destination mac address to set",
3937 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3938 .args = ARGS(ARGS_ENTRY_HTON
3939 (struct rte_flow_action_set_mac, mac_addr)),
3940 .call = parse_vc_conf,
3942 [ACTION_INC_TCP_SEQ] = {
3943 .name = "inc_tcp_seq",
3944 .help = "increase TCP sequence number",
3945 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3946 .next = NEXT(action_inc_tcp_seq),
3949 [ACTION_INC_TCP_SEQ_VALUE] = {
3951 .help = "the value to increase TCP sequence number by",
3952 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3953 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3954 .call = parse_vc_conf,
3956 [ACTION_DEC_TCP_SEQ] = {
3957 .name = "dec_tcp_seq",
3958 .help = "decrease TCP sequence number",
3959 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3960 .next = NEXT(action_dec_tcp_seq),
3963 [ACTION_DEC_TCP_SEQ_VALUE] = {
3965 .help = "the value to decrease TCP sequence number by",
3966 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3967 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3968 .call = parse_vc_conf,
3970 [ACTION_INC_TCP_ACK] = {
3971 .name = "inc_tcp_ack",
3972 .help = "increase TCP acknowledgment number",
3973 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3974 .next = NEXT(action_inc_tcp_ack),
3977 [ACTION_INC_TCP_ACK_VALUE] = {
3979 .help = "the value to increase TCP acknowledgment number by",
3980 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3981 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3982 .call = parse_vc_conf,
3984 [ACTION_DEC_TCP_ACK] = {
3985 .name = "dec_tcp_ack",
3986 .help = "decrease TCP acknowledgment number",
3987 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3988 .next = NEXT(action_dec_tcp_ack),
3991 [ACTION_DEC_TCP_ACK_VALUE] = {
3993 .help = "the value to decrease TCP acknowledgment number by",
3994 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3995 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3996 .call = parse_vc_conf,
3998 [ACTION_RAW_ENCAP] = {
3999 .name = "raw_encap",
4000 .help = "encapsulation data, defined by set raw_encap",
4001 .priv = PRIV_ACTION(RAW_ENCAP,
4002 sizeof(struct action_raw_encap_data)),
4003 .next = NEXT(action_raw_encap),
4004 .call = parse_vc_action_raw_encap,
4006 [ACTION_RAW_ENCAP_INDEX] = {
4008 .help = "the index of raw_encap_confs",
4009 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
4011 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
4014 .help = "unsigned integer value",
4015 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4016 .call = parse_vc_action_raw_encap_index,
4017 .comp = comp_set_raw_index,
4019 [ACTION_RAW_DECAP] = {
4020 .name = "raw_decap",
4021 .help = "decapsulation data, defined by set raw_decap",
4022 .priv = PRIV_ACTION(RAW_DECAP,
4023 sizeof(struct action_raw_decap_data)),
4024 .next = NEXT(action_raw_decap),
4025 .call = parse_vc_action_raw_decap,
4027 [ACTION_RAW_DECAP_INDEX] = {
4029 .help = "the index of raw_decap_confs",
4030 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
4032 [ACTION_RAW_DECAP_INDEX_VALUE] = {
4035 .help = "unsigned integer value",
4036 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4037 .call = parse_vc_action_raw_decap_index,
4038 .comp = comp_set_raw_index,
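	/*
	 * Illustration (hedged, patterned after the testpmd raw encap/decap
	 * examples): the header data referenced by "index" is pre-built with
	 * the "set raw_encap" / "set raw_decap" commands defined further
	 * below, e.g.
	 *   set raw_encap 4 eth src is 10:11:22:33:44:55 / ipv4 / udp /
	 *       vxlan vni is 2 / end_set
	 *   flow create 0 egress pattern eth / ipv4 / end
	 *        actions raw_decap index 6 / raw_encap index 4 / end
	 * The "end_set" terminator and index values are illustrative.
	 */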
4040 /* Top-level command. */
4043 .help = "set raw encap/decap/sample data",
4044 .type = "set raw_encap|raw_decap <index> <pattern>"
4045 " or set sample_actions <index> <action>",
4046 .next = NEXT(NEXT_ENTRY
4049 SET_SAMPLE_ACTIONS)),
4050 .call = parse_set_init,
4052 /* Sub-level commands. */
4054 .name = "raw_encap",
4055 .help = "set raw encap data",
4056 .next = NEXT(next_set_raw),
4057 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4058 (offsetof(struct buffer, port),
4059 sizeof(((struct buffer *)0)->port),
4060 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4061 .call = parse_set_raw_encap_decap,
4064 .name = "raw_decap",
4065 .help = "set raw decap data",
4066 .next = NEXT(next_set_raw),
4067 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4068 (offsetof(struct buffer, port),
4069 sizeof(((struct buffer *)0)->port),
4070 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
4071 .call = parse_set_raw_encap_decap,
4076 .help = "index of raw_encap/raw_decap data",
4077 .next = NEXT(next_item),
4080 [SET_SAMPLE_INDEX] = {
4083 .help = "index of sample actions",
4084 .next = NEXT(next_action_sample),
4087 [SET_SAMPLE_ACTIONS] = {
4088 .name = "sample_actions",
4089 .help = "set sample actions list",
4090 .next = NEXT(NEXT_ENTRY(SET_SAMPLE_INDEX)),
4091 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
4092 (offsetof(struct buffer, port),
4093 sizeof(((struct buffer *)0)->port),
4094 0, RAW_SAMPLE_CONFS_MAX_NUM - 1)),
4095 .call = parse_set_sample_action,
4097 [ACTION_SET_TAG] = {
4100 .priv = PRIV_ACTION(SET_TAG,
4101 sizeof(struct rte_flow_action_set_tag)),
4102 .next = NEXT(action_set_tag),
4105 [ACTION_SET_TAG_INDEX] = {
4107 .help = "index of tag array",
4108 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4109 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
4110 .call = parse_vc_conf,
4112 [ACTION_SET_TAG_DATA] = {
4114 .help = "tag value",
4115 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4116 .args = ARGS(ARGS_ENTRY
4117 (struct rte_flow_action_set_tag, data)),
4118 .call = parse_vc_conf,
4120 [ACTION_SET_TAG_MASK] = {
4122 .help = "mask for tag value",
4123 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
4124 .args = ARGS(ARGS_ENTRY
4125 (struct rte_flow_action_set_tag, mask)),
4126 .call = parse_vc_conf,
4128 [ACTION_SET_META] = {
4130 .help = "set metadata",
4131 .priv = PRIV_ACTION(SET_META,
4132 sizeof(struct rte_flow_action_set_meta)),
4133 .next = NEXT(action_set_meta),
4134 .call = parse_vc_action_set_meta,
4136 [ACTION_SET_META_DATA] = {
4138 .help = "metadata value",
4139 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4140 .args = ARGS(ARGS_ENTRY
4141 (struct rte_flow_action_set_meta, data)),
4142 .call = parse_vc_conf,
4144 [ACTION_SET_META_MASK] = {
4146 .help = "mask for metadata value",
4147 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
4148 .args = ARGS(ARGS_ENTRY
4149 (struct rte_flow_action_set_meta, mask)),
4150 .call = parse_vc_conf,
4152 [ACTION_SET_IPV4_DSCP] = {
4153 .name = "set_ipv4_dscp",
4154 .help = "set DSCP value",
4155 .priv = PRIV_ACTION(SET_IPV4_DSCP,
4156 sizeof(struct rte_flow_action_set_dscp)),
4157 .next = NEXT(action_set_ipv4_dscp),
4160 [ACTION_SET_IPV4_DSCP_VALUE] = {
4161 .name = "dscp_value",
4162 .help = "new IPv4 DSCP value to set",
4163 .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)),
4164 .args = ARGS(ARGS_ENTRY
4165 (struct rte_flow_action_set_dscp, dscp)),
4166 .call = parse_vc_conf,
4168 [ACTION_SET_IPV6_DSCP] = {
4169 .name = "set_ipv6_dscp",
4170 .help = "set DSCP value",
4171 .priv = PRIV_ACTION(SET_IPV6_DSCP,
4172 sizeof(struct rte_flow_action_set_dscp)),
4173 .next = NEXT(action_set_ipv6_dscp),
4176 [ACTION_SET_IPV6_DSCP_VALUE] = {
4177 .name = "dscp_value",
4178 .help = "new IPv6 DSCP value to set",
4179 .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)),
4180 .args = ARGS(ARGS_ENTRY
4181 (struct rte_flow_action_set_dscp, dscp)),
4182 .call = parse_vc_conf,
4186 .help = "set flow aging parameters",
4187 .next = NEXT(action_age),
4188 .priv = PRIV_ACTION(AGE,
4189 sizeof(struct rte_flow_action_age)),
4192 [ACTION_AGE_TIMEOUT] = {
4194 .help = "flow age timeout value",
4195 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_age,
4197 .next = NEXT(action_age, NEXT_ENTRY(UNSIGNED)),
4198 .call = parse_vc_conf,
4202 .help = "set a sample action",
4203 .next = NEXT(action_sample),
4204 .priv = PRIV_ACTION(SAMPLE,
4205 sizeof(struct action_sample_data)),
4206 .call = parse_vc_action_sample,
4208 [ACTION_SAMPLE_RATIO] = {
4210 .help = "flow sample ratio value",
4211 .next = NEXT(action_sample, NEXT_ENTRY(UNSIGNED)),
4212 .args = ARGS(ARGS_ENTRY_ARB
4213 (offsetof(struct action_sample_data, conf) +
4214 offsetof(struct rte_flow_action_sample, ratio),
4215 sizeof(((struct rte_flow_action_sample *)0)->
4218 [ACTION_SAMPLE_INDEX] = {
4220 .help = "the index of sample actions list",
4221 .next = NEXT(NEXT_ENTRY(ACTION_SAMPLE_INDEX_VALUE)),
4223 [ACTION_SAMPLE_INDEX_VALUE] = {
4226 .help = "unsigned integer value",
4227 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4228 .call = parse_vc_action_sample_index,
4229 .comp = comp_set_sample_index,
4231 /* Shared action destroy arguments. */
4232 [SHARED_ACTION_DESTROY_ID] = {
4233 .name = "action_id",
4234 .help = "specify a shared action id to destroy",
4235 .next = NEXT(next_sa_destroy_attr,
4236 NEXT_ENTRY(SHARED_ACTION_ID)),
4237 .args = ARGS(ARGS_ENTRY_PTR(struct buffer,
4238 args.sa_destroy.action_id)),
4239 .call = parse_sa_destroy,
4241 /* Shared action create arguments. */
4242 [SHARED_ACTION_CREATE_ID] = {
4243 .name = "action_id",
4244 .help = "specify a shared action id to create",
4245 .next = NEXT(next_sa_create_attr,
4246 NEXT_ENTRY(SHARED_ACTION_ID)),
4247 .args = ARGS(ARGS_ENTRY(struct buffer, args.vc.attr.group)),
4251 .help = "apply shared action by id",
4252 .priv = PRIV_ACTION(SHARED, 0),
4253 .next = NEXT(NEXT_ENTRY(SHARED_ACTION_ID2PTR)),
4254 .args = ARGS(ARGS_ENTRY_ARB(0, sizeof(uint32_t))),
4257 [SHARED_ACTION_ID2PTR] = {
4258 .name = "{action_id}",
4259 .type = "SHARED_ACTION_ID",
4260 .help = "shared action id",
4261 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
4262 .call = parse_sa_id2ptr,
4265 [SHARED_ACTION_INGRESS] = {
4267 .help = "restrict the shared action to ingress",
4268 .next = NEXT(next_sa_create_attr),
4271 [SHARED_ACTION_EGRESS] = {
4273 .help = "restrict the shared action to egress",
4274 .next = NEXT(next_sa_create_attr),
4277 [SHARED_ACTION_TRANSFER] = {
4279 .help = "restrict the shared action to transfer",
4280 .next = NEXT(next_sa_create_attr),
4283 [SHARED_ACTION_SPEC] = {
4285 .help = "specify action to share",
4286 .next = NEXT(next_action),
4290 /** Remove and return last entry from argument stack. */
4291 static const struct arg *
4292 pop_args(struct context *ctx)
4294 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
4297 /** Add entry on top of the argument stack. */
4299 push_args(struct context *ctx, const struct arg *arg)
4301 if (ctx->args_num == CTX_STACK_SIZE)
4303 ctx->args[ctx->args_num++] = arg;
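/*
 * Illustration: token entries push their .args descriptors before a value
 * parser runs, and the value parser pops them to learn the storage offset,
 * size and byte ordering. For instance, parse_prefix() below begins with
 * pop_args(ctx) and pushes the argument back on its error path so the
 * stack stays balanced for the caller.
 */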
4307 /** Spread value into buffer according to bit-mask. */
4309 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
4311 uint32_t i = arg->size;
4319 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4328 unsigned int shift = 0;
4329 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
4331 for (shift = 0; arg->mask[i] >> shift; ++shift) {
4332 if (!(arg->mask[i] & (1 << shift)))
4337 *buf &= ~(1 << shift);
4338 *buf |= (val & 1) << shift;
4346 /** Compare a string with a partial one of a given length. */
4348 strcmp_partial(const char *full, const char *partial, size_t partial_len)
4350 int r = strncmp(full, partial, partial_len);
4354 if (strlen(full) <= partial_len)
4356 return full[partial_len];
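/*
 * Illustration: with full = "end" and the command-line fragment
 * "end actions ..." parsed with partial_len = 3, the result is 0 (match);
 * with full = "encap" and the same fragment it is non-zero, as is
 * full = "ends" with partial_len = 3. In other words, the length-delimited
 * token text must spell out the whole name, not merely a prefix of it.
 */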
4360 * Parse a prefix length and generate a bit-mask.
4362 * Last argument (ctx->args) is retrieved to determine mask size, storage
4363 * location and whether the result must use network byte ordering.
4366 parse_prefix(struct context *ctx, const struct token *token,
4367 const char *str, unsigned int len,
4368 void *buf, unsigned int size)
4370 const struct arg *arg = pop_args(ctx);
4371 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
4378 /* Argument is expected. */
4382 u = strtoumax(str, &end, 0);
4383 if (errno || (size_t)(end - str) != len)
4388 extra = arg_entry_bf_fill(NULL, 0, arg);
4397 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
4398 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4405 if (bytes > size || bytes + !!extra > size)
4409 buf = (uint8_t *)ctx->object + arg->offset;
4410 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4412 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
4413 memset(buf, 0x00, size - bytes);
4415 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
4419 memset(buf, 0xff, bytes);
4420 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
4422 ((uint8_t *)buf)[bytes] = conv[extra];
4425 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4428 push_args(ctx, arg);
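/*
 * Illustration: "dst prefix 24" on an IPv4 address argument produces the
 * mask 255.255.255.0, and "dst prefix 20" produces 255.255.240.0, the 0xf0
 * byte coming from the conv[] table above. A hedged testpmd example
 * (syntax as in the flow command documentation, addresses illustrative):
 *   flow create 0 ingress pattern ipv4 dst spec 10.1.0.0 dst prefix 16 / end
 *        actions drop / end
 */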
4432 /** Default parsing function for token name matching. */
4434 parse_default(struct context *ctx, const struct token *token,
4435 const char *str, unsigned int len,
4436 void *buf, unsigned int size)
4441 if (strcmp_partial(token->name, str, len))
4446 /** Parse flow command, initialize output buffer for subsequent tokens. */
4448 parse_init(struct context *ctx, const struct token *token,
4449 const char *str, unsigned int len,
4450 void *buf, unsigned int size)
4452 struct buffer *out = buf;
4454 /* Token name must match. */
4455 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4457 /* Nothing else to do if there is no buffer. */
4460 /* Make sure buffer is large enough. */
4461 if (size < sizeof(*out))
4463 /* Initialize buffer. */
4464 memset(out, 0x00, sizeof(*out));
4465 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
4468 ctx->objmask = NULL;
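/*
 * Illustration: every top-level "flow" command passes through this function
 * first; e.g. "flow flush 0" or "flow list 0" initialize the output buffer
 * here before their own sub-parsers fill in command-specific arguments.
 */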
4472 /** Parse tokens for shared action commands. */
4474 parse_sa(struct context *ctx, const struct token *token,
4475 const char *str, unsigned int len,
4476 void *buf, unsigned int size)
4478 struct buffer *out = buf;
4480 /* Token name must match. */
4481 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4483 /* Nothing else to do if there is no buffer. */
4486 if (!out->command) {
4487 if (ctx->curr != SHARED_ACTION)
4489 if (sizeof(*out) > size)
4491 out->command = ctx->curr;
4494 ctx->objmask = NULL;
4495 out->args.vc.data = (uint8_t *)out + size;
4498 switch (ctx->curr) {
4499 case SHARED_ACTION_CREATE:
4500 case SHARED_ACTION_UPDATE:
4501 out->args.vc.actions =
4502 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4504 out->args.vc.attr.group = UINT32_MAX;
4506 case SHARED_ACTION_QUERY:
4507 out->command = ctx->curr;
4510 ctx->objmask = NULL;
4512 case SHARED_ACTION_EGRESS:
4513 out->args.vc.attr.egress = 1;
4515 case SHARED_ACTION_INGRESS:
4516 out->args.vc.attr.ingress = 1;
4518 case SHARED_ACTION_TRANSFER:
4519 out->args.vc.attr.transfer = 1;
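/*
 * Illustration (hedged, following the testpmd shared action syntax):
 *   flow shared_action 0 create action_id 5 ingress
 *        action rss queues 0 1 end / end
 *   flow create 0 ingress pattern eth / end actions shared 5 / end
 * The first command builds a shared action on port 0; the second references
 * it by id through the action token described above as "apply shared action
 * by id".
 */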
4527 /** Parse tokens for shared action destroy command. */
4529 parse_sa_destroy(struct context *ctx, const struct token *token,
4530 const char *str, unsigned int len,
4531 void *buf, unsigned int size)
4533 struct buffer *out = buf;
4534 uint32_t *action_id;
4536 /* Token name must match. */
4537 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4539 /* Nothing else to do if there is no buffer. */
4542 if (!out->command || out->command == SHARED_ACTION) {
4543 if (ctx->curr != SHARED_ACTION_DESTROY)
4545 if (sizeof(*out) > size)
4547 out->command = ctx->curr;
4550 ctx->objmask = NULL;
4551 out->args.sa_destroy.action_id =
4552 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4556 action_id = out->args.sa_destroy.action_id
4557 + out->args.sa_destroy.action_id_n++;
4558 if ((uint8_t *)action_id > (uint8_t *)out + size)
4561 ctx->object = action_id;
4562 ctx->objmask = NULL;
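/*
 * Illustration: several ids may be destroyed in one command, each
 * "action_id" token appending to the array built above, e.g. (syntax
 * assumed from the shared action commands):
 *   flow shared_action 0 destroy action_id 5 action_id 6
 */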
4566 /** Parse tokens for validate/create commands. */
4568 parse_vc(struct context *ctx, const struct token *token,
4569 const char *str, unsigned int len,
4570 void *buf, unsigned int size)
4572 struct buffer *out = buf;
4576 /* Token name must match. */
4577 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4579 /* Nothing else to do if there is no buffer. */
4582 if (!out->command) {
4583 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
4585 if (sizeof(*out) > size)
4587 out->command = ctx->curr;
4590 ctx->objmask = NULL;
4591 out->args.vc.data = (uint8_t *)out + size;
4595 switch (ctx->curr) {
4597 ctx->object = &out->args.vc.attr;
4601 ctx->object = &out->args.vc.tunnel_ops;
4604 ctx->objmask = NULL;
4605 switch (ctx->curr) {
4610 out->args.vc.tunnel_ops.enabled = 1;
4611 out->args.vc.tunnel_ops.actions = 1;
4614 out->args.vc.tunnel_ops.enabled = 1;
4615 out->args.vc.tunnel_ops.items = 1;
4618 out->args.vc.attr.ingress = 1;
4621 out->args.vc.attr.egress = 1;
4624 out->args.vc.attr.transfer = 1;
4627 out->args.vc.pattern =
4628 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4630 ctx->object = out->args.vc.pattern;
4631 ctx->objmask = NULL;
4634 out->args.vc.actions =
4635 (void *)RTE_ALIGN_CEIL((uintptr_t)
4636 (out->args.vc.pattern +
4637 out->args.vc.pattern_n),
4639 ctx->object = out->args.vc.actions;
4640 ctx->objmask = NULL;
4647 if (!out->args.vc.actions) {
4648 const struct parse_item_priv *priv = token->priv;
4649 struct rte_flow_item *item =
4650 out->args.vc.pattern + out->args.vc.pattern_n;
4652 data_size = priv->size * 3; /* spec, last, mask */
4653 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4654 (out->args.vc.data - data_size),
4656 if ((uint8_t *)item + sizeof(*item) > data)
4658 *item = (struct rte_flow_item){
4661 ++out->args.vc.pattern_n;
4663 ctx->objmask = NULL;
4665 const struct parse_action_priv *priv = token->priv;
4666 struct rte_flow_action *action =
4667 out->args.vc.actions + out->args.vc.actions_n;
4669 data_size = priv->size; /* configuration */
4670 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
4671 (out->args.vc.data - data_size),
4673 if ((uint8_t *)action + sizeof(*action) > data)
4675 *action = (struct rte_flow_action){
4677 .conf = data_size ? data : NULL,
4679 ++out->args.vc.actions_n;
4680 ctx->object = action;
4681 ctx->objmask = NULL;
4683 memset(data, 0, data_size);
4684 out->args.vc.data = data;
4685 ctx->objdata = data_size;
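/*
 * Illustration of the commands this parser accepts (standard testpmd flow
 * syntax, values illustrative):
 *   flow validate 0 ingress pattern eth / ipv4 / end actions drop / end
 *   flow create 0 priority 2 ingress pattern eth / ipv4 dst is 10.0.0.1 / end
 *        actions queue index 3 / end
 * Pattern items and actions are stacked after the buffer header, while their
 * spec/last/mask and configuration payloads are packed downwards from the
 * end of the buffer (out->args.vc.data).
 */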
4689 /** Parse pattern item parameter type. */
4691 parse_vc_spec(struct context *ctx, const struct token *token,
4692 const char *str, unsigned int len,
4693 void *buf, unsigned int size)
4695 struct buffer *out = buf;
4696 struct rte_flow_item *item;
4702 /* Token name must match. */
4703 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4705 /* Parse parameter types. */
4706 switch (ctx->curr) {
4707 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
4713 case ITEM_PARAM_SPEC:
4716 case ITEM_PARAM_LAST:
4719 case ITEM_PARAM_PREFIX:
4720 /* Modify next token to expect a prefix. */
4721 if (ctx->next_num < 2)
4723 ctx->next[ctx->next_num - 2] = prefix;
4725 case ITEM_PARAM_MASK:
4731 /* Nothing else to do if there is no buffer. */
4734 if (!out->args.vc.pattern_n)
4736 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4737 data_size = ctx->objdata / 3; /* spec, last, mask */
4738 /* Point to selected object. */
4739 ctx->object = out->args.vc.data + (data_size * index);
4741 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
4742 item->mask = ctx->objmask;
4744 ctx->objmask = NULL;
4745 /* Update relevant item pointer. */
4746 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
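/*
 * Illustration: an item field can be given several times with different
 * parameter keywords, e.g. (standard testpmd item syntax)
 *   pattern ipv4 dst spec 10.0.0.0 dst last 10.0.0.255
 *           dst mask 255.255.255.0 / end
 * "is" behaves like "spec" with a full bit-mask, while "prefix" rewrites the
 * next expected token to a prefix length (see ITEM_PARAM_PREFIX above).
 */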
4751 /** Parse action configuration field. */
4753 parse_vc_conf(struct context *ctx, const struct token *token,
4754 const char *str, unsigned int len,
4755 void *buf, unsigned int size)
4757 struct buffer *out = buf;
4760 /* Token name must match. */
4761 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4763 /* Nothing else to do if there is no buffer. */
4766 /* Point to selected object. */
4767 ctx->object = out->args.vc.data;
4768 ctx->objmask = NULL;
4772 /** Parse eCPRI common header type field. */
4774 parse_vc_item_ecpri_type(struct context *ctx, const struct token *token,
4775 const char *str, unsigned int len,
4776 void *buf, unsigned int size)
4778 struct rte_flow_item_ecpri *ecpri;
4779 struct rte_flow_item_ecpri *ecpri_mask;
4780 struct rte_flow_item *item;
4783 struct buffer *out = buf;
4784 const struct arg *arg;
4787 /* Token name must match. */
4788 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4790 switch (ctx->curr) {
4791 case ITEM_ECPRI_COMMON_TYPE_IQ_DATA:
4792 msg_type = RTE_ECPRI_MSG_TYPE_IQ_DATA;
4794 case ITEM_ECPRI_COMMON_TYPE_RTC_CTRL:
4795 msg_type = RTE_ECPRI_MSG_TYPE_RTC_CTRL;
4797 case ITEM_ECPRI_COMMON_TYPE_DLY_MSR:
4798 msg_type = RTE_ECPRI_MSG_TYPE_DLY_MSR;
4805 arg = pop_args(ctx);
4808 ecpri = (struct rte_flow_item_ecpri *)out->args.vc.data;
4809 ecpri->hdr.common.type = msg_type;
4810 data_size = ctx->objdata / 3; /* spec, last, mask */
4811 ecpri_mask = (struct rte_flow_item_ecpri *)(out->args.vc.data +
4813 ecpri_mask->hdr.common.type = 0xFF;
4815 ecpri->hdr.common.u32 = rte_cpu_to_be_32(ecpri->hdr.common.u32);
4816 ecpri_mask->hdr.common.u32 =
4817 rte_cpu_to_be_32(ecpri_mask->hdr.common.u32);
4819 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4821 item->mask = ecpri_mask;
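/*
 * Illustration: selecting an eCPRI message type writes it into both the spec
 * and the mask copies of the item (mask type = 0xFF), so a rule such as the
 * following matches on the common-header type as well as on the message
 * fields (the "common"/"type" token names are assumed from the enum names
 * above):
 *   pattern ecpri common type rtc_ctrl ... / end
 */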
4825 /** Parse RSS action. */
4827 parse_vc_action_rss(struct context *ctx, const struct token *token,
4828 const char *str, unsigned int len,
4829 void *buf, unsigned int size)
4831 struct buffer *out = buf;
4832 struct rte_flow_action *action;
4833 struct action_rss_data *action_rss_data;
4837 ret = parse_vc(ctx, token, str, len, buf, size);
4840 /* Nothing else to do if there is no buffer. */
4843 if (!out->args.vc.actions_n)
4845 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4846 /* Point to selected object. */
4847 ctx->object = out->args.vc.data;
4848 ctx->objmask = NULL;
4849 /* Set up default configuration. */
4850 action_rss_data = ctx->object;
4851 *action_rss_data = (struct action_rss_data){
4852 .conf = (struct rte_flow_action_rss){
4853 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4857 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
4859 .queue = action_rss_data->queue,
4863 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
4864 action_rss_data->queue[i] = i;
4865 action->conf = &action_rss_data->conf;
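/*
 * Illustration (standard testpmd RSS action syntax): both the queue list and
 * the types list are terminated by an "end" keyword, e.g.
 *   flow create 0 ingress pattern eth / ipv4 / end
 *        actions rss types ipv4-udp end queues 0 1 2 end / end
 * When no queues are given, the default configuration built above spreads
 * traffic over the first RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM) queues.
 */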
4870 * Parse func field for RSS action.
4872 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
4873 * ACTION_RSS_FUNC_* index that called this function.
4876 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
4877 const char *str, unsigned int len,
4878 void *buf, unsigned int size)
4880 struct action_rss_data *action_rss_data;
4881 enum rte_eth_hash_function func;
4885 /* Token name must match. */
4886 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4888 switch (ctx->curr) {
4889 case ACTION_RSS_FUNC_DEFAULT:
4890 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4892 case ACTION_RSS_FUNC_TOEPLITZ:
4893 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
4895 case ACTION_RSS_FUNC_SIMPLE_XOR:
4896 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
4898 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
4899 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
4906 action_rss_data = ctx->object;
4907 action_rss_data->conf.func = func;
4912 * Parse type field for RSS action.
4914 * Valid tokens are type field names and the "end" token.
4917 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
4918 const char *str, unsigned int len,
4919 void *buf, unsigned int size)
4921 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
4922 struct action_rss_data *action_rss_data;
4928 if (ctx->curr != ACTION_RSS_TYPE)
4930 if (!(ctx->objdata >> 16) && ctx->object) {
4931 action_rss_data = ctx->object;
4932 action_rss_data->conf.types = 0;
4934 if (!strcmp_partial("end", str, len)) {
4935 ctx->objdata &= 0xffff;
4938 for (i = 0; rss_type_table[i].str; ++i)
4939 if (!strcmp_partial(rss_type_table[i].str, str, len))
4941 if (!rss_type_table[i].str)
4943 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
4945 if (ctx->next_num == RTE_DIM(ctx->next))
4947 ctx->next[ctx->next_num++] = next;
4950 action_rss_data = ctx->object;
4951 action_rss_data->conf.types |= rss_type_table[i].rss_type;
4956 * Parse queue field for RSS action.
4958 * Valid tokens are queue indices and the "end" token.
4961 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4962 const char *str, unsigned int len,
4963 void *buf, unsigned int size)
4965 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4966 struct action_rss_data *action_rss_data;
4967 const struct arg *arg;
4974 if (ctx->curr != ACTION_RSS_QUEUE)
4976 i = ctx->objdata >> 16;
4977 if (!strcmp_partial("end", str, len)) {
4978 ctx->objdata &= 0xffff;
4981 if (i >= ACTION_RSS_QUEUE_NUM)
4983 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4984 i * sizeof(action_rss_data->queue[i]),
4985 sizeof(action_rss_data->queue[i]));
4986 if (push_args(ctx, arg))
4988 ret = parse_int(ctx, token, str, len, NULL, 0);
4994 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
4996 if (ctx->next_num == RTE_DIM(ctx->next))
4998 ctx->next[ctx->next_num++] = next;
5002 action_rss_data = ctx->object;
5003 action_rss_data->conf.queue_num = i;
5004 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
5008 /** Parse VXLAN encap action. */
5010 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
5011 const char *str, unsigned int len,
5012 void *buf, unsigned int size)
5014 struct buffer *out = buf;
5015 struct rte_flow_action *action;
5016 struct action_vxlan_encap_data *action_vxlan_encap_data;
5019 ret = parse_vc(ctx, token, str, len, buf, size);
5022 /* Nothing else to do if there is no buffer. */
5025 if (!out->args.vc.actions_n)
5027 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5028 /* Point to selected object. */
5029 ctx->object = out->args.vc.data;
5030 ctx->objmask = NULL;
5031 /* Set up default configuration. */
5032 action_vxlan_encap_data = ctx->object;
5033 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
5034 .conf = (struct rte_flow_action_vxlan_encap){
5035 .definition = action_vxlan_encap_data->items,
5039 .type = RTE_FLOW_ITEM_TYPE_ETH,
5040 .spec = &action_vxlan_encap_data->item_eth,
5041 .mask = &rte_flow_item_eth_mask,
5044 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5045 .spec = &action_vxlan_encap_data->item_vlan,
5046 .mask = &rte_flow_item_vlan_mask,
5049 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5050 .spec = &action_vxlan_encap_data->item_ipv4,
5051 .mask = &rte_flow_item_ipv4_mask,
5054 .type = RTE_FLOW_ITEM_TYPE_UDP,
5055 .spec = &action_vxlan_encap_data->item_udp,
5056 .mask = &rte_flow_item_udp_mask,
5059 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
5060 .spec = &action_vxlan_encap_data->item_vxlan,
5061 .mask = &rte_flow_item_vxlan_mask,
5064 .type = RTE_FLOW_ITEM_TYPE_END,
5069 .tci = vxlan_encap_conf.vlan_tci,
5073 .src_addr = vxlan_encap_conf.ipv4_src,
5074 .dst_addr = vxlan_encap_conf.ipv4_dst,
5077 .src_port = vxlan_encap_conf.udp_src,
5078 .dst_port = vxlan_encap_conf.udp_dst,
5080 .item_vxlan.flags = 0,
5082 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
5083 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5084 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
5085 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5086 if (!vxlan_encap_conf.select_ipv4) {
5087 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
5088 &vxlan_encap_conf.ipv6_src,
5089 sizeof(vxlan_encap_conf.ipv6_src));
5090 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
5091 &vxlan_encap_conf.ipv6_dst,
5092 sizeof(vxlan_encap_conf.ipv6_dst));
5093 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
5094 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5095 .spec = &action_vxlan_encap_data->item_ipv6,
5096 .mask = &rte_flow_item_ipv6_mask,
5099 if (!vxlan_encap_conf.select_vlan)
5100 action_vxlan_encap_data->items[1].type =
5101 RTE_FLOW_ITEM_TYPE_VOID;
5102 if (vxlan_encap_conf.select_tos_ttl) {
5103 if (vxlan_encap_conf.select_ipv4) {
5104 static struct rte_flow_item_ipv4 ipv4_mask_tos;
5106 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
5107 sizeof(ipv4_mask_tos));
5108 ipv4_mask_tos.hdr.type_of_service = 0xff;
5109 ipv4_mask_tos.hdr.time_to_live = 0xff;
5110 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
5111 vxlan_encap_conf.ip_tos;
5112 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
5113 vxlan_encap_conf.ip_ttl;
5114 action_vxlan_encap_data->items[2].mask =
5117 static struct rte_flow_item_ipv6 ipv6_mask_tos;
5119 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
5120 sizeof(ipv6_mask_tos));
5121 ipv6_mask_tos.hdr.vtc_flow |=
5122 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
5123 ipv6_mask_tos.hdr.hop_limits = 0xff;
5124 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
5126 ((uint32_t)vxlan_encap_conf.ip_tos <<
5127 RTE_IPV6_HDR_TC_SHIFT);
5128 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
5129 vxlan_encap_conf.ip_ttl;
5130 action_vxlan_encap_data->items[2].mask =
5134 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
5135 RTE_DIM(vxlan_encap_conf.vni));
5136 action->conf = &action_vxlan_encap_data->conf;
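/*
 * Illustration: the encapsulation headers come from the global
 * vxlan_encap_conf filled in by the "set vxlan" command, e.g. (parameter
 * names as in the testpmd documentation, values illustrative):
 *   set vxlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4789 ip-src 127.0.0.1
 *       ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   flow create 0 ingress pattern eth / end
 *        actions vxlan_encap / queue index 0 / end
 */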
5140 /** Parse NVGRE encap action. */
5142 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
5143 const char *str, unsigned int len,
5144 void *buf, unsigned int size)
5146 struct buffer *out = buf;
5147 struct rte_flow_action *action;
5148 struct action_nvgre_encap_data *action_nvgre_encap_data;
5151 ret = parse_vc(ctx, token, str, len, buf, size);
5154 /* Nothing else to do if there is no buffer. */
5157 if (!out->args.vc.actions_n)
5159 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5160 /* Point to selected object. */
5161 ctx->object = out->args.vc.data;
5162 ctx->objmask = NULL;
5163 /* Set up default configuration. */
5164 action_nvgre_encap_data = ctx->object;
5165 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
5166 .conf = (struct rte_flow_action_nvgre_encap){
5167 .definition = action_nvgre_encap_data->items,
5171 .type = RTE_FLOW_ITEM_TYPE_ETH,
5172 .spec = &action_nvgre_encap_data->item_eth,
5173 .mask = &rte_flow_item_eth_mask,
5176 .type = RTE_FLOW_ITEM_TYPE_VLAN,
5177 .spec = &action_nvgre_encap_data->item_vlan,
5178 .mask = &rte_flow_item_vlan_mask,
5181 .type = RTE_FLOW_ITEM_TYPE_IPV4,
5182 .spec = &action_nvgre_encap_data->item_ipv4,
5183 .mask = &rte_flow_item_ipv4_mask,
5186 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
5187 .spec = &action_nvgre_encap_data->item_nvgre,
5188 .mask = &rte_flow_item_nvgre_mask,
5191 .type = RTE_FLOW_ITEM_TYPE_END,
5196 .tci = nvgre_encap_conf.vlan_tci,
5200 .src_addr = nvgre_encap_conf.ipv4_src,
5201 .dst_addr = nvgre_encap_conf.ipv4_dst,
5203 .item_nvgre.flow_id = 0,
5205 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
5206 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5207 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
5208 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5209 if (!nvgre_encap_conf.select_ipv4) {
5210 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
5211 &nvgre_encap_conf.ipv6_src,
5212 sizeof(nvgre_encap_conf.ipv6_src));
5213 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
5214 &nvgre_encap_conf.ipv6_dst,
5215 sizeof(nvgre_encap_conf.ipv6_dst));
5216 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
5217 .type = RTE_FLOW_ITEM_TYPE_IPV6,
5218 .spec = &action_nvgre_encap_data->item_ipv6,
5219 .mask = &rte_flow_item_ipv6_mask,
5222 if (!nvgre_encap_conf.select_vlan)
5223 action_nvgre_encap_data->items[1].type =
5224 RTE_FLOW_ITEM_TYPE_VOID;
5225 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
5226 RTE_DIM(nvgre_encap_conf.tni));
5227 action->conf = &action_nvgre_encap_data->conf;
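/*
 * Illustrative usage (option names as in the testpmd user guide; values
 * are examples only):
 *
 *   testpmd> set nvgre ip-version ipv4 tni 4 ip-src 10.0.0.1
 *            ip-dst 10.0.0.2 eth-src 11:11:11:11:11:11
 *            eth-dst 22:22:22:22:22:22
 *   testpmd> flow create 0 ingress pattern end
 *            actions nvgre_encap / queue index 0 / end
 */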
5231 /** Parse l2 encap action. */
5233 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
5234 const char *str, unsigned int len,
5235 void *buf, unsigned int size)
5237 struct buffer *out = buf;
5238 struct rte_flow_action *action;
5239 struct action_raw_encap_data *action_encap_data;
5240 struct rte_flow_item_eth eth = { .type = 0, };
5241 struct rte_flow_item_vlan vlan = {
5242 .tci = mplsoudp_encap_conf.vlan_tci,
5248 ret = parse_vc(ctx, token, str, len, buf, size);
5251 /* Nothing else to do if there is no buffer. */
5254 if (!out->args.vc.actions_n)
5256 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5257 /* Point to selected object. */
5258 ctx->object = out->args.vc.data;
5259 ctx->objmask = NULL;
5260 /* Copy the headers to the buffer. */
5261 action_encap_data = ctx->object;
5262 *action_encap_data = (struct action_raw_encap_data) {
5263 .conf = (struct rte_flow_action_raw_encap){
5264 .data = action_encap_data->data,
5268 header = action_encap_data->data;
5269 if (l2_encap_conf.select_vlan)
5270 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5271 else if (l2_encap_conf.select_ipv4)
5272 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5274 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5275 memcpy(eth.dst.addr_bytes,
5276 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5277 memcpy(eth.src.addr_bytes,
5278 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5280 memcpy(header, &eth, sizeof(eth));
5280 header += sizeof(eth);
5281 if (l2_encap_conf.select_vlan) {
5282 if (l2_encap_conf.select_ipv4)
5283 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5285 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5286 memcpy(header, &vlan, sizeof(vlan));
5287 header += sizeof(vlan);
5289 action_encap_data->conf.size = header -
5290 action_encap_data->data;
5291 action->conf = &action_encap_data->conf;
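/*
 * This helper and the l2 decap and MPLSoGRE/MPLSoUDP variants below share
 * the same pattern: each header is memcpy()'d into the raw encap/decap
 * data buffer through the "header" cursor, and the final size is simply
 * the distance the cursor has travelled from the start of the buffer.
 */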
5295 /** Parse l2 decap action. */
5297 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
5298 const char *str, unsigned int len,
5299 void *buf, unsigned int size)
5301 struct buffer *out = buf;
5302 struct rte_flow_action *action;
5303 struct action_raw_decap_data *action_decap_data;
5304 struct rte_flow_item_eth eth = { .type = 0, };
5305 struct rte_flow_item_vlan vlan = {
5306 .tci = mplsoudp_encap_conf.vlan_tci,
5312 ret = parse_vc(ctx, token, str, len, buf, size);
5315 /* Nothing else to do if there is no buffer. */
5318 if (!out->args.vc.actions_n)
5320 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5321 /* Point to selected object. */
5322 ctx->object = out->args.vc.data;
5323 ctx->objmask = NULL;
5324 /* Copy the headers to the buffer. */
5325 action_decap_data = ctx->object;
5326 *action_decap_data = (struct action_raw_decap_data) {
5327 .conf = (struct rte_flow_action_raw_decap){
5328 .data = action_decap_data->data,
5332 header = action_decap_data->data;
5333 if (l2_decap_conf.select_vlan)
5334 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5335 memcpy(header, &eth, sizeof(eth));
5336 header += sizeof(eth);
5337 if (l2_decap_conf.select_vlan) {
5338 memcpy(header, &vlan, sizeof(vlan));
5339 header += sizeof(vlan);
5341 action_decap_data->conf.size = header -
5342 action_decap_data->data;
5343 action->conf = &action_decap_data->conf;
5347 #define ETHER_TYPE_MPLS_UNICAST 0x8847
5349 /** Parse MPLSOGRE encap action. */
5351 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
5352 const char *str, unsigned int len,
5353 void *buf, unsigned int size)
5355 struct buffer *out = buf;
5356 struct rte_flow_action *action;
5357 struct action_raw_encap_data *action_encap_data;
5358 struct rte_flow_item_eth eth = { .type = 0, };
5359 struct rte_flow_item_vlan vlan = {
5360 .tci = mplsogre_encap_conf.vlan_tci,
5363 struct rte_flow_item_ipv4 ipv4 = {
5365 .src_addr = mplsogre_encap_conf.ipv4_src,
5366 .dst_addr = mplsogre_encap_conf.ipv4_dst,
5367 .next_proto_id = IPPROTO_GRE,
5368 .version_ihl = RTE_IPV4_VHL_DEF,
5369 .time_to_live = IPDEFTTL,
5372 struct rte_flow_item_ipv6 ipv6 = {
5374 .proto = IPPROTO_GRE,
5375 .hop_limits = IPDEFTTL,
5378 struct rte_flow_item_gre gre = {
5379 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5381 struct rte_flow_item_mpls mpls = {
5387 ret = parse_vc(ctx, token, str, len, buf, size);
5390 /* Nothing else to do if there is no buffer. */
5393 if (!out->args.vc.actions_n)
5395 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5396 /* Point to selected object. */
5397 ctx->object = out->args.vc.data;
5398 ctx->objmask = NULL;
5399 /* Copy the headers to the buffer. */
5400 action_encap_data = ctx->object;
5401 *action_encap_data = (struct action_raw_encap_data) {
5402 .conf = (struct rte_flow_action_raw_encap){
5403 .data = action_encap_data->data,
5408 header = action_encap_data->data;
5409 if (mplsogre_encap_conf.select_vlan)
5410 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5411 else if (mplsogre_encap_conf.select_ipv4)
5412 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5414 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5415 memcpy(eth.dst.addr_bytes,
5416 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5417 memcpy(eth.src.addr_bytes,
5418 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5419 memcpy(header, &eth, sizeof(eth));
5420 header += sizeof(eth);
5421 if (mplsogre_encap_conf.select_vlan) {
5422 if (mplsogre_encap_conf.select_ipv4)
5423 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5425 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5426 memcpy(header, &vlan, sizeof(vlan));
5427 header += sizeof(vlan);
5429 if (mplsogre_encap_conf.select_ipv4) {
5430 memcpy(header, &ipv4, sizeof(ipv4));
5431 header += sizeof(ipv4);
5433 memcpy(&ipv6.hdr.src_addr,
5434 &mplsogre_encap_conf.ipv6_src,
5435 sizeof(mplsogre_encap_conf.ipv6_src));
5436 memcpy(&ipv6.hdr.dst_addr,
5437 &mplsogre_encap_conf.ipv6_dst,
5438 sizeof(mplsogre_encap_conf.ipv6_dst));
5439 memcpy(header, &ipv6, sizeof(ipv6));
5440 header += sizeof(ipv6);
5442 memcpy(header, &gre, sizeof(gre));
5443 header += sizeof(gre);
5444 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
5445 RTE_DIM(mplsogre_encap_conf.label));
5446 mpls.label_tc_s[2] |= 0x1;
5447 memcpy(header, &mpls, sizeof(mpls));
5448 header += sizeof(mpls);
5449 action_encap_data->conf.size = header -
5450 action_encap_data->data;
5451 action->conf = &action_encap_data->conf;
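/*
 * Note: label_tc_s holds the 3-byte label/TC/S field of the MPLS header,
 * so OR-ing 0x1 into its last byte above sets the bottom-of-stack (S) bit
 * for the single label being pushed.
 */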
5455 /** Parse MPLSOGRE decap action. */
5457 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
5458 const char *str, unsigned int len,
5459 void *buf, unsigned int size)
5461 struct buffer *out = buf;
5462 struct rte_flow_action *action;
5463 struct action_raw_decap_data *action_decap_data;
5464 struct rte_flow_item_eth eth = { .type = 0, };
5465 struct rte_flow_item_vlan vlan = {.tci = 0};
5466 struct rte_flow_item_ipv4 ipv4 = {
5468 .next_proto_id = IPPROTO_GRE,
5471 struct rte_flow_item_ipv6 ipv6 = {
5473 .proto = IPPROTO_GRE,
5476 struct rte_flow_item_gre gre = {
5477 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
5479 struct rte_flow_item_mpls mpls;
5483 ret = parse_vc(ctx, token, str, len, buf, size);
5486 /* Nothing else to do if there is no buffer. */
5489 if (!out->args.vc.actions_n)
5491 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5492 /* Point to selected object. */
5493 ctx->object = out->args.vc.data;
5494 ctx->objmask = NULL;
5495 /* Copy the headers to the buffer. */
5496 action_decap_data = ctx->object;
5497 *action_decap_data = (struct action_raw_decap_data) {
5498 .conf = (struct rte_flow_action_raw_decap){
5499 .data = action_decap_data->data,
5503 header = action_decap_data->data;
5504 if (mplsogre_decap_conf.select_vlan)
5505 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5506 else if (mplsogre_encap_conf.select_ipv4)
5507 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5509 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5510 memcpy(eth.dst.addr_bytes,
5511 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5512 memcpy(eth.src.addr_bytes,
5513 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5514 memcpy(header, &eth, sizeof(eth));
5515 header += sizeof(eth);
5516 if (mplsogre_encap_conf.select_vlan) {
5517 if (mplsogre_encap_conf.select_ipv4)
5518 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5520 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5521 memcpy(header, &vlan, sizeof(vlan));
5522 header += sizeof(vlan);
5524 if (mplsogre_encap_conf.select_ipv4) {
5525 memcpy(header, &ipv4, sizeof(ipv4));
5526 header += sizeof(ipv4);
5528 memcpy(header, &ipv6, sizeof(ipv6));
5529 header += sizeof(ipv6);
5531 memcpy(header, &gre, sizeof(gre));
5532 header += sizeof(gre);
5533 memset(&mpls, 0, sizeof(mpls));
5534 memcpy(header, &mpls, sizeof(mpls));
5535 header += sizeof(mpls);
5536 action_decap_data->conf.size = header -
5537 action_decap_data->data;
5538 action->conf = &action_decap_data->conf;
5542 /** Parse MPLSOUDP encap action. */
5544 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
5545 const char *str, unsigned int len,
5546 void *buf, unsigned int size)
5548 struct buffer *out = buf;
5549 struct rte_flow_action *action;
5550 struct action_raw_encap_data *action_encap_data;
5551 struct rte_flow_item_eth eth = { .type = 0, };
5552 struct rte_flow_item_vlan vlan = {
5553 .tci = mplsoudp_encap_conf.vlan_tci,
5556 struct rte_flow_item_ipv4 ipv4 = {
5558 .src_addr = mplsoudp_encap_conf.ipv4_src,
5559 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
5560 .next_proto_id = IPPROTO_UDP,
5561 .version_ihl = RTE_IPV4_VHL_DEF,
5562 .time_to_live = IPDEFTTL,
5565 struct rte_flow_item_ipv6 ipv6 = {
5567 .proto = IPPROTO_UDP,
5568 .hop_limits = IPDEFTTL,
5571 struct rte_flow_item_udp udp = {
5573 .src_port = mplsoudp_encap_conf.udp_src,
5574 .dst_port = mplsoudp_encap_conf.udp_dst,
5577 struct rte_flow_item_mpls mpls;
5581 ret = parse_vc(ctx, token, str, len, buf, size);
5584 /* Nothing else to do if there is no buffer. */
5587 if (!out->args.vc.actions_n)
5589 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5590 /* Point to selected object. */
5591 ctx->object = out->args.vc.data;
5592 ctx->objmask = NULL;
5593 /* Copy the headers to the buffer. */
5594 action_encap_data = ctx->object;
5595 *action_encap_data = (struct action_raw_encap_data) {
5596 .conf = (struct rte_flow_action_raw_encap){
5597 .data = action_encap_data->data,
5602 header = action_encap_data->data;
5603 if (mplsoudp_encap_conf.select_vlan)
5604 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5605 else if (mplsoudp_encap_conf.select_ipv4)
5606 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5608 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5609 memcpy(eth.dst.addr_bytes,
5610 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5611 memcpy(eth.src.addr_bytes,
5612 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5613 memcpy(header, &eth, sizeof(eth));
5614 header += sizeof(eth);
5615 if (mplsoudp_encap_conf.select_vlan) {
5616 if (mplsoudp_encap_conf.select_ipv4)
5617 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5619 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5620 memcpy(header, &vlan, sizeof(vlan));
5621 header += sizeof(vlan);
5623 if (mplsoudp_encap_conf.select_ipv4) {
5624 memcpy(header, &ipv4, sizeof(ipv4));
5625 header += sizeof(ipv4);
5627 memcpy(&ipv6.hdr.src_addr,
5628 &mplsoudp_encap_conf.ipv6_src,
5629 sizeof(mplsoudp_encap_conf.ipv6_src));
5630 memcpy(&ipv6.hdr.dst_addr,
5631 &mplsoudp_encap_conf.ipv6_dst,
5632 sizeof(mplsoudp_encap_conf.ipv6_dst));
5633 memcpy(header, &ipv6, sizeof(ipv6));
5634 header += sizeof(ipv6);
5636 memcpy(header, &udp, sizeof(udp));
5637 header += sizeof(udp);
5638 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
5639 RTE_DIM(mplsoudp_encap_conf.label));
5640 mpls.label_tc_s[2] |= 0x1;
5641 memcpy(header, &mpls, sizeof(mpls));
5642 header += sizeof(mpls);
5643 action_encap_data->conf.size = header -
5644 action_encap_data->data;
5645 action->conf = &action_encap_data->conf;
5649 /** Parse MPLSOUDP decap action. */
5651 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
5652 const char *str, unsigned int len,
5653 void *buf, unsigned int size)
5655 struct buffer *out = buf;
5656 struct rte_flow_action *action;
5657 struct action_raw_decap_data *action_decap_data;
5658 struct rte_flow_item_eth eth = { .type = 0, };
5659 struct rte_flow_item_vlan vlan = {.tci = 0};
5660 struct rte_flow_item_ipv4 ipv4 = {
5662 .next_proto_id = IPPROTO_UDP,
5665 struct rte_flow_item_ipv6 ipv6 = {
5667 .proto = IPPROTO_UDP,
5670 struct rte_flow_item_udp udp = {
5672 .dst_port = rte_cpu_to_be_16(6635),
5675 struct rte_flow_item_mpls mpls;
5679 ret = parse_vc(ctx, token, str, len, buf, size);
5682 /* Nothing else to do if there is no buffer. */
5685 if (!out->args.vc.actions_n)
5687 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5688 /* Point to selected object. */
5689 ctx->object = out->args.vc.data;
5690 ctx->objmask = NULL;
5691 /* Copy the headers to the buffer. */
5692 action_decap_data = ctx->object;
5693 *action_decap_data = (struct action_raw_decap_data) {
5694 .conf = (struct rte_flow_action_raw_decap){
5695 .data = action_decap_data->data,
5699 header = action_decap_data->data;
5700 if (mplsoudp_decap_conf.select_vlan)
5701 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
5702 else if (mplsoudp_encap_conf.select_ipv4)
5703 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5705 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5706 memcpy(eth.dst.addr_bytes,
5707 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
5708 memcpy(eth.src.addr_bytes,
5709 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
5710 memcpy(header, &eth, sizeof(eth));
5711 header += sizeof(eth);
5712 if (mplsoudp_encap_conf.select_vlan) {
5713 if (mplsoudp_encap_conf.select_ipv4)
5714 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5716 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5717 memcpy(header, &vlan, sizeof(vlan));
5718 header += sizeof(vlan);
5720 if (mplsoudp_encap_conf.select_ipv4) {
5721 memcpy(header, &ipv4, sizeof(ipv4));
5722 header += sizeof(ipv4);
5724 memcpy(header, &ipv6, sizeof(ipv6));
5725 header += sizeof(ipv6);
5727 memcpy(header, &udp, sizeof(udp));
5728 header += sizeof(udp);
5729 memset(&mpls, 0, sizeof(mpls));
5730 memcpy(header, &mpls, sizeof(mpls));
5731 header += sizeof(mpls);
5732 action_decap_data->conf.size = header -
5733 action_decap_data->data;
5734 action->conf = &action_decap_data->conf;
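/*
 * Note: 6635 is the IANA-assigned destination port for MPLS-over-UDP
 * (RFC 7510), hence the fixed UDP destination port in the decap template
 * above.
 */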
5739 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
5740 const char *str, unsigned int len, void *buf,
5743 struct action_raw_decap_data *action_raw_decap_data;
5744 struct rte_flow_action *action;
5745 const struct arg *arg;
5746 struct buffer *out = buf;
5750 RTE_SET_USED(token);
5753 arg = ARGS_ENTRY_ARB_BOUNDED
5754 (offsetof(struct action_raw_decap_data, idx),
5755 sizeof(((struct action_raw_decap_data *)0)->idx),
5756 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5757 if (push_args(ctx, arg))
5759 ret = parse_int(ctx, token, str, len, NULL, 0);
5766 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5767 action_raw_decap_data = ctx->object;
5768 idx = action_raw_decap_data->idx;
5769 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
5770 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
5771 action->conf = &action_raw_decap_data->conf;
5777 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
5778 const char *str, unsigned int len, void *buf,
5781 struct action_raw_encap_data *action_raw_encap_data;
5782 struct rte_flow_action *action;
5783 const struct arg *arg;
5784 struct buffer *out = buf;
5788 RTE_SET_USED(token);
5791 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
5793 arg = ARGS_ENTRY_ARB_BOUNDED
5794 (offsetof(struct action_raw_encap_data, idx),
5795 sizeof(((struct action_raw_encap_data *)0)->idx),
5796 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5797 if (push_args(ctx, arg))
5799 ret = parse_int(ctx, token, str, len, NULL, 0);
5806 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5807 action_raw_encap_data = ctx->object;
5808 idx = action_raw_encap_data->idx;
5809 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
5810 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
5811 action_raw_encap_data->conf.preserve = NULL;
5812 action->conf = &action_raw_encap_data->conf;
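/*
 * Illustrative usage (syntax per the testpmd user guide; values are
 * examples only):
 *
 *   testpmd> set raw_encap 4 eth src is 10:11:22:33:44:55 /
 *            vlan tci is 1 inner_type is 0x0800 / ipv4 / udp /
 *            vxlan vni is 2 / end_set
 *   testpmd> flow create 0 egress pattern eth / ipv4 / end
 *            actions raw_encap index 4 / end
 *
 * The index selects one of the raw_encap_confs[] entries filled in by
 * cmd_set_raw_parsed() below.
 */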
5817 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
5818 const char *str, unsigned int len, void *buf,
5821 struct buffer *out = buf;
5822 struct rte_flow_action *action;
5823 struct action_raw_encap_data *action_raw_encap_data = NULL;
5826 ret = parse_vc(ctx, token, str, len, buf, size);
5829 /* Nothing else to do if there is no buffer. */
5832 if (!out->args.vc.actions_n)
5834 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5835 /* Point to selected object. */
5836 ctx->object = out->args.vc.data;
5837 ctx->objmask = NULL;
5838 /* Point to the selected raw encap configuration (index 0 by default). */
5839 action_raw_encap_data = ctx->object;
5840 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
5841 action_raw_encap_data->conf.preserve = NULL;
5842 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
5843 action->conf = &action_raw_encap_data->conf;
5848 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
5849 const char *str, unsigned int len, void *buf,
5852 struct buffer *out = buf;
5853 struct rte_flow_action *action;
5854 struct action_raw_decap_data *action_raw_decap_data = NULL;
5857 ret = parse_vc(ctx, token, str, len, buf, size);
5860 /* Nothing else to do if there is no buffer. */
5863 if (!out->args.vc.actions_n)
5865 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5866 /* Point to selected object. */
5867 ctx->object = out->args.vc.data;
5868 ctx->objmask = NULL;
5869 /* Point to the selected raw decap configuration (index 0 by default). */
5870 action_raw_decap_data = ctx->object;
5871 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
5872 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
5873 action->conf = &action_raw_decap_data->conf;
5878 parse_vc_action_set_meta(struct context *ctx, const struct token *token,
5879 const char *str, unsigned int len, void *buf,
5884 ret = parse_vc(ctx, token, str, len, buf, size);
5887 ret = rte_flow_dynf_metadata_register();
5894 parse_vc_action_sample(struct context *ctx, const struct token *token,
5895 const char *str, unsigned int len, void *buf,
5898 struct buffer *out = buf;
5899 struct rte_flow_action *action;
5900 struct action_sample_data *action_sample_data = NULL;
5901 static struct rte_flow_action end_action = {
5902 RTE_FLOW_ACTION_TYPE_END, 0
5906 ret = parse_vc(ctx, token, str, len, buf, size);
5909 /* Nothing else to do if there is no buffer. */
5912 if (!out->args.vc.actions_n)
5914 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5915 /* Point to selected object. */
5916 ctx->object = out->args.vc.data;
5917 ctx->objmask = NULL;
5918 /* Set up the default configuration: an action list holding only END. */
5919 action_sample_data = ctx->object;
5920 action_sample_data->conf.actions = &end_action;
5921 action->conf = &action_sample_data->conf;
5926 parse_vc_action_sample_index(struct context *ctx, const struct token *token,
5927 const char *str, unsigned int len, void *buf,
5930 struct action_sample_data *action_sample_data;
5931 struct rte_flow_action *action;
5932 const struct arg *arg;
5933 struct buffer *out = buf;
5937 RTE_SET_USED(token);
5940 if (ctx->curr != ACTION_SAMPLE_INDEX_VALUE)
5942 arg = ARGS_ENTRY_ARB_BOUNDED
5943 (offsetof(struct action_sample_data, idx),
5944 sizeof(((struct action_sample_data *)0)->idx),
5945 0, RAW_SAMPLE_CONFS_MAX_NUM - 1);
5946 if (push_args(ctx, arg))
5948 ret = parse_int(ctx, token, str, len, NULL, 0);
5955 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5956 action_sample_data = ctx->object;
5957 idx = action_sample_data->idx;
5958 action_sample_data->conf.actions = raw_sample_confs[idx].data;
5959 action->conf = &action_sample_data->conf;
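/*
 * Illustrative usage (syntax per the testpmd user guide; values are
 * examples only):
 *
 *   testpmd> set sample_actions 1 mark id 0x8 / end
 *   testpmd> flow create 0 ingress pattern eth / end
 *            actions sample ratio 2 index 1 / queue index 0 / end
 *
 * The index selects one of the raw_sample_confs[] entries filled in by
 * cmd_set_raw_parsed_sample() below.
 */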
5963 /** Parse tokens for destroy command. */
5965 parse_destroy(struct context *ctx, const struct token *token,
5966 const char *str, unsigned int len,
5967 void *buf, unsigned int size)
5969 struct buffer *out = buf;
5971 /* Token name must match. */
5972 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5974 /* Nothing else to do if there is no buffer. */
5977 if (!out->command) {
5978 if (ctx->curr != DESTROY)
5980 if (sizeof(*out) > size)
5982 out->command = ctx->curr;
5985 ctx->objmask = NULL;
5986 out->args.destroy.rule =
5987 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5991 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
5992 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
5995 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
5996 ctx->objmask = NULL;
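/*
 * Rule IDs for the destroy command are appended one after another in the
 * space that follows the buffer header; the bound check above guarantees
 * that each additional ID still fits within the caller-provided buffer.
 */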
6000 /** Parse tokens for flush command. */
6002 parse_flush(struct context *ctx, const struct token *token,
6003 const char *str, unsigned int len,
6004 void *buf, unsigned int size)
6006 struct buffer *out = buf;
6008 /* Token name must match. */
6009 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6011 /* Nothing else to do if there is no buffer. */
6014 if (!out->command) {
6015 if (ctx->curr != FLUSH)
6017 if (sizeof(*out) > size)
6019 out->command = ctx->curr;
6022 ctx->objmask = NULL;
6027 /** Parse tokens for dump command. */
6029 parse_dump(struct context *ctx, const struct token *token,
6030 const char *str, unsigned int len,
6031 void *buf, unsigned int size)
6033 struct buffer *out = buf;
6035 /* Token name must match. */
6036 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6038 /* Nothing else to do if there is no buffer. */
6041 if (!out->command) {
6042 if (ctx->curr != DUMP)
6044 if (sizeof(*out) > size)
6046 out->command = ctx->curr;
6049 ctx->objmask = NULL;
6054 /** Parse tokens for query command. */
6056 parse_query(struct context *ctx, const struct token *token,
6057 const char *str, unsigned int len,
6058 void *buf, unsigned int size)
6060 struct buffer *out = buf;
6062 /* Token name must match. */
6063 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6065 /* Nothing else to do if there is no buffer. */
6068 if (!out->command) {
6069 if (ctx->curr != QUERY)
6071 if (sizeof(*out) > size)
6073 out->command = ctx->curr;
6076 ctx->objmask = NULL;
6081 /** Parse action names. */
6083 parse_action(struct context *ctx, const struct token *token,
6084 const char *str, unsigned int len,
6085 void *buf, unsigned int size)
6087 struct buffer *out = buf;
6088 const struct arg *arg = pop_args(ctx);
6092 /* Argument is expected. */
6095 /* Parse action name. */
6096 for (i = 0; next_action[i]; ++i) {
6097 const struct parse_action_priv *priv;
6099 token = &token_list[next_action[i]];
6100 if (strcmp_partial(token->name, str, len))
6106 memcpy((uint8_t *)ctx->object + arg->offset,
6112 push_args(ctx, arg);
6116 /** Parse tokens for list command. */
6118 parse_list(struct context *ctx, const struct token *token,
6119 const char *str, unsigned int len,
6120 void *buf, unsigned int size)
6122 struct buffer *out = buf;
6124 /* Token name must match. */
6125 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6127 /* Nothing else to do if there is no buffer. */
6130 if (!out->command) {
6131 if (ctx->curr != LIST)
6133 if (sizeof(*out) > size)
6135 out->command = ctx->curr;
6138 ctx->objmask = NULL;
6139 out->args.list.group =
6140 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6144 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
6145 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
6148 ctx->object = out->args.list.group + out->args.list.group_n++;
6149 ctx->objmask = NULL;
6153 /** Parse tokens for list all aged flows command. */
6155 parse_aged(struct context *ctx, const struct token *token,
6156 const char *str, unsigned int len,
6157 void *buf, unsigned int size)
6159 struct buffer *out = buf;
6161 /* Token name must match. */
6162 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6164 /* Nothing else to do if there is no buffer. */
6167 if (!out->command) {
6168 if (ctx->curr != AGED)
6170 if (sizeof(*out) > size)
6172 out->command = ctx->curr;
6175 ctx->objmask = NULL;
6177 if (ctx->curr == AGED_DESTROY)
6178 out->args.aged.destroy = 1;
6182 /** Parse tokens for isolate command. */
6184 parse_isolate(struct context *ctx, const struct token *token,
6185 const char *str, unsigned int len,
6186 void *buf, unsigned int size)
6188 struct buffer *out = buf;
6190 /* Token name must match. */
6191 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6193 /* Nothing else to do if there is no buffer. */
6196 if (!out->command) {
6197 if (ctx->curr != ISOLATE)
6199 if (sizeof(*out) > size)
6201 out->command = ctx->curr;
6204 ctx->objmask = NULL;
6210 parse_tunnel(struct context *ctx, const struct token *token,
6211 const char *str, unsigned int len,
6212 void *buf, unsigned int size)
6214 struct buffer *out = buf;
6216 /* Token name must match. */
6217 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6219 /* Nothing else to do if there is no buffer. */
6222 if (!out->command) {
6223 if (ctx->curr != TUNNEL)
6225 if (sizeof(*out) > size)
6227 out->command = ctx->curr;
6230 ctx->objmask = NULL;
6232 switch (ctx->curr) {
6236 case TUNNEL_DESTROY:
6238 out->command = ctx->curr;
6240 case TUNNEL_CREATE_TYPE:
6241 case TUNNEL_DESTROY_ID:
6242 ctx->object = &out->args.vc.tunnel_ops;
6251 * Parse signed/unsigned integers 8 to 64-bit long.
6253 * Last argument (ctx->args) is retrieved to determine integer type and
6257 parse_int(struct context *ctx, const struct token *token,
6258 const char *str, unsigned int len,
6259 void *buf, unsigned int size)
6261 const struct arg *arg = pop_args(ctx);
6266 /* Argument is expected. */
6271 (uintmax_t)strtoimax(str, &end, 0) :
6272 strtoumax(str, &end, 0);
6273 if (errno || (size_t)(end - str) != len)
6276 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
6277 (intmax_t)u > (intmax_t)arg->max)) ||
6278 (!arg->sign && (u < arg->min || u > arg->max))))
6283 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
6284 !arg_entry_bf_fill(ctx->objmask, -1, arg))
6288 buf = (uint8_t *)ctx->object + arg->offset;
6290 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
6294 case sizeof(uint8_t):
6295 *(uint8_t *)buf = u;
6297 case sizeof(uint16_t):
6298 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
6300 case sizeof(uint8_t [3]):
6301 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
6303 ((uint8_t *)buf)[0] = u;
6304 ((uint8_t *)buf)[1] = u >> 8;
6305 ((uint8_t *)buf)[2] = u >> 16;
6309 ((uint8_t *)buf)[0] = u >> 16;
6310 ((uint8_t *)buf)[1] = u >> 8;
6311 ((uint8_t *)buf)[2] = u;
6313 case sizeof(uint32_t):
6314 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
6316 case sizeof(uint64_t):
6317 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
6322 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
6324 buf = (uint8_t *)ctx->objmask + arg->offset;
6329 push_args(ctx, arg);
6336 * Three arguments (ctx->args) are retrieved from the stack to store data,
6337 * its actual length and address (in that order).
6340 parse_string(struct context *ctx, const struct token *token,
6341 const char *str, unsigned int len,
6342 void *buf, unsigned int size)
6344 const struct arg *arg_data = pop_args(ctx);
6345 const struct arg *arg_len = pop_args(ctx);
6346 const struct arg *arg_addr = pop_args(ctx);
6347 char tmp[16]; /* Ought to be enough. */
6350 /* Arguments are expected. */
6354 push_args(ctx, arg_data);
6358 push_args(ctx, arg_len);
6359 push_args(ctx, arg_data);
6362 size = arg_data->size;
6363 /* Bit-mask fill is not supported. */
6364 if (arg_data->mask || size < len)
6368 /* Let parse_int() fill length information first. */
6369 ret = snprintf(tmp, sizeof(tmp), "%u", len);
6372 push_args(ctx, arg_len);
6373 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6378 buf = (uint8_t *)ctx->object + arg_data->offset;
6379 /* Output buffer is not necessarily NUL-terminated. */
6380 memcpy(buf, str, len);
6381 memset((uint8_t *)buf + len, 0x00, size - len);
6383 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6384 /* Save address if requested. */
6385 if (arg_addr->size) {
6386 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6388 (uint8_t *)ctx->object + arg_data->offset
6392 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6394 (uint8_t *)ctx->objmask + arg_data->offset
6400 push_args(ctx, arg_addr);
6401 push_args(ctx, arg_len);
6402 push_args(ctx, arg_data);
6407 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
6413 /* Check input parameters */
6414 if ((src == NULL) ||
6420 /* Convert chars to bytes */
6421 for (i = 0, len = 0; i < *size; i += 2) {
6422 snprintf(tmp, 3, "%s", src + i);
6423 dst[len++] = strtoul(tmp, &c, 16);
6438 parse_hex(struct context *ctx, const struct token *token,
6439 const char *str, unsigned int len,
6440 void *buf, unsigned int size)
6442 const struct arg *arg_data = pop_args(ctx);
6443 const struct arg *arg_len = pop_args(ctx);
6444 const struct arg *arg_addr = pop_args(ctx);
6445 char tmp[16]; /* Ought to be enough. */
6447 unsigned int hexlen = len;
6448 unsigned int length = 256;
6449 uint8_t hex_tmp[length];
6451 /* Arguments are expected. */
6455 push_args(ctx, arg_data);
6459 push_args(ctx, arg_len);
6460 push_args(ctx, arg_data);
6463 size = arg_data->size;
6464 /* Bit-mask fill is not supported. */
6470 /* Translate the hex byte string into a byte array. */
6471 if (str[0] == '0' && ((str[1] == 'x') ||
6476 if (hexlen > length)
6478 ret = parse_hex_string(str, hex_tmp, &hexlen);
6481 /* Let parse_int() fill length information first. */
6482 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
6485 push_args(ctx, arg_len);
6486 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
6491 buf = (uint8_t *)ctx->object + arg_data->offset;
6492 /* Output buffer is not necessarily NUL-terminated. */
6493 memcpy(buf, hex_tmp, hexlen);
6494 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
6496 memset((uint8_t *)ctx->objmask + arg_data->offset,
6498 /* Save address if requested. */
6499 if (arg_addr->size) {
6500 memcpy((uint8_t *)ctx->object + arg_addr->offset,
6502 (uint8_t *)ctx->object + arg_data->offset
6506 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
6508 (uint8_t *)ctx->objmask + arg_data->offset
6514 push_args(ctx, arg_addr);
6515 push_args(ctx, arg_len);
6516 push_args(ctx, arg_data);
6522 * Parse a zero-ended string.
6525 parse_string0(struct context *ctx, const struct token *token __rte_unused,
6526 const char *str, unsigned int len,
6527 void *buf, unsigned int size)
6529 const struct arg *arg_data = pop_args(ctx);
6531 /* Arguments are expected. */
6534 size = arg_data->size;
6535 /* Bit-mask fill is not supported. */
6536 if (arg_data->mask || size < len + 1)
6540 buf = (uint8_t *)ctx->object + arg_data->offset;
6541 strncpy(buf, str, len);
6543 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
6546 push_args(ctx, arg_data);
6551 * Parse a MAC address.
6553 * Last argument (ctx->args) is retrieved to determine storage size and
6557 parse_mac_addr(struct context *ctx, const struct token *token,
6558 const char *str, unsigned int len,
6559 void *buf, unsigned int size)
6561 const struct arg *arg = pop_args(ctx);
6562 struct rte_ether_addr tmp;
6566 /* Argument is expected. */
6570 /* Bit-mask fill is not supported. */
6571 if (arg->mask || size != sizeof(tmp))
6573 /* Only network endian is supported. */
6576 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
6577 if (ret < 0 || (unsigned int)ret != len)
6581 buf = (uint8_t *)ctx->object + arg->offset;
6582 memcpy(buf, &tmp, size);
6584 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6587 push_args(ctx, arg);
6592 * Parse an IPv4 address.
6594 * Last argument (ctx->args) is retrieved to determine storage size and
6598 parse_ipv4_addr(struct context *ctx, const struct token *token,
6599 const char *str, unsigned int len,
6600 void *buf, unsigned int size)
6602 const struct arg *arg = pop_args(ctx);
6607 /* Argument is expected. */
6611 /* Bit-mask fill is not supported. */
6612 if (arg->mask || size != sizeof(tmp))
6614 /* Only network endian is supported. */
6617 memcpy(str2, str, len);
6619 ret = inet_pton(AF_INET, str2, &tmp);
6621 /* Attempt integer parsing. */
6622 push_args(ctx, arg);
6623 return parse_int(ctx, token, str, len, buf, size);
6627 buf = (uint8_t *)ctx->object + arg->offset;
6628 memcpy(buf, &tmp, size);
6630 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6633 push_args(ctx, arg);
6638 * Parse an IPv6 address.
6640 * Last argument (ctx->args) is retrieved to determine storage size and
6644 parse_ipv6_addr(struct context *ctx, const struct token *token,
6645 const char *str, unsigned int len,
6646 void *buf, unsigned int size)
6648 const struct arg *arg = pop_args(ctx);
6650 struct in6_addr tmp;
6654 /* Argument is expected. */
6658 /* Bit-mask fill is not supported. */
6659 if (arg->mask || size != sizeof(tmp))
6661 /* Only network endian is supported. */
6664 memcpy(str2, str, len);
6666 ret = inet_pton(AF_INET6, str2, &tmp);
6671 buf = (uint8_t *)ctx->object + arg->offset;
6672 memcpy(buf, &tmp, size);
6674 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
6677 push_args(ctx, arg);
6681 /** Boolean values (even indices stand for false). */
6682 static const char *const boolean_name[] = {
6692 * Parse a boolean value.
6694 * Last argument (ctx->args) is retrieved to determine storage size and
6698 parse_boolean(struct context *ctx, const struct token *token,
6699 const char *str, unsigned int len,
6700 void *buf, unsigned int size)
6702 const struct arg *arg = pop_args(ctx);
6706 /* Argument is expected. */
6709 for (i = 0; boolean_name[i]; ++i)
6710 if (!strcmp_partial(boolean_name[i], str, len))
6712 /* Process token as integer. */
6713 if (boolean_name[i])
6714 str = i & 1 ? "1" : "0";
6715 push_args(ctx, arg);
6716 ret = parse_int(ctx, token, str, strlen(str), buf, size);
6717 return ret > 0 ? (int)len : ret;
6720 /** Parse port and update context. */
6722 parse_port(struct context *ctx, const struct token *token,
6723 const char *str, unsigned int len,
6724 void *buf, unsigned int size)
6726 struct buffer *out = &(struct buffer){ .port = 0 };
6734 ctx->objmask = NULL;
6735 size = sizeof(*out);
6737 ret = parse_int(ctx, token, str, len, out, size);
6739 ctx->port = out->port;
6746 parse_sa_id2ptr(struct context *ctx, const struct token *token,
6747 const char *str, unsigned int len,
6748 void *buf, unsigned int size)
6750 struct rte_flow_action *action = ctx->object;
6758 ctx->objmask = NULL;
6759 ret = parse_int(ctx, token, str, len, ctx->object, sizeof(id));
6760 ctx->object = action;
6761 if (ret != (int)len)
6763 /* Set the shared action. */
6765 action->conf = port_shared_action_get_by_id(ctx->port, id);
6766 ret = (action->conf) ? ret : -1;
6771 /** Parse set command, initialize output buffer for subsequent tokens. */
6773 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
6774 const char *str, unsigned int len,
6775 void *buf, unsigned int size)
6777 struct buffer *out = buf;
6779 /* Token name must match. */
6780 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6782 /* Nothing else to do if there is no buffer. */
6785 /* Make sure buffer is large enough. */
6786 if (size < sizeof(*out))
6789 ctx->objmask = NULL;
6793 out->command = ctx->curr;
6794 /* For encap/decap, what is needed is the pattern. */
6795 out->args.vc.pattern = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6800 /** Parse set command, initialize output buffer for subsequent tokens. */
6802 parse_set_sample_action(struct context *ctx, const struct token *token,
6803 const char *str, unsigned int len,
6804 void *buf, unsigned int size)
6806 struct buffer *out = buf;
6808 /* Token name must match. */
6809 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6811 /* Nothing else to do if there is no buffer. */
6814 /* Make sure buffer is large enough. */
6815 if (size < sizeof(*out))
6818 ctx->objmask = NULL;
6822 out->command = ctx->curr;
6823 /* For the sampler, what is needed is the action list. */
6824 out->args.vc.actions = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6830 * Parse set raw_encap/raw_decap command,
6831 * initialize output buffer for subsequent tokens.
6834 parse_set_init(struct context *ctx, const struct token *token,
6835 const char *str, unsigned int len,
6836 void *buf, unsigned int size)
6838 struct buffer *out = buf;
6840 /* Token name must match. */
6841 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
6843 /* Nothing else to do if there is no buffer. */
6846 /* Make sure buffer is large enough. */
6847 if (size < sizeof(*out))
6849 /* Initialize buffer. */
6850 memset(out, 0x00, sizeof(*out));
6851 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
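/*
 * The 0x22 fill is presumably a poison pattern: it makes accidental reads
 * of the unwritten tail of the buffer easy to recognize while debugging.
 */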
6854 ctx->objmask = NULL;
6855 if (!out->command) {
6856 if (ctx->curr != SET)
6858 if (sizeof(*out) > size)
6860 out->command = ctx->curr;
6861 out->args.vc.data = (uint8_t *)out + size;
6862 ctx->object = (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
6868 /** No completion. */
6870 comp_none(struct context *ctx, const struct token *token,
6871 unsigned int ent, char *buf, unsigned int size)
6881 /** Complete boolean values. */
6883 comp_boolean(struct context *ctx, const struct token *token,
6884 unsigned int ent, char *buf, unsigned int size)
6890 for (i = 0; boolean_name[i]; ++i)
6891 if (buf && i == ent)
6892 return strlcpy(buf, boolean_name[i], size);
6898 /** Complete action names. */
6900 comp_action(struct context *ctx, const struct token *token,
6901 unsigned int ent, char *buf, unsigned int size)
6907 for (i = 0; next_action[i]; ++i)
6908 if (buf && i == ent)
6909 return strlcpy(buf, token_list[next_action[i]].name,
6916 /** Complete available ports. */
6918 comp_port(struct context *ctx, const struct token *token,
6919 unsigned int ent, char *buf, unsigned int size)
6926 RTE_ETH_FOREACH_DEV(p) {
6927 if (buf && i == ent)
6928 return snprintf(buf, size, "%u", p);
6936 /** Complete available rule IDs. */
6938 comp_rule_id(struct context *ctx, const struct token *token,
6939 unsigned int ent, char *buf, unsigned int size)
6942 struct rte_port *port;
6943 struct port_flow *pf;
6946 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
6947 ctx->port == (portid_t)RTE_PORT_ALL)
6949 port = &ports[ctx->port];
6950 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
6951 if (buf && i == ent)
6952 return snprintf(buf, size, "%u", pf->id);
6960 /** Complete type field for RSS action. */
6962 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
6963 unsigned int ent, char *buf, unsigned int size)
6969 for (i = 0; rss_type_table[i].str; ++i)
6974 return strlcpy(buf, rss_type_table[ent].str, size);
6976 return snprintf(buf, size, "end");
6980 /** Complete queue field for RSS action. */
6982 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
6983 unsigned int ent, char *buf, unsigned int size)
6990 return snprintf(buf, size, "%u", ent);
6992 return snprintf(buf, size, "end");
6996 /** Complete index number for set raw_encap/raw_decap commands. */
6998 comp_set_raw_index(struct context *ctx, const struct token *token,
6999 unsigned int ent, char *buf, unsigned int size)
7005 RTE_SET_USED(token);
7006 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
7007 if (buf && idx == ent)
7008 return snprintf(buf, size, "%u", idx);
7014 /** Complete index number for set raw_encap/raw_decap commands. */
7016 comp_set_sample_index(struct context *ctx, const struct token *token,
7017 unsigned int ent, char *buf, unsigned int size)
7023 RTE_SET_USED(token);
7024 for (idx = 0; idx < RAW_SAMPLE_CONFS_MAX_NUM; ++idx) {
7025 if (buf && idx == ent)
7026 return snprintf(buf, size, "%u", idx);
7032 /** Internal context. */
7033 static struct context cmd_flow_context;
7035 /** Global parser instance (cmdline API). */
7036 cmdline_parse_inst_t cmd_flow;
7037 cmdline_parse_inst_t cmd_set_raw;
7039 /** Initialize context. */
7041 cmd_flow_context_init(struct context *ctx)
7043 /* A full memset() is not necessary. */
7053 ctx->objmask = NULL;
7056 /** Parse a token (cmdline API). */
7058 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
7061 struct context *ctx = &cmd_flow_context;
7062 const struct token *token;
7063 const enum index *list;
7068 token = &token_list[ctx->curr];
7069 /* Check argument length. */
7072 for (len = 0; src[len]; ++len)
7073 if (src[len] == '#' || isspace(src[len]))
7077 /* Last argument and EOL detection. */
7078 for (i = len; src[i]; ++i)
7079 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
7081 else if (!isspace(src[i])) {
7086 if (src[i] == '\r' || src[i] == '\n') {
7090 /* Initialize context if necessary. */
7091 if (!ctx->next_num) {
7094 ctx->next[ctx->next_num++] = token->next[0];
7096 /* Process argument through candidates. */
7097 ctx->prev = ctx->curr;
7098 list = ctx->next[ctx->next_num - 1];
7099 for (i = 0; list[i]; ++i) {
7100 const struct token *next = &token_list[list[i]];
7103 ctx->curr = list[i];
7105 tmp = next->call(ctx, next, src, len, result, size);
7107 tmp = parse_default(ctx, next, src, len, result, size);
7108 if (tmp == -1 || tmp != len)
7116 /* Push subsequent tokens if any. */
7118 for (i = 0; token->next[i]; ++i) {
7119 if (ctx->next_num == RTE_DIM(ctx->next))
7121 ctx->next[ctx->next_num++] = token->next[i];
7123 /* Push arguments if any. */
7125 for (i = 0; token->args[i]; ++i) {
7126 if (ctx->args_num == RTE_DIM(ctx->args))
7128 ctx->args[ctx->args_num++] = token->args[i];
7133 /** Return number of completion entries (cmdline API). */
7135 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
7137 struct context *ctx = &cmd_flow_context;
7138 const struct token *token = &token_list[ctx->curr];
7139 const enum index *list;
7143 /* Count number of tokens in current list. */
7145 list = ctx->next[ctx->next_num - 1];
7147 list = token->next[0];
7148 for (i = 0; list[i]; ++i)
7153 * If there is a single token, use its completion callback, otherwise
7154 * return the number of entries.
7156 token = &token_list[list[0]];
7157 if (i == 1 && token->comp) {
7158 /* Save index for cmd_flow_get_help(). */
7159 ctx->prev = list[0];
7160 return token->comp(ctx, token, 0, NULL, 0);
7165 /** Return a completion entry (cmdline API). */
7167 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
7168 char *dst, unsigned int size)
7170 struct context *ctx = &cmd_flow_context;
7171 const struct token *token = &token_list[ctx->curr];
7172 const enum index *list;
7176 /* Count number of tokens in current list. */
7178 list = ctx->next[ctx->next_num - 1];
7180 list = token->next[0];
7181 for (i = 0; list[i]; ++i)
7185 /* If there is a single token, use its completion callback. */
7186 token = &token_list[list[0]];
7187 if (i == 1 && token->comp) {
7188 /* Save index for cmd_flow_get_help(). */
7189 ctx->prev = list[0];
7190 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
7192 /* Otherwise make sure the index is valid and use defaults. */
7195 token = &token_list[list[index]];
7196 strlcpy(dst, token->name, size);
7197 /* Save index for cmd_flow_get_help(). */
7198 ctx->prev = list[index];
7202 /** Populate help strings for current token (cmdline API). */
7204 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
7206 struct context *ctx = &cmd_flow_context;
7207 const struct token *token = &token_list[ctx->prev];
7212 /* Set token type and update global help with details. */
7213 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
7215 cmd_flow.help_str = token->help;
7217 cmd_flow.help_str = token->name;
7221 /** Token definition template (cmdline API). */
7222 static struct cmdline_token_hdr cmd_flow_token_hdr = {
7223 .ops = &(struct cmdline_token_ops){
7224 .parse = cmd_flow_parse,
7225 .complete_get_nb = cmd_flow_complete_get_nb,
7226 .complete_get_elt = cmd_flow_complete_get_elt,
7227 .get_help = cmd_flow_get_help,
7232 /** Populate the next dynamic token. */
7234 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
7235 cmdline_parse_token_hdr_t **hdr_inst)
7237 struct context *ctx = &cmd_flow_context;
7239 /* Always reinitialize context before requesting the first token. */
7240 if (!(hdr_inst - cmd_flow.tokens))
7241 cmd_flow_context_init(ctx);
7242 /* Return NULL when no more tokens are expected. */
7243 if (!ctx->next_num && ctx->curr) {
7247 /* Determine if command should end here. */
7248 if (ctx->eol && ctx->last && ctx->next_num) {
7249 const enum index *list = ctx->next[ctx->next_num - 1];
7252 for (i = 0; list[i]; ++i) {
7259 *hdr = &cmd_flow_token_hdr;
7262 /** Dispatch parsed buffer to function calls. */
7264 cmd_flow_parsed(const struct buffer *in)
7266 switch (in->command) {
7267 case SHARED_ACTION_CREATE:
7268 port_shared_action_create(
7269 in->port, in->args.vc.attr.group,
7270 &((const struct rte_flow_shared_action_conf) {
7271 .ingress = in->args.vc.attr.ingress,
7272 .egress = in->args.vc.attr.egress,
7273 .transfer = in->args.vc.attr.transfer,
7275 in->args.vc.actions);
7277 case SHARED_ACTION_DESTROY:
7278 port_shared_action_destroy(in->port,
7279 in->args.sa_destroy.action_id_n,
7280 in->args.sa_destroy.action_id);
7282 case SHARED_ACTION_UPDATE:
7283 port_shared_action_update(in->port, in->args.vc.attr.group,
7284 in->args.vc.actions);
7286 case SHARED_ACTION_QUERY:
7287 port_shared_action_query(in->port, in->args.sa.action_id);
7290 port_flow_validate(in->port, &in->args.vc.attr,
7291 in->args.vc.pattern, in->args.vc.actions,
7292 &in->args.vc.tunnel_ops);
7295 port_flow_create(in->port, &in->args.vc.attr,
7296 in->args.vc.pattern, in->args.vc.actions,
7297 &in->args.vc.tunnel_ops);
7300 port_flow_destroy(in->port, in->args.destroy.rule_n,
7301 in->args.destroy.rule);
7304 port_flow_flush(in->port);
7307 port_flow_dump(in->port, in->args.dump.file);
7310 port_flow_query(in->port, in->args.query.rule,
7311 &in->args.query.action);
7314 port_flow_list(in->port, in->args.list.group_n,
7315 in->args.list.group);
7318 port_flow_isolate(in->port, in->args.isolate.set);
7321 port_flow_aged(in->port, in->args.aged.destroy);
7324 port_flow_tunnel_create(in->port, &in->args.vc.tunnel_ops);
7326 case TUNNEL_DESTROY:
7327 port_flow_tunnel_destroy(in->port, in->args.vc.tunnel_ops.id);
7330 port_flow_tunnel_list(in->port);
7337 /** Token generator and output processing callback (cmdline API). */
7339 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
7342 cmd_flow_tok(arg0, arg2);
7344 cmd_flow_parsed(arg0);
7347 /** Global parser instance (cmdline API). */
7348 cmdline_parse_inst_t cmd_flow = {
7350 .data = NULL, /**< Unused. */
7351 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
7354 }, /**< Tokens are returned by cmd_flow_tok(). */
7357 /** Set command facility. Reuses the cmd flow infrastructure as much as possible. */
7360 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
7362 struct rte_ipv4_hdr *ipv4;
7363 struct rte_ether_hdr *eth;
7364 struct rte_ipv6_hdr *ipv6;
7365 struct rte_vxlan_hdr *vxlan;
7366 struct rte_vxlan_gpe_hdr *gpe;
7367 struct rte_flow_item_nvgre *nvgre;
7368 uint32_t ipv6_vtc_flow;
7370 switch (item->type) {
7371 case RTE_FLOW_ITEM_TYPE_ETH:
7372 eth = (struct rte_ether_hdr *)buf;
7374 eth->ether_type = rte_cpu_to_be_16(next_proto);
7376 case RTE_FLOW_ITEM_TYPE_IPV4:
7377 ipv4 = (struct rte_ipv4_hdr *)buf;
7378 ipv4->version_ihl = 0x45;
7379 if (next_proto && ipv4->next_proto_id == 0)
7380 ipv4->next_proto_id = (uint8_t)next_proto;
7382 case RTE_FLOW_ITEM_TYPE_IPV6:
7383 ipv6 = (struct rte_ipv6_hdr *)buf;
7384 if (next_proto && ipv6->proto == 0)
7385 ipv6->proto = (uint8_t)next_proto;
7386 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->vtc_flow);
7387 ipv6_vtc_flow &= 0x0FFFFFFF; /* Reset the version bits. */
7388 ipv6_vtc_flow |= 0x60000000; /* Set the IPv6 version. */
7389 ipv6->vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
7391 case RTE_FLOW_ITEM_TYPE_VXLAN:
7392 vxlan = (struct rte_vxlan_hdr *)buf;
7393 vxlan->vx_flags = 0x08;
7395 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7396 gpe = (struct rte_vxlan_gpe_hdr *)buf;
7397 gpe->vx_flags = 0x0C;
7399 case RTE_FLOW_ITEM_TYPE_NVGRE:
7400 nvgre = (struct rte_flow_item_nvgre *)buf;
7401 nvgre->protocol = rte_cpu_to_be_16(0x6558);
7402 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
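/*
 * Note on the constants above: 0x6558 is the Transparent Ethernet
 * Bridging protocol type carried by NVGRE, and 0x2000 sets the
 * key-present bit of the c_k_s_rsvd0_ver field. Likewise, the VXLAN 0x08
 * and VXLAN-GPE 0x0C flag values mark the VNI (and, for GPE, the next
 * protocol) fields as valid.
 */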
7409 /** Helper to get an item's default mask. */
7411 flow_item_default_mask(const struct rte_flow_item *item)
7413 const void *mask = NULL;
7414 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
7416 switch (item->type) {
7417 case RTE_FLOW_ITEM_TYPE_ANY:
7418 mask = &rte_flow_item_any_mask;
7420 case RTE_FLOW_ITEM_TYPE_VF:
7421 mask = &rte_flow_item_vf_mask;
7423 case RTE_FLOW_ITEM_TYPE_PORT_ID:
7424 mask = &rte_flow_item_port_id_mask;
7426 case RTE_FLOW_ITEM_TYPE_RAW:
7427 mask = &rte_flow_item_raw_mask;
7429 case RTE_FLOW_ITEM_TYPE_ETH:
7430 mask = &rte_flow_item_eth_mask;
7432 case RTE_FLOW_ITEM_TYPE_VLAN:
7433 mask = &rte_flow_item_vlan_mask;
7435 case RTE_FLOW_ITEM_TYPE_IPV4:
7436 mask = &rte_flow_item_ipv4_mask;
7438 case RTE_FLOW_ITEM_TYPE_IPV6:
7439 mask = &rte_flow_item_ipv6_mask;
7441 case RTE_FLOW_ITEM_TYPE_ICMP:
7442 mask = &rte_flow_item_icmp_mask;
7444 case RTE_FLOW_ITEM_TYPE_UDP:
7445 mask = &rte_flow_item_udp_mask;
7447 case RTE_FLOW_ITEM_TYPE_TCP:
7448 mask = &rte_flow_item_tcp_mask;
7450 case RTE_FLOW_ITEM_TYPE_SCTP:
7451 mask = &rte_flow_item_sctp_mask;
7453 case RTE_FLOW_ITEM_TYPE_VXLAN:
7454 mask = &rte_flow_item_vxlan_mask;
7456 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7457 mask = &rte_flow_item_vxlan_gpe_mask;
7459 case RTE_FLOW_ITEM_TYPE_E_TAG:
7460 mask = &rte_flow_item_e_tag_mask;
7462 case RTE_FLOW_ITEM_TYPE_NVGRE:
7463 mask = &rte_flow_item_nvgre_mask;
7465 case RTE_FLOW_ITEM_TYPE_MPLS:
7466 mask = &rte_flow_item_mpls_mask;
7468 case RTE_FLOW_ITEM_TYPE_GRE:
7469 mask = &rte_flow_item_gre_mask;
7471 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7472 mask = &gre_key_default_mask;
7474 case RTE_FLOW_ITEM_TYPE_META:
7475 mask = &rte_flow_item_meta_mask;
7477 case RTE_FLOW_ITEM_TYPE_FUZZY:
7478 mask = &rte_flow_item_fuzzy_mask;
7480 case RTE_FLOW_ITEM_TYPE_GTP:
7481 mask = &rte_flow_item_gtp_mask;
7483 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7484 mask = &rte_flow_item_gtp_psc_mask;
7486 case RTE_FLOW_ITEM_TYPE_GENEVE:
7487 mask = &rte_flow_item_geneve_mask;
7489 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
7490 mask = &rte_flow_item_pppoe_proto_id_mask;
7492 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
7493 mask = &rte_flow_item_l2tpv3oip_mask;
7495 case RTE_FLOW_ITEM_TYPE_ESP:
7496 mask = &rte_flow_item_esp_mask;
7498 case RTE_FLOW_ITEM_TYPE_AH:
7499 mask = &rte_flow_item_ah_mask;
7501 case RTE_FLOW_ITEM_TYPE_PFCP:
7502 mask = &rte_flow_item_pfcp_mask;
7510 /** Store the sample actions parsed by the set command into their configuration slot. */
7512 cmd_set_raw_parsed_sample(const struct buffer *in)
7514 uint32_t n = in->args.vc.actions_n;
7516 struct rte_flow_action *action = NULL;
7517 struct rte_flow_action *data = NULL;
7519 uint16_t idx = in->port; /* The port field is borrowed as an index. */
7520 uint32_t max_size = sizeof(struct rte_flow_action) *
7521 ACTION_SAMPLE_ACTIONS_NUM;
7523 RTE_ASSERT(in->command == SET_SAMPLE_ACTIONS);
7524 data = (struct rte_flow_action *)&raw_sample_confs[idx].data;
7525 memset(data, 0x00, max_size);
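/*
 * Each sampled action's configuration is deep-copied below into a
 * per-index slot (sample_mark[], sample_count[], ...) that outlives the
 * parser's temporary buffer, so the conf pointers stored in
 * raw_sample_confs[] remain valid.
 */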
7526 for (; i <= n - 1; i++) {
7527 action = in->args.vc.actions + i;
7528 if (action->type == RTE_FLOW_ACTION_TYPE_END)
7530 switch (action->type) {
7531 case RTE_FLOW_ACTION_TYPE_MARK:
7532 size = sizeof(struct rte_flow_action_mark);
7533 rte_memcpy(&sample_mark[idx],
7534 (const void *)action->conf, size);
7535 action->conf = &sample_mark[idx];
7537 case RTE_FLOW_ACTION_TYPE_COUNT:
7538 size = sizeof(struct rte_flow_action_count);
7539 rte_memcpy(&sample_count[idx],
7540 (const void *)action->conf, size);
7541 action->conf = &sample_count[idx];
7543 case RTE_FLOW_ACTION_TYPE_QUEUE:
7544 size = sizeof(struct rte_flow_action_queue);
7545 rte_memcpy(&sample_queue[idx],
7546 (const void *)action->conf, size);
7547 action->conf = &sample_queue[idx];
7549 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
7550 size = sizeof(struct rte_flow_action_raw_encap);
7551 rte_memcpy(&sample_encap[idx],
7552 (const void *)action->conf, size);
7553 action->conf = &sample_encap[idx];
7555 case RTE_FLOW_ACTION_TYPE_PORT_ID:
7556 size = sizeof(struct rte_flow_action_port_id);
7557 rte_memcpy(&sample_port_id[idx],
7558 (const void *)action->conf, size);
7559 action->conf = &sample_port_id[idx];
7562 printf("Error - Not supported action\n");
7565 rte_memcpy(data, action, sizeof(struct rte_flow_action));
7570 /** Build the raw encap/decap (or sample) configuration from the parsed buffer. */
7572 cmd_set_raw_parsed(const struct buffer *in)
7574 uint32_t n = in->args.vc.pattern_n;
7576 struct rte_flow_item *item = NULL;
7578 uint8_t *data = NULL;
7579 uint8_t *data_tail = NULL;
7580 size_t *total_size = NULL;
7581 uint16_t upper_layer = 0;
7583 uint16_t idx = in->port; /* The port field is borrowed as an index. */
7584 int gtp_psc = -1; /* GTP PSC option index. */
7586 if (in->command == SET_SAMPLE_ACTIONS)
7587 return cmd_set_raw_parsed_sample(in);
7588 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
7589 in->command == SET_RAW_DECAP);
7590 if (in->command == SET_RAW_ENCAP) {
7591 total_size = &raw_encap_confs[idx].size;
7592 data = (uint8_t *)&raw_encap_confs[idx].data;
7594 total_size = &raw_decap_confs[idx].size;
7595 data = (uint8_t *)&raw_decap_confs[idx].data;
7598 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
7599 /* Process headers from the upper layers down to the lower ones (L3/L4 -> L2). */
7600 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
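/*
 * Headers are assembled backwards from the end of the buffer: each item
 * is copied in front of the ones written before it, so the outermost
 * header ends up at the lowest address of the assembled block. Once the
 * loop completes, the block is moved to the start of the buffer.
 */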
7601 for (i = n - 1 ; i >= 0; --i) {
7602 const struct rte_flow_item_gtp *gtp;
7604 item = in->args.vc.pattern + i;
7605 if (item->spec == NULL)
7606 item->spec = flow_item_default_mask(item);
7607 switch (item->type) {
7608 case RTE_FLOW_ITEM_TYPE_ETH:
7609 size = sizeof(struct rte_ether_hdr);
7611 case RTE_FLOW_ITEM_TYPE_VLAN:
7612 size = sizeof(struct rte_vlan_hdr);
7613 proto = RTE_ETHER_TYPE_VLAN;
7615 case RTE_FLOW_ITEM_TYPE_IPV4:
7616 size = sizeof(struct rte_ipv4_hdr);
7617 proto = RTE_ETHER_TYPE_IPV4;
7619 case RTE_FLOW_ITEM_TYPE_IPV6:
7620 size = sizeof(struct rte_ipv6_hdr);
7621 proto = RTE_ETHER_TYPE_IPV6;
7623 case RTE_FLOW_ITEM_TYPE_UDP:
7624 size = sizeof(struct rte_udp_hdr);
7627 case RTE_FLOW_ITEM_TYPE_TCP:
7628 size = sizeof(struct rte_tcp_hdr);
7631 case RTE_FLOW_ITEM_TYPE_VXLAN:
7632 size = sizeof(struct rte_vxlan_hdr);
7634 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
7635 size = sizeof(struct rte_vxlan_gpe_hdr);
7637 case RTE_FLOW_ITEM_TYPE_GRE:
7638 size = sizeof(struct rte_gre_hdr);
7641 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
7642 size = sizeof(rte_be32_t);
7645 case RTE_FLOW_ITEM_TYPE_MPLS:
7646 size = sizeof(struct rte_mpls_hdr);
7649 case RTE_FLOW_ITEM_TYPE_NVGRE:
7650 size = sizeof(struct rte_flow_item_nvgre);
7653 case RTE_FLOW_ITEM_TYPE_GENEVE:
7654 size = sizeof(struct rte_geneve_hdr);
7656 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
7657 size = sizeof(rte_be32_t);
7660 case RTE_FLOW_ITEM_TYPE_ESP:
7661 size = sizeof(struct rte_esp_hdr);
7664 case RTE_FLOW_ITEM_TYPE_AH:
7665 size = sizeof(struct rte_flow_item_ah);
7668 case RTE_FLOW_ITEM_TYPE_GTP:
7669 if (gtp_psc < 0) {
7670 size = sizeof(struct rte_gtp_hdr);
7671 break;
7672 }
7673 if (gtp_psc != i + 1) {
7674 printf("Error - GTP PSC does not follow GTP\n");
7675 goto error;
7676 }
7677 gtp = item->spec;
7678 if ((gtp->v_pt_rsv_flags & 0x07) != 0x04) {
7679 /* Only E flag should be set. */
7680 printf("Error - GTP unsupported flags\n");
7681 goto error;
7682 } else {
7683 struct rte_gtp_hdr_ext_word ext_word = {
7684 .next_ext = 0x85
7685 };
7686
7687 /* We have to add GTP header extra word. */
7688 *total_size += sizeof(ext_word);
7689 rte_memcpy(data_tail - (*total_size),
7690 &ext_word, sizeof(ext_word));
7691 }
7692 size = sizeof(struct rte_gtp_hdr);
7693 break;
7694 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
7695 if (gtp_psc >= 0) {
7696 printf("Error - Multiple GTP PSC items\n");
7697 goto error;
7698 } else {
7699 const struct rte_flow_item_gtp_psc
7700 *opt = item->spec;
7701 struct {
7702 uint8_t len;
7703 uint8_t pdu_type;
7704 uint8_t qfi;
7705 uint8_t next;
7706 } psc;
7707
7708 if (opt->pdu_type & 0x0F) {
7709 /* Support the minimal option only. */
7710 printf("Error - GTP PSC option with "
7711 "extra fields not supported\n");
7712 goto error;
7713 }
7714 psc.len = sizeof(psc);
7715 psc.pdu_type = opt->pdu_type;
7716 psc.qfi = opt->qfi;
7717 psc.next = 0;
7718 *total_size += sizeof(psc);
7719 rte_memcpy(data_tail - (*total_size),
7720 &psc, sizeof(psc));
7721 gtp_psc = i;
7722 size = 0;
7723 }
7724 break;
7725 case RTE_FLOW_ITEM_TYPE_PFCP:
7726 size = sizeof(struct rte_flow_item_pfcp);
7727 break;
7728 default:
7729 printf("Error - Not supported item\n");
7730 goto error;
7731 }
7732 *total_size += size;
7733 rte_memcpy(data_tail - (*total_size), item->spec, size);
7734 /* Update fields that cannot be set from the command line. */
7735 update_fields((data_tail - (*total_size)), item,
7736 upper_layer);
7737 upper_layer = proto;
7738 }
7739 if (verbose_level & 0x1)
7740 printf("total data size is %zu\n", (*total_size));
7741 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
7742 memmove(data, (data_tail - (*total_size)), *total_size);
7743 return;
7744
7745 error:
7746 *total_size = 0;
7747 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
7748 }
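/*
 * Editorial illustration (not part of the original file): a buffer parsed by
 * this function is typically configured and then referenced from a flow rule.
 * Assuming the documented testpmd "set raw_encap"/"set raw_decap" syntax
 * (details may vary by release):
 *
 *   testpmd> set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1 /
 *            ipv4 src is 1.1.1.1 dst is 2.2.2.2 / udp dst is 4789 /
 *            vxlan vni is 2 / end_set
 *   testpmd> set raw_decap 4 eth / ipv4 / end_set
 *   testpmd> flow create 0 ingress pattern eth / ipv4 / end
 *            actions raw_decap index 4 / raw_encap index 4 /
 *            queue index 0 / end
 */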
7750 /** Populate help strings for current token (cmdline API). */
7751 static int
7752 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
7753 unsigned int size)
7754 {
7755 struct context *ctx = &cmd_flow_context;
7756 const struct token *token = &token_list[ctx->prev];
7757
7758 (void)hdr;
7759 if (!size)
7760 return -1;
7761 /* Set token type and update global help with details. */
7762 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
7763 if (token->help)
7764 cmd_set_raw.help_str = token->help;
7765 else
7766 cmd_set_raw.help_str = token->name;
7767 return 0;
7768 }
7770 /** Token definition template (cmdline API). */
7771 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
7772 .ops = &(struct cmdline_token_ops){
7773 .parse = cmd_flow_parse,
7774 .complete_get_nb = cmd_flow_complete_get_nb,
7775 .complete_get_elt = cmd_flow_complete_get_elt,
7776 .get_help = cmd_set_raw_get_help,
7777 },
7778 .offset = 0,
7779 };
7781 /** Populate the next dynamic token. */
7782 static void
7783 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
7784 cmdline_parse_token_hdr_t **hdr_inst)
7785 {
7786 struct context *ctx = &cmd_flow_context;
7787
7788 /* Always reinitialize context before requesting the first token. */
7789 if (!(hdr_inst - cmd_set_raw.tokens)) {
7790 cmd_flow_context_init(ctx);
7791 ctx->curr = START_SET;
7792 }
7793 /* Return NULL when no more tokens are expected. */
7794 if (!ctx->next_num && (ctx->curr != START_SET)) {
7795 *hdr = NULL;
7796 return;
7797 }
7798 /* Determine if command should end here. */
7799 if (ctx->eol && ctx->last && ctx->next_num) {
7800 const enum index *list = ctx->next[ctx->next_num - 1];
7801 int i;
7802
7803 for (i = 0; list[i]; ++i) {
7804 if (list[i] != END)
7805 continue;
7806 *hdr = NULL;
7807 return;
7808 }
7809 }
7810 *hdr = &cmd_set_raw_token_hdr;
7811 }
7813 /** Token generator and output processing callback (cmdline API). */
7814 static void
7815 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
7816 {
7817 if (cl == NULL)
7818 cmd_set_raw_tok(arg0, arg2);
7819 else
7820 cmd_set_raw_parsed(arg0);
7821 }
7823 /** Global parser instance (cmdline API). */
7824 cmdline_parse_inst_t cmd_set_raw = {
7825 .f = cmd_set_raw_cb,
7826 .data = NULL, /**< Unused. */
7827 .help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
7828 .tokens = {
7829 NULL,
7830 }, /**< Tokens are returned by cmd_set_raw_tok(). */
7831 };
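/*
 * Editorial illustration (hypothetical, lives outside this file): like the
 * other flow-related commands, cmd_set_raw and the "show" instances below are
 * expected to be registered in testpmd's main cmdline context table, roughly:
 *
 *   cmdline_parse_ctx_t main_ctx[] = {
 *           ...
 *           (cmdline_parse_inst_t *)&cmd_set_raw,
 *           (cmdline_parse_inst_t *)&cmd_show_set_raw,
 *           (cmdline_parse_inst_t *)&cmd_show_set_raw_all,
 *           ...
 *   };
 */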
7833 /* *** display raw_encap/raw_decap buf */
7834 struct cmd_show_set_raw_result {
7835 cmdline_fixed_string_t cmd_show;
7836 cmdline_fixed_string_t cmd_what;
7837 cmdline_fixed_string_t cmd_all;
7838 uint16_t cmd_index;
7839 };
7840
7841 static void
7842 cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
7843 {
7844 struct cmd_show_set_raw_result *res = parsed_result;
7845 uint16_t index = res->cmd_index;
7846 uint8_t all = 0;
7847 uint8_t *raw_data = NULL;
7848 size_t raw_size = 0;
7849 char title[16] = {0};
7850
7851 RTE_SET_USED(cl);
7852 RTE_SET_USED(data);
7853 if (!strcmp(res->cmd_all, "all")) {
7854 all = 1;
7855 index = 0;
7856 } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
7857 printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
7858 return;
7859 }
7860 do {
7861 if (!strcmp(res->cmd_what, "raw_encap")) {
7862 raw_data = (uint8_t *)&raw_encap_confs[index].data;
7863 raw_size = raw_encap_confs[index].size;
7864 snprintf(title, 16, "\nindex: %u", index);
7865 rte_hexdump(stdout, title, raw_data, raw_size);
7866 } else {
7867 raw_data = (uint8_t *)&raw_decap_confs[index].data;
7868 raw_size = raw_decap_confs[index].size;
7869 snprintf(title, 16, "\nindex: %u", index);
7870 rte_hexdump(stdout, title, raw_data, raw_size);
7871 }
7872 } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
7873 }
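/*
 * Editorial illustration: once a buffer has been configured, it can be
 * inspected with (syntax as declared by the help strings below):
 *
 *   testpmd> show raw_encap 0
 *   testpmd> show raw_decap all
 *
 * Each selected index is printed as a hex dump via rte_hexdump() with an
 * "index: <n>" title.
 */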
7875 cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
7876 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
7877 cmd_show, "show");
7878 cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
7879 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
7880 cmd_what, "raw_encap#raw_decap");
7881 cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
7882 TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
7883 cmd_index, RTE_UINT16);
7884 cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
7885 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
7886 cmd_all, "all");
7887 cmdline_parse_inst_t cmd_show_set_raw = {
7888 .f = cmd_show_set_raw_parsed,
7889 .data = NULL,
7890 .help_str = "show <raw_encap|raw_decap> <index>",
7891 .tokens = {
7892 (void *)&cmd_show_set_raw_cmd_show,
7893 (void *)&cmd_show_set_raw_cmd_what,
7894 (void *)&cmd_show_set_raw_cmd_index,
7895 NULL,
7896 },
7897 };
7898 cmdline_parse_inst_t cmd_show_set_raw_all = {
7899 .f = cmd_show_set_raw_parsed,
7900 .data = NULL,
7901 .help_str = "show <raw_encap|raw_decap> all",
7902 .tokens = {
7903 (void *)&cmd_show_set_raw_cmd_show,
7904 (void *)&cmd_show_set_raw_cmd_what,
7905 (void *)&cmd_show_set_raw_cmd_all,
7906 NULL,
7907 },
7908 };
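/*
 * Editorial note: cmd_show_set_raw and cmd_show_set_raw_all share the same
 * callback; they differ only in their last token ("<index>" vs. the literal
 * "all"), which is how cmd_show_set_raw_parsed() decides whether to dump a
 * single buffer or iterate over every configured index.
 */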