1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
29 /** Parser token indices. */
52 /* Top-level command. */
54 /* Sub-level commands. */
59 /* Top-level command. */
61 /* Sub-level commands. */
70 /* Destroy arguments. */
73 /* Query arguments. */
79 /* Validate/create arguments. */
86 /* Validate/create pattern. */
123 ITEM_VLAN_INNER_TYPE,
155 ITEM_E_TAG_GRP_ECID_B,
164 ITEM_GRE_C_RSVD0_VER,
180 ITEM_ARP_ETH_IPV4_SHA,
181 ITEM_ARP_ETH_IPV4_SPA,
182 ITEM_ARP_ETH_IPV4_THA,
183 ITEM_ARP_ETH_IPV4_TPA,
185 ITEM_IPV6_EXT_NEXT_HDR,
190 ITEM_ICMP6_ND_NS_TARGET_ADDR,
192 ITEM_ICMP6_ND_NA_TARGET_ADDR,
194 ITEM_ICMP6_ND_OPT_TYPE,
195 ITEM_ICMP6_ND_OPT_SLA_ETH,
196 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
197 ITEM_ICMP6_ND_OPT_TLA_ETH,
198 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
211 ITEM_HIGIG2_CLASSIFICATION,
217 /* Validate/create actions. */
237 ACTION_RSS_FUNC_DEFAULT,
238 ACTION_RSS_FUNC_TOEPLITZ,
239 ACTION_RSS_FUNC_SIMPLE_XOR,
240 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
252 ACTION_PHY_PORT_ORIGINAL,
253 ACTION_PHY_PORT_INDEX,
255 ACTION_PORT_ID_ORIGINAL,
259 ACTION_OF_SET_MPLS_TTL,
260 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
261 ACTION_OF_DEC_MPLS_TTL,
262 ACTION_OF_SET_NW_TTL,
263 ACTION_OF_SET_NW_TTL_NW_TTL,
264 ACTION_OF_DEC_NW_TTL,
265 ACTION_OF_COPY_TTL_OUT,
266 ACTION_OF_COPY_TTL_IN,
269 ACTION_OF_PUSH_VLAN_ETHERTYPE,
270 ACTION_OF_SET_VLAN_VID,
271 ACTION_OF_SET_VLAN_VID_VLAN_VID,
272 ACTION_OF_SET_VLAN_PCP,
273 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
275 ACTION_OF_POP_MPLS_ETHERTYPE,
277 ACTION_OF_PUSH_MPLS_ETHERTYPE,
284 ACTION_MPLSOGRE_ENCAP,
285 ACTION_MPLSOGRE_DECAP,
286 ACTION_MPLSOUDP_ENCAP,
287 ACTION_MPLSOUDP_DECAP,
289 ACTION_SET_IPV4_SRC_IPV4_SRC,
291 ACTION_SET_IPV4_DST_IPV4_DST,
293 ACTION_SET_IPV6_SRC_IPV6_SRC,
295 ACTION_SET_IPV6_DST_IPV6_DST,
297 ACTION_SET_TP_SRC_TP_SRC,
299 ACTION_SET_TP_DST_TP_DST,
305 ACTION_SET_MAC_SRC_MAC_SRC,
307 ACTION_SET_MAC_DST_MAC_DST,
309 ACTION_INC_TCP_SEQ_VALUE,
311 ACTION_DEC_TCP_SEQ_VALUE,
313 ACTION_INC_TCP_ACK_VALUE,
315 ACTION_DEC_TCP_ACK_VALUE,
318 ACTION_RAW_ENCAP_INDEX,
319 ACTION_RAW_ENCAP_INDEX_VALUE,
320 ACTION_RAW_DECAP_INDEX,
321 ACTION_RAW_DECAP_INDEX_VALUE,
324 ACTION_SET_TAG_INDEX,
327 ACTION_SET_META_DATA,
328 ACTION_SET_META_MASK,
331 /** Maximum size for pattern in struct rte_flow_item_raw. */
332 #define ITEM_RAW_PATTERN_SIZE 40
334 /** Storage size for struct rte_flow_item_raw including pattern. */
335 #define ITEM_RAW_SIZE \
336 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
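/*
 * Illustrative note: ITEM_RAW_SIZE reserves the raw item specification plus
 * room for up to ITEM_RAW_PATTERN_SIZE pattern bytes stored right after it.
 * The "raw" pattern item below relies on this layout (sketch, mirroring the
 * actual token entry further down):
 *
 *   .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
 *   .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
 *                ARGS_ENTRY(struct rte_flow_item_raw, length),
 *                ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
 *                               ITEM_RAW_PATTERN_SIZE)),
 */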
338 /** Maximum number of queue indices in struct rte_flow_action_rss. */
339 #define ACTION_RSS_QUEUE_NUM 128
341 /** Storage for struct rte_flow_action_rss including external data. */
342 struct action_rss_data {
343 struct rte_flow_action_rss conf;
344 uint8_t key[RSS_HASH_KEY_LENGTH];
345 uint16_t queue[ACTION_RSS_QUEUE_NUM];
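/*
 * Illustrative note (assumption): conf.key and conf.queue are expected to be
 * pointed at the key[] and queue[] arrays above by the RSS action parser, so
 * a single struct action_rss_data allocation carries the rte_flow_action_rss
 * configuration together with its external data, e.g.:
 *
 *   data->conf.key = data->key;
 *   data->conf.queue = data->queue;
 */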
348 /** Maximum data size in struct rte_flow_action_raw_encap. */
349 #define ACTION_RAW_ENCAP_MAX_DATA 128
350 #define RAW_ENCAP_CONFS_MAX_NUM 8
352 /** Storage for struct rte_flow_action_raw_encap. */
353 struct raw_encap_conf {
354 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
355 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
359 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
361 /** Storage for struct rte_flow_action_raw_encap including external data. */
362 struct action_raw_encap_data {
363 struct rte_flow_action_raw_encap conf;
364 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
365 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
369 /** Storage for struct rte_flow_action_raw_decap. */
370 struct raw_decap_conf {
371 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
375 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
377 /** Storage for struct rte_flow_action_raw_decap including external data. */
378 struct action_raw_decap_data {
379 struct rte_flow_action_raw_decap conf;
380 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
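/*
 * Illustrative note: raw_encap_confs[]/raw_decap_confs[] hold up to
 * RAW_ENCAP_CONFS_MAX_NUM preconfigured buffers; the raw_encap/raw_decap
 * actions below pick one of them through their "index" sub-token
 * (ACTION_RAW_ENCAP_INDEX/ACTION_RAW_DECAP_INDEX), e.g. (assumed syntax):
 *
 *   ... actions raw_encap index 2 / end
 */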
384 struct vxlan_encap_conf vxlan_encap_conf = {
388 .vni = "\x00\x00\x00",
390 .udp_dst = RTE_BE16(4789),
391 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
392 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
393 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
394 "\x00\x00\x00\x00\x00\x00\x00\x01",
395 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
396 "\x00\x00\x00\x00\x00\x00\x11\x11",
400 .eth_src = "\x00\x00\x00\x00\x00\x00",
401 .eth_dst = "\xff\xff\xff\xff\xff\xff",
404 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
405 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
407 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
408 struct action_vxlan_encap_data {
409 struct rte_flow_action_vxlan_encap conf;
410 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
411 struct rte_flow_item_eth item_eth;
412 struct rte_flow_item_vlan item_vlan;
414 struct rte_flow_item_ipv4 item_ipv4;
415 struct rte_flow_item_ipv6 item_ipv6;
417 struct rte_flow_item_udp item_udp;
418 struct rte_flow_item_vxlan item_vxlan;
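/*
 * Illustrative note (assumption): conf.definition is expected to point at
 * items[], whose entries in turn reference the item_* specifications stored
 * in the same structure, so the whole encapsulation description travels as a
 * single allocation (see parse_vc_action_vxlan_encap(); the NVGRE variant
 * below follows the same pattern).
 */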
421 struct nvgre_encap_conf nvgre_encap_conf = {
424 .tni = "\x00\x00\x00",
425 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
426 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
427 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
428 "\x00\x00\x00\x00\x00\x00\x00\x01",
429 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
430 "\x00\x00\x00\x00\x00\x00\x11\x11",
432 .eth_src = "\x00\x00\x00\x00\x00\x00",
433 .eth_dst = "\xff\xff\xff\xff\xff\xff",
436 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
437 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
439 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
440 struct action_nvgre_encap_data {
441 struct rte_flow_action_nvgre_encap conf;
442 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
443 struct rte_flow_item_eth item_eth;
444 struct rte_flow_item_vlan item_vlan;
446 struct rte_flow_item_ipv4 item_ipv4;
447 struct rte_flow_item_ipv6 item_ipv6;
449 struct rte_flow_item_nvgre item_nvgre;
452 struct l2_encap_conf l2_encap_conf;
454 struct l2_decap_conf l2_decap_conf;
456 struct mplsogre_encap_conf mplsogre_encap_conf;
458 struct mplsogre_decap_conf mplsogre_decap_conf;
460 struct mplsoudp_encap_conf mplsoudp_encap_conf;
462 struct mplsoudp_decap_conf mplsoudp_decap_conf;
464 /** Maximum number of subsequent tokens and arguments on the stack. */
465 #define CTX_STACK_SIZE 16
467 /** Parser context. */
469 /** Stack of subsequent token lists to process. */
470 const enum index *next[CTX_STACK_SIZE];
471 /** Arguments for stacked tokens. */
472 const void *args[CTX_STACK_SIZE];
473 enum index curr; /**< Current token index. */
474 enum index prev; /**< Index of the last token seen. */
475 int next_num; /**< Number of entries in next[]. */
476 int args_num; /**< Number of entries in args[]. */
477 uint32_t eol:1; /**< EOL has been detected. */
478 uint32_t last:1; /**< No more arguments. */
479 portid_t port; /**< Current port ID (for completions). */
480 uint32_t objdata; /**< Object-specific data. */
481 void *object; /**< Address of current object for relative offsets. */
482 void *objmask; /**< Object a full mask must be written to. */
485 /** Token argument. */
487 uint32_t hton:1; /**< Use network byte ordering. */
488 uint32_t sign:1; /**< Value is signed. */
489 uint32_t bounded:1; /**< Value is bounded. */
490 uintmax_t min; /**< Minimum value if bounded. */
491 uintmax_t max; /**< Maximum value if bounded. */
492 uint32_t offset; /**< Relative offset from ctx->object. */
493 uint32_t size; /**< Field size. */
494 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
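/*
 * Illustrative note: the ARGS_ENTRY*() macros below fill this structure.
 * For instance ARGS_ENTRY(struct rte_flow_item_vf, id) records the offset
 * and size of the "id" field, the _HTON variants additionally request
 * network byte ordering, and the _MASK variants supply an explicit bit-mask
 * to use instead of plain offset/size addressing (sketch, not exhaustive).
 */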
497 /** Parser token definition. */
499 /** Type displayed during completion (defaults to "TOKEN"). */
501 /** Help displayed during completion (defaults to token name). */
503 /** Private data used by parser functions. */
506 * Lists of subsequent tokens to push on the stack. Each call to the
507 * parser consumes the last entry of that stack.
509 const enum index *const *next;
510 /** Arguments stack for subsequent tokens that need them. */
511 const struct arg *const *args;
513 * Token-processing callback, returns -1 in case of error, the
514 * length of the matched string otherwise. If NULL, attempts to
515 * match the token name.
517 * If buf is not NULL, the result should be stored in it according
518 * to context. An error is returned if not large enough.
520 int (*call)(struct context *ctx, const struct token *token,
521 const char *str, unsigned int len,
522 void *buf, unsigned int size);
524 * Callback that provides possible values for this token, used for
525 * completion. Returns -1 in case of error, the number of possible
526 * values otherwise. If NULL, the token name is used.
528 * If buf is not NULL, entry index ent is written to buf and the
529 * full length of the entry is returned (same behavior as
532 int (*comp)(struct context *ctx, const struct token *token,
533 unsigned int ent, char *buf, unsigned int size);
534 /** Mandatory token name, no default value. */
538 /** Static initializer for the next field. */
539 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
541 /** Static initializer for a NEXT() entry. */
542 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
544 /** Static initializer for the args field. */
545 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
547 /** Static initializer for ARGS() to target a field. */
548 #define ARGS_ENTRY(s, f) \
549 (&(const struct arg){ \
550 .offset = offsetof(s, f), \
551 .size = sizeof(((s *)0)->f), \
554 /** Static initializer for ARGS() to target a bit-field. */
555 #define ARGS_ENTRY_BF(s, f, b) \
556 (&(const struct arg){ \
558 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
561 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
562 #define ARGS_ENTRY_MASK(s, f, m) \
563 (&(const struct arg){ \
564 .offset = offsetof(s, f), \
565 .size = sizeof(((s *)0)->f), \
566 .mask = (const void *)(m), \
569 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
570 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
571 (&(const struct arg){ \
573 .offset = offsetof(s, f), \
574 .size = sizeof(((s *)0)->f), \
575 .mask = (const void *)(m), \
578 /** Static initializer for ARGS() to target a pointer. */
579 #define ARGS_ENTRY_PTR(s, f) \
580 (&(const struct arg){ \
581 .size = sizeof(*((s *)0)->f), \
584 /** Static initializer for ARGS() with arbitrary offset and size. */
585 #define ARGS_ENTRY_ARB(o, s) \
586 (&(const struct arg){ \
591 /** Same as ARGS_ENTRY_ARB() with bounded values. */
592 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
593 (&(const struct arg){ \
601 /** Same as ARGS_ENTRY() using network byte ordering. */
602 #define ARGS_ENTRY_HTON(s, f) \
603 (&(const struct arg){ \
605 .offset = offsetof(s, f), \
606 .size = sizeof(((s *)0)->f), \
609 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
610 #define ARG_ENTRY_HTON(s) \
611 (&(const struct arg){ \
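/*
 * Illustrative sketch of how the macros above combine into token_list[]
 * entries further down (modeled on the vf item; details such as the exact
 * name/help strings are assumptions):
 *
 *   [ITEM_VF] = {
 *           .name = "vf",
 *           .help = "match traffic from/to a virtual function ID",
 *           .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
 *           .next = NEXT(item_vf),
 *           .call = parse_vc,
 *   },
 *   [ITEM_VF_ID] = {
 *           .name = "id",
 *           .help = "VF ID",
 *           .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
 *           .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
 *   },
 *
 * NEXT() stacks the token lists to process next while ARGS() describes where
 * parsed values are stored inside the item/action object.
 */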
617 /** Parser output buffer layout expected by cmd_flow_parsed(). */
619 enum index command; /**< Flow command. */
620 portid_t port; /**< Affected port ID. */
623 struct rte_flow_attr attr;
624 struct rte_flow_item *pattern;
625 struct rte_flow_action *actions;
629 } vc; /**< Validate/create arguments. */
633 } destroy; /**< Destroy arguments. */
636 struct rte_flow_action action;
637 } query; /**< Query arguments. */
641 } list; /**< List arguments. */
644 } isolate; /**< Isolated mode arguments. */
645 } args; /**< Command arguments. */
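/*
 * Illustrative note: "command" selects which member of the args union is
 * meaningful, e.g. validate/create fill args.vc (attributes, pattern and
 * actions) while destroy/query/list/isolate use their respective members;
 * cmd_flow_parsed() is expected to dispatch on it (sketch of intent).
 */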
648 /** Private data for pattern items. */
649 struct parse_item_priv {
650 enum rte_flow_item_type type; /**< Item type. */
651 uint32_t size; /**< Size of item specification structure. */
654 #define PRIV_ITEM(t, s) \
655 (&(const struct parse_item_priv){ \
656 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
660 /** Private data for actions. */
661 struct parse_action_priv {
662 enum rte_flow_action_type type; /**< Action type. */
663 uint32_t size; /**< Size of action configuration structure. */
666 #define PRIV_ACTION(t, s) \
667 (&(const struct parse_action_priv){ \
668 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
672 static const enum index next_vc_attr[] = {
682 static const enum index next_destroy_attr[] = {
688 static const enum index next_list_attr[] = {
694 static const enum index item_param[] = {
703 static const enum index next_item[] = {
739 ITEM_ICMP6_ND_OPT_SLA_ETH,
740 ITEM_ICMP6_ND_OPT_TLA_ETH,
753 static const enum index item_fuzzy[] = {
759 static const enum index item_any[] = {
765 static const enum index item_vf[] = {
771 static const enum index item_phy_port[] = {
777 static const enum index item_port_id[] = {
783 static const enum index item_mark[] = {
789 static const enum index item_raw[] = {
799 static const enum index item_eth[] = {
807 static const enum index item_vlan[] = {
812 ITEM_VLAN_INNER_TYPE,
817 static const enum index item_ipv4[] = {
827 static const enum index item_ipv6[] = {
838 static const enum index item_icmp[] = {
845 static const enum index item_udp[] = {
852 static const enum index item_tcp[] = {
860 static const enum index item_sctp[] = {
869 static const enum index item_vxlan[] = {
875 static const enum index item_e_tag[] = {
876 ITEM_E_TAG_GRP_ECID_B,
881 static const enum index item_nvgre[] = {
887 static const enum index item_mpls[] = {
895 static const enum index item_gre[] = {
897 ITEM_GRE_C_RSVD0_VER,
905 static const enum index item_gre_key[] = {
911 static const enum index item_gtp[] = {
917 static const enum index item_geneve[] = {
924 static const enum index item_vxlan_gpe[] = {
930 static const enum index item_arp_eth_ipv4[] = {
931 ITEM_ARP_ETH_IPV4_SHA,
932 ITEM_ARP_ETH_IPV4_SPA,
933 ITEM_ARP_ETH_IPV4_THA,
934 ITEM_ARP_ETH_IPV4_TPA,
939 static const enum index item_ipv6_ext[] = {
940 ITEM_IPV6_EXT_NEXT_HDR,
945 static const enum index item_icmp6[] = {
952 static const enum index item_icmp6_nd_ns[] = {
953 ITEM_ICMP6_ND_NS_TARGET_ADDR,
958 static const enum index item_icmp6_nd_na[] = {
959 ITEM_ICMP6_ND_NA_TARGET_ADDR,
964 static const enum index item_icmp6_nd_opt[] = {
965 ITEM_ICMP6_ND_OPT_TYPE,
970 static const enum index item_icmp6_nd_opt_sla_eth[] = {
971 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
976 static const enum index item_icmp6_nd_opt_tla_eth[] = {
977 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
982 static const enum index item_meta[] = {
988 static const enum index item_gtp_psc[] = {
995 static const enum index item_pppoed[] = {
1001 static const enum index item_pppoes[] = {
1007 static const enum index item_pppoe_proto_id[] = {
1008 ITEM_PPPOE_PROTO_ID,
1013 static const enum index item_higig2[] = {
1014 ITEM_HIGIG2_CLASSIFICATION,
1020 static const enum index next_set_raw[] = {
1026 static const enum index item_tag[] = {
1033 static const enum index next_action[] = {
1049 ACTION_OF_SET_MPLS_TTL,
1050 ACTION_OF_DEC_MPLS_TTL,
1051 ACTION_OF_SET_NW_TTL,
1052 ACTION_OF_DEC_NW_TTL,
1053 ACTION_OF_COPY_TTL_OUT,
1054 ACTION_OF_COPY_TTL_IN,
1056 ACTION_OF_PUSH_VLAN,
1057 ACTION_OF_SET_VLAN_VID,
1058 ACTION_OF_SET_VLAN_PCP,
1060 ACTION_OF_PUSH_MPLS,
1067 ACTION_MPLSOGRE_ENCAP,
1068 ACTION_MPLSOGRE_DECAP,
1069 ACTION_MPLSOUDP_ENCAP,
1070 ACTION_MPLSOUDP_DECAP,
1071 ACTION_SET_IPV4_SRC,
1072 ACTION_SET_IPV4_DST,
1073 ACTION_SET_IPV6_SRC,
1074 ACTION_SET_IPV6_DST,
1093 static const enum index action_mark[] = {
1099 static const enum index action_queue[] = {
1105 static const enum index action_count[] = {
1107 ACTION_COUNT_SHARED,
1112 static const enum index action_rss[] = {
1123 static const enum index action_vf[] = {
1130 static const enum index action_phy_port[] = {
1131 ACTION_PHY_PORT_ORIGINAL,
1132 ACTION_PHY_PORT_INDEX,
1137 static const enum index action_port_id[] = {
1138 ACTION_PORT_ID_ORIGINAL,
1144 static const enum index action_meter[] = {
1150 static const enum index action_of_set_mpls_ttl[] = {
1151 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1156 static const enum index action_of_set_nw_ttl[] = {
1157 ACTION_OF_SET_NW_TTL_NW_TTL,
1162 static const enum index action_of_push_vlan[] = {
1163 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1168 static const enum index action_of_set_vlan_vid[] = {
1169 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1174 static const enum index action_of_set_vlan_pcp[] = {
1175 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1180 static const enum index action_of_pop_mpls[] = {
1181 ACTION_OF_POP_MPLS_ETHERTYPE,
1186 static const enum index action_of_push_mpls[] = {
1187 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1192 static const enum index action_set_ipv4_src[] = {
1193 ACTION_SET_IPV4_SRC_IPV4_SRC,
1198 static const enum index action_set_mac_src[] = {
1199 ACTION_SET_MAC_SRC_MAC_SRC,
1204 static const enum index action_set_ipv4_dst[] = {
1205 ACTION_SET_IPV4_DST_IPV4_DST,
1210 static const enum index action_set_ipv6_src[] = {
1211 ACTION_SET_IPV6_SRC_IPV6_SRC,
1216 static const enum index action_set_ipv6_dst[] = {
1217 ACTION_SET_IPV6_DST_IPV6_DST,
1222 static const enum index action_set_tp_src[] = {
1223 ACTION_SET_TP_SRC_TP_SRC,
1228 static const enum index action_set_tp_dst[] = {
1229 ACTION_SET_TP_DST_TP_DST,
1234 static const enum index action_set_ttl[] = {
1240 static const enum index action_jump[] = {
1246 static const enum index action_set_mac_dst[] = {
1247 ACTION_SET_MAC_DST_MAC_DST,
1252 static const enum index action_inc_tcp_seq[] = {
1253 ACTION_INC_TCP_SEQ_VALUE,
1258 static const enum index action_dec_tcp_seq[] = {
1259 ACTION_DEC_TCP_SEQ_VALUE,
1264 static const enum index action_inc_tcp_ack[] = {
1265 ACTION_INC_TCP_ACK_VALUE,
1270 static const enum index action_dec_tcp_ack[] = {
1271 ACTION_DEC_TCP_ACK_VALUE,
1276 static const enum index action_raw_encap[] = {
1277 ACTION_RAW_ENCAP_INDEX,
1282 static const enum index action_raw_decap[] = {
1283 ACTION_RAW_DECAP_INDEX,
1288 static const enum index action_set_tag[] = {
1289 ACTION_SET_TAG_DATA,
1290 ACTION_SET_TAG_INDEX,
1291 ACTION_SET_TAG_MASK,
1296 static const enum index action_set_meta[] = {
1297 ACTION_SET_META_DATA,
1298 ACTION_SET_META_MASK,
1303 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1304 const char *, unsigned int,
1305 void *, unsigned int);
1306 static int parse_set_init(struct context *, const struct token *,
1307 const char *, unsigned int,
1308 void *, unsigned int);
1309 static int parse_init(struct context *, const struct token *,
1310 const char *, unsigned int,
1311 void *, unsigned int);
1312 static int parse_vc(struct context *, const struct token *,
1313 const char *, unsigned int,
1314 void *, unsigned int);
1315 static int parse_vc_spec(struct context *, const struct token *,
1316 const char *, unsigned int, void *, unsigned int);
1317 static int parse_vc_conf(struct context *, const struct token *,
1318 const char *, unsigned int, void *, unsigned int);
1319 static int parse_vc_action_rss(struct context *, const struct token *,
1320 const char *, unsigned int, void *,
1322 static int parse_vc_action_rss_func(struct context *, const struct token *,
1323 const char *, unsigned int, void *,
1325 static int parse_vc_action_rss_type(struct context *, const struct token *,
1326 const char *, unsigned int, void *,
1328 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1329 const char *, unsigned int, void *,
1331 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1332 const char *, unsigned int, void *,
1334 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1335 const char *, unsigned int, void *,
1337 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1338 const char *, unsigned int, void *,
1340 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1341 const char *, unsigned int, void *,
1343 static int parse_vc_action_mplsogre_encap(struct context *,
1344 const struct token *, const char *,
1345 unsigned int, void *, unsigned int);
1346 static int parse_vc_action_mplsogre_decap(struct context *,
1347 const struct token *, const char *,
1348 unsigned int, void *, unsigned int);
1349 static int parse_vc_action_mplsoudp_encap(struct context *,
1350 const struct token *, const char *,
1351 unsigned int, void *, unsigned int);
1352 static int parse_vc_action_mplsoudp_decap(struct context *,
1353 const struct token *, const char *,
1354 unsigned int, void *, unsigned int);
1355 static int parse_vc_action_raw_encap(struct context *,
1356 const struct token *, const char *,
1357 unsigned int, void *, unsigned int);
1358 static int parse_vc_action_raw_decap(struct context *,
1359 const struct token *, const char *,
1360 unsigned int, void *, unsigned int);
1361 static int parse_vc_action_raw_encap_index(struct context *,
1362 const struct token *, const char *,
1363 unsigned int, void *, unsigned int);
1364 static int parse_vc_action_raw_decap_index(struct context *,
1365 const struct token *, const char *,
1366 unsigned int, void *, unsigned int);
1367 static int parse_vc_action_set_meta(struct context *ctx,
1368 const struct token *token, const char *str,
1369 unsigned int len, void *buf,
1371 static int parse_destroy(struct context *, const struct token *,
1372 const char *, unsigned int,
1373 void *, unsigned int);
1374 static int parse_flush(struct context *, const struct token *,
1375 const char *, unsigned int,
1376 void *, unsigned int);
1377 static int parse_query(struct context *, const struct token *,
1378 const char *, unsigned int,
1379 void *, unsigned int);
1380 static int parse_action(struct context *, const struct token *,
1381 const char *, unsigned int,
1382 void *, unsigned int);
1383 static int parse_list(struct context *, const struct token *,
1384 const char *, unsigned int,
1385 void *, unsigned int);
1386 static int parse_isolate(struct context *, const struct token *,
1387 const char *, unsigned int,
1388 void *, unsigned int);
1389 static int parse_int(struct context *, const struct token *,
1390 const char *, unsigned int,
1391 void *, unsigned int);
1392 static int parse_prefix(struct context *, const struct token *,
1393 const char *, unsigned int,
1394 void *, unsigned int);
1395 static int parse_boolean(struct context *, const struct token *,
1396 const char *, unsigned int,
1397 void *, unsigned int);
1398 static int parse_string(struct context *, const struct token *,
1399 const char *, unsigned int,
1400 void *, unsigned int);
1401 static int parse_hex(struct context *ctx, const struct token *token,
1402 const char *str, unsigned int len,
1403 void *buf, unsigned int size);
1404 static int parse_mac_addr(struct context *, const struct token *,
1405 const char *, unsigned int,
1406 void *, unsigned int);
1407 static int parse_ipv4_addr(struct context *, const struct token *,
1408 const char *, unsigned int,
1409 void *, unsigned int);
1410 static int parse_ipv6_addr(struct context *, const struct token *,
1411 const char *, unsigned int,
1412 void *, unsigned int);
1413 static int parse_port(struct context *, const struct token *,
1414 const char *, unsigned int,
1415 void *, unsigned int);
1416 static int comp_none(struct context *, const struct token *,
1417 unsigned int, char *, unsigned int);
1418 static int comp_boolean(struct context *, const struct token *,
1419 unsigned int, char *, unsigned int);
1420 static int comp_action(struct context *, const struct token *,
1421 unsigned int, char *, unsigned int);
1422 static int comp_port(struct context *, const struct token *,
1423 unsigned int, char *, unsigned int);
1424 static int comp_rule_id(struct context *, const struct token *,
1425 unsigned int, char *, unsigned int);
1426 static int comp_vc_action_rss_type(struct context *, const struct token *,
1427 unsigned int, char *, unsigned int);
1428 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1429 unsigned int, char *, unsigned int);
1430 static int comp_set_raw_index(struct context *, const struct token *,
1431 unsigned int, char *, unsigned int);
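/*
 * Illustrative sketch of the completion callback contract documented in
 * struct token (hypothetical helper, not part of the actual token set):
 *
 *   static int
 *   comp_example(struct context *ctx, const struct token *token,
 *                unsigned int ent, char *buf, unsigned int size)
 *   {
 *           static const char *const values[] = { "foo", "bar" };
 *
 *           (void)ctx;
 *           (void)token;
 *           if (!buf)
 *                   return RTE_DIM(values);
 *           if (ent >= RTE_DIM(values))
 *                   return -1;
 *           return strlcpy(buf, values[ent], size);
 *   }
 *
 * With buf == NULL the number of possible values is returned; otherwise
 * entry "ent" is written to buf and its full length is returned.
 */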
1433 /** Token definitions. */
1434 static const struct token token_list[] = {
1435 /* Special tokens. */
1438 .help = "null entry, abused as the entry point",
1439 .next = NEXT(NEXT_ENTRY(FLOW)),
1444 .help = "command may end here",
1447 .name = "START_SET",
1448 .help = "null entry, abused as the entry point for set",
1449 .next = NEXT(NEXT_ENTRY(SET)),
1454 .help = "set command may end here",
1456 /* Common tokens. */
1460 .help = "integer value",
1465 .name = "{unsigned}",
1467 .help = "unsigned integer value",
1474 .help = "prefix length for bit-mask",
1475 .call = parse_prefix,
1479 .name = "{boolean}",
1481 .help = "any boolean value",
1482 .call = parse_boolean,
1483 .comp = comp_boolean,
1488 .help = "fixed string",
1489 .call = parse_string,
1495 .help = "fixed string",
1500 .name = "{MAC address}",
1502 .help = "standard MAC address notation",
1503 .call = parse_mac_addr,
1507 .name = "{IPv4 address}",
1508 .type = "IPV4 ADDRESS",
1509 .help = "standard IPv4 address notation",
1510 .call = parse_ipv4_addr,
1514 .name = "{IPv6 address}",
1515 .type = "IPV6 ADDRESS",
1516 .help = "standard IPv6 address notation",
1517 .call = parse_ipv6_addr,
1521 .name = "{rule id}",
1523 .help = "rule identifier",
1525 .comp = comp_rule_id,
1528 .name = "{port_id}",
1530 .help = "port identifier",
1535 .name = "{group_id}",
1537 .help = "group identifier",
1541 [PRIORITY_LEVEL] = {
1544 .help = "priority level",
1548 /* Top-level command. */
1551 .type = "{command} {port_id} [{arg} [...]]",
1552 .help = "manage ingress/egress flow rules",
1553 .next = NEXT(NEXT_ENTRY
1563 /* Sub-level commands. */
1566 .help = "check whether a flow rule can be created",
1567 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1568 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1573 .help = "create a flow rule",
1574 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1575 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1580 .help = "destroy specific flow rules",
1581 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1582 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1583 .call = parse_destroy,
1587 .help = "destroy all flow rules",
1588 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1589 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1590 .call = parse_flush,
1594 .help = "query an existing flow rule",
1595 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1596 NEXT_ENTRY(RULE_ID),
1597 NEXT_ENTRY(PORT_ID)),
1598 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1599 ARGS_ENTRY(struct buffer, args.query.rule),
1600 ARGS_ENTRY(struct buffer, port)),
1601 .call = parse_query,
1605 .help = "list existing flow rules",
1606 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1607 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1612 .help = "restrict ingress traffic to the defined flow rules",
1613 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1614 NEXT_ENTRY(PORT_ID)),
1615 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1616 ARGS_ENTRY(struct buffer, port)),
1617 .call = parse_isolate,
1619 /* Destroy arguments. */
1622 .help = "specify a rule identifier",
1623 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1624 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1625 .call = parse_destroy,
1627 /* Query arguments. */
1631 .help = "action to query, must be part of the rule",
1632 .call = parse_action,
1633 .comp = comp_action,
1635 /* List arguments. */
1638 .help = "specify a group",
1639 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1640 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1643 /* Validate/create attributes. */
1646 .help = "specify a group",
1647 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1648 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1653 .help = "specify a priority level",
1654 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1655 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1660 .help = "affect rule to ingress",
1661 .next = NEXT(next_vc_attr),
1666 .help = "affect rule to egress",
1667 .next = NEXT(next_vc_attr),
1672 .help = "apply rule directly to endpoints found in pattern",
1673 .next = NEXT(next_vc_attr),
1676 /* Validate/create pattern. */
1679 .help = "submit a list of pattern items",
1680 .next = NEXT(next_item),
1685 .help = "match value perfectly (with full bit-mask)",
1686 .call = parse_vc_spec,
1688 [ITEM_PARAM_SPEC] = {
1690 .help = "match value according to configured bit-mask",
1691 .call = parse_vc_spec,
1693 [ITEM_PARAM_LAST] = {
1695 .help = "specify upper bound to establish a range",
1696 .call = parse_vc_spec,
1698 [ITEM_PARAM_MASK] = {
1700 .help = "specify bit-mask with relevant bits set to one",
1701 .call = parse_vc_spec,
1703 [ITEM_PARAM_PREFIX] = {
1705 .help = "generate bit-mask from a prefix length",
1706 .call = parse_vc_spec,
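/*
 * Illustrative example (assumed testpmd syntax): the is/spec/last/mask/prefix
 * qualifiers above follow a pattern item field, e.g.:
 *
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / end
 *        actions queue index 3 / end
 *
 * where "is" matches the value with a full bit-mask, while spec/mask set the
 * value and bit-mask separately and prefix derives the mask from a length.
 */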
1710 .help = "specify next pattern item",
1711 .next = NEXT(next_item),
1715 .help = "end list of pattern items",
1716 .priv = PRIV_ITEM(END, 0),
1717 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1722 .help = "no-op pattern item",
1723 .priv = PRIV_ITEM(VOID, 0),
1724 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1729 .help = "perform actions when pattern does not match",
1730 .priv = PRIV_ITEM(INVERT, 0),
1731 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1736 .help = "match any protocol for the current layer",
1737 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1738 .next = NEXT(item_any),
1743 .help = "number of layers covered",
1744 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1745 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1749 .help = "match traffic from/to the physical function",
1750 .priv = PRIV_ITEM(PF, 0),
1751 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1756 .help = "match traffic from/to a virtual function ID",
1757 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1758 .next = NEXT(item_vf),
1764 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1765 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1769 .help = "match traffic from/to a specific physical port",
1770 .priv = PRIV_ITEM(PHY_PORT,
1771 sizeof(struct rte_flow_item_phy_port)),
1772 .next = NEXT(item_phy_port),
1775 [ITEM_PHY_PORT_INDEX] = {
1777 .help = "physical port index",
1778 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1779 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1783 .help = "match traffic from/to a given DPDK port ID",
1784 .priv = PRIV_ITEM(PORT_ID,
1785 sizeof(struct rte_flow_item_port_id)),
1786 .next = NEXT(item_port_id),
1789 [ITEM_PORT_ID_ID] = {
1791 .help = "DPDK port ID",
1792 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1793 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1797 .help = "match traffic against value set in previously matched rule",
1798 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1799 .next = NEXT(item_mark),
1804 .help = "Integer value to match against",
1805 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1806 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1810 .help = "match an arbitrary byte string",
1811 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1812 .next = NEXT(item_raw),
1815 [ITEM_RAW_RELATIVE] = {
1817 .help = "look for pattern after the previous item",
1818 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1819 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1822 [ITEM_RAW_SEARCH] = {
1824 .help = "search pattern from offset (see also limit)",
1825 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1826 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1829 [ITEM_RAW_OFFSET] = {
1831 .help = "absolute or relative offset for pattern",
1832 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1833 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1835 [ITEM_RAW_LIMIT] = {
1837 .help = "search area limit for start of pattern",
1838 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1839 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1841 [ITEM_RAW_PATTERN] = {
1843 .help = "byte string to look for",
1844 .next = NEXT(item_raw,
1846 NEXT_ENTRY(ITEM_PARAM_IS,
1849 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1850 ARGS_ENTRY(struct rte_flow_item_raw, length),
1851 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1852 ITEM_RAW_PATTERN_SIZE)),
1856 .help = "match Ethernet header",
1857 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1858 .next = NEXT(item_eth),
1863 .help = "destination MAC",
1864 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1865 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1869 .help = "source MAC",
1870 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1871 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1875 .help = "EtherType",
1876 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1877 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1881 .help = "match 802.1Q/ad VLAN tag",
1882 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1883 .next = NEXT(item_vlan),
1888 .help = "tag control information",
1889 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1890 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1894 .help = "priority code point",
1895 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1896 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1901 .help = "drop eligible indicator",
1902 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1903 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1908 .help = "VLAN identifier",
1909 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1910 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1913 [ITEM_VLAN_INNER_TYPE] = {
1914 .name = "inner_type",
1915 .help = "inner EtherType",
1916 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1917 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1922 .help = "match IPv4 header",
1923 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1924 .next = NEXT(item_ipv4),
1929 .help = "type of service",
1930 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1931 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1932 hdr.type_of_service)),
1936 .help = "time to live",
1937 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1938 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1941 [ITEM_IPV4_PROTO] = {
1943 .help = "next protocol ID",
1944 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1945 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1946 hdr.next_proto_id)),
1950 .help = "source address",
1951 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1952 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1957 .help = "destination address",
1958 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1959 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1964 .help = "match IPv6 header",
1965 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1966 .next = NEXT(item_ipv6),
1971 .help = "traffic class",
1972 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1973 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1975 "\x0f\xf0\x00\x00")),
1977 [ITEM_IPV6_FLOW] = {
1979 .help = "flow label",
1980 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1981 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1983 "\x00\x0f\xff\xff")),
1985 [ITEM_IPV6_PROTO] = {
1987 .help = "protocol (next header)",
1988 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1989 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1994 .help = "hop limit",
1995 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1996 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2001 .help = "source address",
2002 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2003 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2008 .help = "destination address",
2009 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2010 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2015 .help = "match ICMP header",
2016 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2017 .next = NEXT(item_icmp),
2020 [ITEM_ICMP_TYPE] = {
2022 .help = "ICMP packet type",
2023 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2024 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2027 [ITEM_ICMP_CODE] = {
2029 .help = "ICMP packet code",
2030 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2031 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2036 .help = "match UDP header",
2037 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2038 .next = NEXT(item_udp),
2043 .help = "UDP source port",
2044 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2045 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2050 .help = "UDP destination port",
2051 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2052 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2057 .help = "match TCP header",
2058 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2059 .next = NEXT(item_tcp),
2064 .help = "TCP source port",
2065 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2066 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2071 .help = "TCP destination port",
2072 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2073 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2076 [ITEM_TCP_FLAGS] = {
2078 .help = "TCP flags",
2079 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2080 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2085 .help = "match SCTP header",
2086 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2087 .next = NEXT(item_sctp),
2092 .help = "SCTP source port",
2093 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2094 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2099 .help = "SCTP destination port",
2100 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2101 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2106 .help = "validation tag",
2107 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2108 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2111 [ITEM_SCTP_CKSUM] = {
2114 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2115 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2120 .help = "match VXLAN header",
2121 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2122 .next = NEXT(item_vxlan),
2125 [ITEM_VXLAN_VNI] = {
2127 .help = "VXLAN identifier",
2128 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2129 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2133 .help = "match E-Tag header",
2134 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2135 .next = NEXT(item_e_tag),
2138 [ITEM_E_TAG_GRP_ECID_B] = {
2139 .name = "grp_ecid_b",
2140 .help = "GRP and E-CID base",
2141 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2142 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2148 .help = "match NVGRE header",
2149 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2150 .next = NEXT(item_nvgre),
2153 [ITEM_NVGRE_TNI] = {
2155 .help = "virtual subnet ID",
2156 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2157 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2161 .help = "match MPLS header",
2162 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2163 .next = NEXT(item_mpls),
2166 [ITEM_MPLS_LABEL] = {
2168 .help = "MPLS label",
2169 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2170 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2176 .help = "MPLS Traffic Class",
2177 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2178 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2184 .help = "MPLS Bottom-of-Stack",
2185 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2186 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2192 .help = "match GRE header",
2193 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2194 .next = NEXT(item_gre),
2197 [ITEM_GRE_PROTO] = {
2199 .help = "GRE protocol type",
2200 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2201 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2204 [ITEM_GRE_C_RSVD0_VER] = {
2205 .name = "c_rsvd0_ver",
2207 "checksum (1b), undefined (1b), key bit (1b),"
2208 " sequence number (1b), reserved 0 (9b),"
2210 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2211 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2214 [ITEM_GRE_C_BIT] = {
2216 .help = "checksum bit (C)",
2217 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2218 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2220 "\x80\x00\x00\x00")),
2222 [ITEM_GRE_S_BIT] = {
2224 .help = "sequence number bit (S)",
2225 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2226 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2228 "\x10\x00\x00\x00")),
2230 [ITEM_GRE_K_BIT] = {
2232 .help = "key bit (K)",
2233 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2234 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2236 "\x20\x00\x00\x00")),
2240 .help = "fuzzy pattern match, expect faster than default",
2241 .priv = PRIV_ITEM(FUZZY,
2242 sizeof(struct rte_flow_item_fuzzy)),
2243 .next = NEXT(item_fuzzy),
2246 [ITEM_FUZZY_THRESH] = {
2248 .help = "match accuracy threshold",
2249 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2250 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2255 .help = "match GTP header",
2256 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2257 .next = NEXT(item_gtp),
2262 .help = "tunnel endpoint identifier",
2263 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2264 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2268 .help = "match GTP header",
2269 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2270 .next = NEXT(item_gtp),
2275 .help = "match GTP header",
2276 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2277 .next = NEXT(item_gtp),
2282 .help = "match GENEVE header",
2283 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2284 .next = NEXT(item_geneve),
2287 [ITEM_GENEVE_VNI] = {
2289 .help = "virtual network identifier",
2290 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2291 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2293 [ITEM_GENEVE_PROTO] = {
2295 .help = "GENEVE protocol type",
2296 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2297 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2300 [ITEM_VXLAN_GPE] = {
2301 .name = "vxlan-gpe",
2302 .help = "match VXLAN-GPE header",
2303 .priv = PRIV_ITEM(VXLAN_GPE,
2304 sizeof(struct rte_flow_item_vxlan_gpe)),
2305 .next = NEXT(item_vxlan_gpe),
2308 [ITEM_VXLAN_GPE_VNI] = {
2310 .help = "VXLAN-GPE identifier",
2311 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2312 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2315 [ITEM_ARP_ETH_IPV4] = {
2316 .name = "arp_eth_ipv4",
2317 .help = "match ARP header for Ethernet/IPv4",
2318 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2319 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2320 .next = NEXT(item_arp_eth_ipv4),
2323 [ITEM_ARP_ETH_IPV4_SHA] = {
2325 .help = "sender hardware address",
2326 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2331 [ITEM_ARP_ETH_IPV4_SPA] = {
2333 .help = "sender IPv4 address",
2334 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2336 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2339 [ITEM_ARP_ETH_IPV4_THA] = {
2341 .help = "target hardware address",
2342 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2344 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2347 [ITEM_ARP_ETH_IPV4_TPA] = {
2349 .help = "target IPv4 address",
2350 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2352 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2357 .help = "match presence of any IPv6 extension header",
2358 .priv = PRIV_ITEM(IPV6_EXT,
2359 sizeof(struct rte_flow_item_ipv6_ext)),
2360 .next = NEXT(item_ipv6_ext),
2363 [ITEM_IPV6_EXT_NEXT_HDR] = {
2365 .help = "next header",
2366 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2367 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2372 .help = "match any ICMPv6 header",
2373 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2374 .next = NEXT(item_icmp6),
2377 [ITEM_ICMP6_TYPE] = {
2379 .help = "ICMPv6 type",
2380 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2381 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2384 [ITEM_ICMP6_CODE] = {
2386 .help = "ICMPv6 code",
2387 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2388 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2391 [ITEM_ICMP6_ND_NS] = {
2392 .name = "icmp6_nd_ns",
2393 .help = "match ICMPv6 neighbor discovery solicitation",
2394 .priv = PRIV_ITEM(ICMP6_ND_NS,
2395 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2396 .next = NEXT(item_icmp6_nd_ns),
2399 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2400 .name = "target_addr",
2401 .help = "target address",
2402 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2404 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2407 [ITEM_ICMP6_ND_NA] = {
2408 .name = "icmp6_nd_na",
2409 .help = "match ICMPv6 neighbor discovery advertisement",
2410 .priv = PRIV_ITEM(ICMP6_ND_NA,
2411 sizeof(struct rte_flow_item_icmp6_nd_na)),
2412 .next = NEXT(item_icmp6_nd_na),
2415 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2416 .name = "target_addr",
2417 .help = "target address",
2418 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2420 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2423 [ITEM_ICMP6_ND_OPT] = {
2424 .name = "icmp6_nd_opt",
2425 .help = "match presence of any ICMPv6 neighbor discovery"
2427 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2428 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2429 .next = NEXT(item_icmp6_nd_opt),
2432 [ITEM_ICMP6_ND_OPT_TYPE] = {
2434 .help = "ND option type",
2435 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2437 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2440 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2441 .name = "icmp6_nd_opt_sla_eth",
2442 .help = "match ICMPv6 neighbor discovery source Ethernet"
2443 " link-layer address option",
2445 (ICMP6_ND_OPT_SLA_ETH,
2446 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2447 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2450 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2452 .help = "source Ethernet LLA",
2453 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2455 .args = ARGS(ARGS_ENTRY_HTON
2456 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2458 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2459 .name = "icmp6_nd_opt_tla_eth",
2460 .help = "match ICMPv6 neighbor discovery target Ethernet"
2461 " link-layer address option",
2463 (ICMP6_ND_OPT_TLA_ETH,
2464 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2465 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2468 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2470 .help = "target Ethernet LLA",
2471 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2473 .args = ARGS(ARGS_ENTRY_HTON
2474 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2478 .help = "match metadata header",
2479 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2480 .next = NEXT(item_meta),
2483 [ITEM_META_DATA] = {
2485 .help = "metadata value",
2486 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2487 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
2488 data, "\xff\xff\xff\xff")),
2492 .help = "match GRE key",
2493 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2494 .next = NEXT(item_gre_key),
2497 [ITEM_GRE_KEY_VALUE] = {
2499 .help = "key value",
2500 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2501 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2505 .help = "match GTP extension header with type 0x85",
2506 .priv = PRIV_ITEM(GTP_PSC,
2507 sizeof(struct rte_flow_item_gtp_psc)),
2508 .next = NEXT(item_gtp_psc),
2511 [ITEM_GTP_PSC_QFI] = {
2513 .help = "QoS flow identifier",
2514 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2515 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2518 [ITEM_GTP_PSC_PDU_T] = {
2521 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2522 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2527 .help = "match PPPoE session header",
2528 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2529 .next = NEXT(item_pppoes),
2534 .help = "match PPPoE discovery header",
2535 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2536 .next = NEXT(item_pppoed),
2539 [ITEM_PPPOE_SEID] = {
2541 .help = "session identifier",
2542 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2543 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2546 [ITEM_PPPOE_PROTO_ID] = {
2548 .help = "match PPPoE session protocol identifier",
2549 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2550 sizeof(struct rte_flow_item_pppoe_proto_id)),
2551 .next = NEXT(item_pppoe_proto_id),
2556 .help = "matches higig2 header",
2557 .priv = PRIV_ITEM(HIGIG2,
2558 sizeof(struct rte_flow_item_higig2_hdr)),
2559 .next = NEXT(item_higig2),
2562 [ITEM_HIGIG2_CLASSIFICATION] = {
2563 .name = "classification",
2564 .help = "matches classification of higig2 header",
2565 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2566 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2567 hdr.ppt1.classification)),
2569 [ITEM_HIGIG2_VID] = {
2571 .help = "matches vid of higig2 header",
2572 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2573 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2578 .help = "match tag value",
2579 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
2580 .next = NEXT(item_tag),
2585 .help = "tag value to match",
2586 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
2587 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
2589 [ITEM_TAG_INDEX] = {
2591 .help = "index of tag array to match",
2592 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
2593 NEXT_ENTRY(ITEM_PARAM_IS)),
2594 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
2596 /* Validate/create actions. */
2599 .help = "submit a list of associated actions",
2600 .next = NEXT(next_action),
2605 .help = "specify next action",
2606 .next = NEXT(next_action),
2610 .help = "end list of actions",
2611 .priv = PRIV_ACTION(END, 0),
2616 .help = "no-op action",
2617 .priv = PRIV_ACTION(VOID, 0),
2618 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2621 [ACTION_PASSTHRU] = {
2623 .help = "let subsequent rule process matched packets",
2624 .priv = PRIV_ACTION(PASSTHRU, 0),
2625 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2630 .help = "redirect traffic to a given group",
2631 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2632 .next = NEXT(action_jump),
2635 [ACTION_JUMP_GROUP] = {
2637 .help = "group to redirect traffic to",
2638 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2639 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2640 .call = parse_vc_conf,
2644 .help = "attach 32 bit value to packets",
2645 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2646 .next = NEXT(action_mark),
2649 [ACTION_MARK_ID] = {
2651 .help = "32 bit value to return with packets",
2652 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2653 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2654 .call = parse_vc_conf,
2658 .help = "flag packets",
2659 .priv = PRIV_ACTION(FLAG, 0),
2660 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2665 .help = "assign packets to a given queue index",
2666 .priv = PRIV_ACTION(QUEUE,
2667 sizeof(struct rte_flow_action_queue)),
2668 .next = NEXT(action_queue),
2671 [ACTION_QUEUE_INDEX] = {
2673 .help = "queue index to use",
2674 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2675 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2676 .call = parse_vc_conf,
2680 .help = "drop packets (note: passthru has priority)",
2681 .priv = PRIV_ACTION(DROP, 0),
2682 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2687 .help = "enable counters for this rule",
2688 .priv = PRIV_ACTION(COUNT,
2689 sizeof(struct rte_flow_action_count)),
2690 .next = NEXT(action_count),
2693 [ACTION_COUNT_ID] = {
2694 .name = "identifier",
2695 .help = "counter identifier to use",
2696 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2697 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2698 .call = parse_vc_conf,
2700 [ACTION_COUNT_SHARED] = {
2702 .help = "shared counter",
2703 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2704 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2706 .call = parse_vc_conf,
2710 .help = "spread packets among several queues",
2711 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2712 .next = NEXT(action_rss),
2713 .call = parse_vc_action_rss,
2715 [ACTION_RSS_FUNC] = {
2717 .help = "RSS hash function to apply",
2718 .next = NEXT(action_rss,
2719 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2720 ACTION_RSS_FUNC_TOEPLITZ,
2721 ACTION_RSS_FUNC_SIMPLE_XOR,
2722 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2724 [ACTION_RSS_FUNC_DEFAULT] = {
2726 .help = "default hash function",
2727 .call = parse_vc_action_rss_func,
2729 [ACTION_RSS_FUNC_TOEPLITZ] = {
2731 .help = "Toeplitz hash function",
2732 .call = parse_vc_action_rss_func,
2734 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2735 .name = "simple_xor",
2736 .help = "simple XOR hash function",
2737 .call = parse_vc_action_rss_func,
2739 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2740 .name = "symmetric_toeplitz",
2741 .help = "Symmetric Toeplitz hash function",
2742 .call = parse_vc_action_rss_func,
2744 [ACTION_RSS_LEVEL] = {
2746 .help = "encapsulation level for \"types\"",
2747 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2748 .args = ARGS(ARGS_ENTRY_ARB
2749 (offsetof(struct action_rss_data, conf) +
2750 offsetof(struct rte_flow_action_rss, level),
2751 sizeof(((struct rte_flow_action_rss *)0)->
2754 [ACTION_RSS_TYPES] = {
2756 .help = "specific RSS hash types",
2757 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2759 [ACTION_RSS_TYPE] = {
2761 .help = "RSS hash type",
2762 .call = parse_vc_action_rss_type,
2763 .comp = comp_vc_action_rss_type,
2765 [ACTION_RSS_KEY] = {
2767 .help = "RSS hash key",
2768 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2769 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2771 (offsetof(struct action_rss_data, conf) +
2772 offsetof(struct rte_flow_action_rss, key_len),
2773 sizeof(((struct rte_flow_action_rss *)0)->
2775 ARGS_ENTRY(struct action_rss_data, key)),
2777 [ACTION_RSS_KEY_LEN] = {
2779 .help = "RSS hash key length in bytes",
2780 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2781 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2782 (offsetof(struct action_rss_data, conf) +
2783 offsetof(struct rte_flow_action_rss, key_len),
2784 sizeof(((struct rte_flow_action_rss *)0)->
2787 RSS_HASH_KEY_LENGTH)),
2789 [ACTION_RSS_QUEUES] = {
2791 .help = "queue indices to use",
2792 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2793 .call = parse_vc_conf,
2795 [ACTION_RSS_QUEUE] = {
2797 .help = "queue index",
2798 .call = parse_vc_action_rss_queue,
2799 .comp = comp_vc_action_rss_queue,
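/*
 * Illustrative example (assumed testpmd syntax): the rss sub-tokens above
 * chain back to action_rss, so several of them can be combined, e.g.:
 *
 *   flow create 0 ingress pattern eth / end
 *        actions rss func toeplitz types ipv4-udp end queues 0 1 end / end
 *
 * both the "types" and "queues" lists are terminated by "end", matching
 * parse_vc_action_rss_type()/parse_vc_action_rss_queue().
 */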
2803 .help = "direct traffic to physical function",
2804 .priv = PRIV_ACTION(PF, 0),
2805 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2810 .help = "direct traffic to a virtual function ID",
2811 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2812 .next = NEXT(action_vf),
2815 [ACTION_VF_ORIGINAL] = {
2817 .help = "use original VF ID if possible",
2818 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2819 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2821 .call = parse_vc_conf,
2826 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2827 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2828 .call = parse_vc_conf,
2830 [ACTION_PHY_PORT] = {
2832 .help = "direct packets to physical port index",
2833 .priv = PRIV_ACTION(PHY_PORT,
2834 sizeof(struct rte_flow_action_phy_port)),
2835 .next = NEXT(action_phy_port),
2838 [ACTION_PHY_PORT_ORIGINAL] = {
2840 .help = "use original port index if possible",
2841 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2842 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2844 .call = parse_vc_conf,
2846 [ACTION_PHY_PORT_INDEX] = {
2848 .help = "physical port index",
2849 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2850 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2852 .call = parse_vc_conf,
2854 [ACTION_PORT_ID] = {
2856 .help = "direct matching traffic to a given DPDK port ID",
2857 .priv = PRIV_ACTION(PORT_ID,
2858 sizeof(struct rte_flow_action_port_id)),
2859 .next = NEXT(action_port_id),
2862 [ACTION_PORT_ID_ORIGINAL] = {
2864 .help = "use original DPDK port ID if possible",
2865 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2866 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2868 .call = parse_vc_conf,
2870 [ACTION_PORT_ID_ID] = {
2872 .help = "DPDK port ID",
2873 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2874 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2875 .call = parse_vc_conf,
2879 .help = "meter the directed packets at given id",
2880 .priv = PRIV_ACTION(METER,
2881 sizeof(struct rte_flow_action_meter)),
2882 .next = NEXT(action_meter),
2885 [ACTION_METER_ID] = {
2887 .help = "meter id to use",
2888 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2889 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2890 .call = parse_vc_conf,
2892 [ACTION_OF_SET_MPLS_TTL] = {
2893 .name = "of_set_mpls_ttl",
2894 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2897 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2898 .next = NEXT(action_of_set_mpls_ttl),
2901 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2904 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2905 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2907 .call = parse_vc_conf,
2909 [ACTION_OF_DEC_MPLS_TTL] = {
2910 .name = "of_dec_mpls_ttl",
2911 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2912 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2913 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2916 [ACTION_OF_SET_NW_TTL] = {
2917 .name = "of_set_nw_ttl",
2918 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2921 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2922 .next = NEXT(action_of_set_nw_ttl),
2925 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2928 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2929 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2931 .call = parse_vc_conf,
2933 [ACTION_OF_DEC_NW_TTL] = {
2934 .name = "of_dec_nw_ttl",
2935 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2936 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2937 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2940 [ACTION_OF_COPY_TTL_OUT] = {
2941 .name = "of_copy_ttl_out",
2942 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2943 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2944 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2947 [ACTION_OF_COPY_TTL_IN] = {
2948 .name = "of_copy_ttl_in",
2949 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2950 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2951 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2954 [ACTION_OF_POP_VLAN] = {
2955 .name = "of_pop_vlan",
2956 .help = "OpenFlow's OFPAT_POP_VLAN",
2957 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2958 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2961 [ACTION_OF_PUSH_VLAN] = {
2962 .name = "of_push_vlan",
2963 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2966 sizeof(struct rte_flow_action_of_push_vlan)),
2967 .next = NEXT(action_of_push_vlan),
2970 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2971 .name = "ethertype",
2972 .help = "EtherType",
2973 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2974 .args = ARGS(ARGS_ENTRY_HTON
2975 (struct rte_flow_action_of_push_vlan,
2977 .call = parse_vc_conf,
2979 [ACTION_OF_SET_VLAN_VID] = {
2980 .name = "of_set_vlan_vid",
2981 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2984 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2985 .next = NEXT(action_of_set_vlan_vid),
2988 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2991 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2992 .args = ARGS(ARGS_ENTRY_HTON
2993 (struct rte_flow_action_of_set_vlan_vid,
2995 .call = parse_vc_conf,
2997 [ACTION_OF_SET_VLAN_PCP] = {
2998 .name = "of_set_vlan_pcp",
2999 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3002 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3003 .next = NEXT(action_of_set_vlan_pcp),
3006 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3008 .help = "VLAN priority",
3009 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3010 .args = ARGS(ARGS_ENTRY_HTON
3011 (struct rte_flow_action_of_set_vlan_pcp,
3013 .call = parse_vc_conf,
3015 [ACTION_OF_POP_MPLS] = {
3016 .name = "of_pop_mpls",
3017 .help = "OpenFlow's OFPAT_POP_MPLS",
3018 .priv = PRIV_ACTION(OF_POP_MPLS,
3019 sizeof(struct rte_flow_action_of_pop_mpls)),
3020 .next = NEXT(action_of_pop_mpls),
3023 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3024 .name = "ethertype",
3025 .help = "EtherType",
3026 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3027 .args = ARGS(ARGS_ENTRY_HTON
3028 (struct rte_flow_action_of_pop_mpls,
3030 .call = parse_vc_conf,
3032 [ACTION_OF_PUSH_MPLS] = {
3033 .name = "of_push_mpls",
3034 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3037 sizeof(struct rte_flow_action_of_push_mpls)),
3038 .next = NEXT(action_of_push_mpls),
3041 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3042 .name = "ethertype",
3043 .help = "EtherType",
3044 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3045 .args = ARGS(ARGS_ENTRY_HTON
3046 (struct rte_flow_action_of_push_mpls,
3048 .call = parse_vc_conf,
3050 [ACTION_VXLAN_ENCAP] = {
3051 .name = "vxlan_encap",
3052 .help = "VXLAN encapsulation, uses configuration set by \"set"
3054 .priv = PRIV_ACTION(VXLAN_ENCAP,
3055 sizeof(struct action_vxlan_encap_data)),
3056 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3057 .call = parse_vc_action_vxlan_encap,
3059 [ACTION_VXLAN_DECAP] = {
3060 .name = "vxlan_decap",
3061 .help = "Performs a decapsulation action by stripping all"
3062 " headers of the VXLAN tunnel network overlay from the"
3064 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3065 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3068 [ACTION_NVGRE_ENCAP] = {
3069 .name = "nvgre_encap",
3070 .help = "NVGRE encapsulation, uses configuration set by \"set"
3072 .priv = PRIV_ACTION(NVGRE_ENCAP,
3073 sizeof(struct action_nvgre_encap_data)),
3074 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3075 .call = parse_vc_action_nvgre_encap,
3077 [ACTION_NVGRE_DECAP] = {
3078 .name = "nvgre_decap",
3079 .help = "Performs a decapsulation action by stripping all"
3080 " headers of the NVGRE tunnel network overlay from the"
3082 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3083 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3086 [ACTION_L2_ENCAP] = {
3088 .help = "l2 encap, uses configuration set by"
3089 " \"set l2_encap\"",
3090 .priv = PRIV_ACTION(RAW_ENCAP,
3091 sizeof(struct action_raw_encap_data)),
3092 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3093 .call = parse_vc_action_l2_encap,
3095 [ACTION_L2_DECAP] = {
3097 .help = "l2 decap, uses configuration set by"
3098 " \"set l2_decap\"",
3099 .priv = PRIV_ACTION(RAW_DECAP,
3100 sizeof(struct action_raw_decap_data)),
3101 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3102 .call = parse_vc_action_l2_decap,
3104 [ACTION_MPLSOGRE_ENCAP] = {
3105 .name = "mplsogre_encap",
3106 .help = "mplsogre encapsulation, uses configuration set by"
3107 " \"set mplsogre_encap\"",
3108 .priv = PRIV_ACTION(RAW_ENCAP,
3109 sizeof(struct action_raw_encap_data)),
3110 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3111 .call = parse_vc_action_mplsogre_encap,
3113 [ACTION_MPLSOGRE_DECAP] = {
3114 .name = "mplsogre_decap",
3115 .help = "mplsogre decapsulation, uses configuration set by"
3116 " \"set mplsogre_decap\"",
3117 .priv = PRIV_ACTION(RAW_DECAP,
3118 sizeof(struct action_raw_decap_data)),
3119 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3120 .call = parse_vc_action_mplsogre_decap,
3122 [ACTION_MPLSOUDP_ENCAP] = {
3123 .name = "mplsoudp_encap",
3124 .help = "mplsoudp encapsulation, uses configuration set by"
3125 " \"set mplsoudp_encap\"",
3126 .priv = PRIV_ACTION(RAW_ENCAP,
3127 sizeof(struct action_raw_encap_data)),
3128 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3129 .call = parse_vc_action_mplsoudp_encap,
3131 [ACTION_MPLSOUDP_DECAP] = {
3132 .name = "mplsoudp_decap",
3133 .help = "mplsoudp decapsulation, uses configuration set by"
3134 " \"set mplsoudp_decap\"",
3135 .priv = PRIV_ACTION(RAW_DECAP,
3136 sizeof(struct action_raw_decap_data)),
3137 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3138 .call = parse_vc_action_mplsoudp_decap,
3140 [ACTION_SET_IPV4_SRC] = {
3141 .name = "set_ipv4_src",
3142 .help = "Set a new IPv4 source address in the outermost"
3144 .priv = PRIV_ACTION(SET_IPV4_SRC,
3145 sizeof(struct rte_flow_action_set_ipv4)),
3146 .next = NEXT(action_set_ipv4_src),
3149 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3150 .name = "ipv4_addr",
3151 .help = "new IPv4 source address to set",
3152 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3153 .args = ARGS(ARGS_ENTRY_HTON
3154 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3155 .call = parse_vc_conf,
3157 [ACTION_SET_IPV4_DST] = {
3158 .name = "set_ipv4_dst",
3159 .help = "Set a new IPv4 destination address in the outermost"
3161 .priv = PRIV_ACTION(SET_IPV4_DST,
3162 sizeof(struct rte_flow_action_set_ipv4)),
3163 .next = NEXT(action_set_ipv4_dst),
3166 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3167 .name = "ipv4_addr",
3168 .help = "new IPv4 destination address to set",
3169 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3170 .args = ARGS(ARGS_ENTRY_HTON
3171 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3172 .call = parse_vc_conf,
3174 [ACTION_SET_IPV6_SRC] = {
3175 .name = "set_ipv6_src",
3176 .help = "Set a new IPv6 source address in the outermost"
3178 .priv = PRIV_ACTION(SET_IPV6_SRC,
3179 sizeof(struct rte_flow_action_set_ipv6)),
3180 .next = NEXT(action_set_ipv6_src),
3183 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3184 .name = "ipv6_addr",
3185 .help = "new IPv6 source address to set",
3186 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3187 .args = ARGS(ARGS_ENTRY_HTON
3188 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3189 .call = parse_vc_conf,
3191 [ACTION_SET_IPV6_DST] = {
3192 .name = "set_ipv6_dst",
3193 .help = "Set a new IPv6 destination address in the outermost"
3195 .priv = PRIV_ACTION(SET_IPV6_DST,
3196 sizeof(struct rte_flow_action_set_ipv6)),
3197 .next = NEXT(action_set_ipv6_dst),
3200 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3201 .name = "ipv6_addr",
3202 .help = "new IPv6 destination address to set",
3203 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3204 .args = ARGS(ARGS_ENTRY_HTON
3205 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3206 .call = parse_vc_conf,
3208 [ACTION_SET_TP_SRC] = {
3209 .name = "set_tp_src",
3210 .help = "set a new source port number in the outermost"
3212 .priv = PRIV_ACTION(SET_TP_SRC,
3213 sizeof(struct rte_flow_action_set_tp)),
3214 .next = NEXT(action_set_tp_src),
3217 [ACTION_SET_TP_SRC_TP_SRC] = {
3219 .help = "new source port number to set",
3220 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3221 .args = ARGS(ARGS_ENTRY_HTON
3222 (struct rte_flow_action_set_tp, port)),
3223 .call = parse_vc_conf,
3225 [ACTION_SET_TP_DST] = {
3226 .name = "set_tp_dst",
3227 .help = "set a new destination port number in the outermost"
3229 .priv = PRIV_ACTION(SET_TP_DST,
3230 sizeof(struct rte_flow_action_set_tp)),
3231 .next = NEXT(action_set_tp_dst),
3234 [ACTION_SET_TP_DST_TP_DST] = {
3236 .help = "new destination port number to set",
3237 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3238 .args = ARGS(ARGS_ENTRY_HTON
3239 (struct rte_flow_action_set_tp, port)),
3240 .call = parse_vc_conf,
3242 [ACTION_MAC_SWAP] = {
3244 .help = "Swap the source and destination MAC addresses"
3245 " in the outermost Ethernet header",
3246 .priv = PRIV_ACTION(MAC_SWAP, 0),
3247 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3250 [ACTION_DEC_TTL] = {
3252 .help = "decrease network TTL if available",
3253 .priv = PRIV_ACTION(DEC_TTL, 0),
3254 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3257 [ACTION_SET_TTL] = {
3259 .help = "set ttl value",
3260 .priv = PRIV_ACTION(SET_TTL,
3261 sizeof(struct rte_flow_action_set_ttl)),
3262 .next = NEXT(action_set_ttl),
3265 [ACTION_SET_TTL_TTL] = {
3266 .name = "ttl_value",
3267 .help = "new ttl value to set",
3268 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3269 .args = ARGS(ARGS_ENTRY_HTON
3270 (struct rte_flow_action_set_ttl, ttl_value)),
3271 .call = parse_vc_conf,
3273 [ACTION_SET_MAC_SRC] = {
3274 .name = "set_mac_src",
3275 .help = "set source mac address",
3276 .priv = PRIV_ACTION(SET_MAC_SRC,
3277 sizeof(struct rte_flow_action_set_mac)),
3278 .next = NEXT(action_set_mac_src),
3281 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3283 .help = "new source mac address",
3284 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3285 .args = ARGS(ARGS_ENTRY_HTON
3286 (struct rte_flow_action_set_mac, mac_addr)),
3287 .call = parse_vc_conf,
3289 [ACTION_SET_MAC_DST] = {
3290 .name = "set_mac_dst",
3291 .help = "set destination mac address",
3292 .priv = PRIV_ACTION(SET_MAC_DST,
3293 sizeof(struct rte_flow_action_set_mac)),
3294 .next = NEXT(action_set_mac_dst),
3297 [ACTION_SET_MAC_DST_MAC_DST] = {
3299 .help = "new destination mac address to set",
3300 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3301 .args = ARGS(ARGS_ENTRY_HTON
3302 (struct rte_flow_action_set_mac, mac_addr)),
3303 .call = parse_vc_conf,
3305 [ACTION_INC_TCP_SEQ] = {
3306 .name = "inc_tcp_seq",
3307 .help = "increase TCP sequence number",
3308 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3309 .next = NEXT(action_inc_tcp_seq),
3312 [ACTION_INC_TCP_SEQ_VALUE] = {
3314 .help = "the value to increase TCP sequence number by",
3315 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3316 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3317 .call = parse_vc_conf,
3319 [ACTION_DEC_TCP_SEQ] = {
3320 .name = "dec_tcp_seq",
3321 .help = "decrease TCP sequence number",
3322 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3323 .next = NEXT(action_dec_tcp_seq),
3326 [ACTION_DEC_TCP_SEQ_VALUE] = {
3328 .help = "the value to decrease TCP sequence number by",
3329 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3330 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3331 .call = parse_vc_conf,
3333 [ACTION_INC_TCP_ACK] = {
3334 .name = "inc_tcp_ack",
3335 .help = "increase TCP acknowledgment number",
3336 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3337 .next = NEXT(action_inc_tcp_ack),
3340 [ACTION_INC_TCP_ACK_VALUE] = {
3342 .help = "the value to increase TCP acknowledgment number by",
3343 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3344 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3345 .call = parse_vc_conf,
3347 [ACTION_DEC_TCP_ACK] = {
3348 .name = "dec_tcp_ack",
3349 .help = "decrease TCP acknowledgment number",
3350 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3351 .next = NEXT(action_dec_tcp_ack),
3354 [ACTION_DEC_TCP_ACK_VALUE] = {
3356 .help = "the value to decrease TCP acknowledgment number by",
3357 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3358 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3359 .call = parse_vc_conf,
3361 [ACTION_RAW_ENCAP] = {
3362 .name = "raw_encap",
3363 .help = "encapsulation data, defined by set raw_encap",
3364 .priv = PRIV_ACTION(RAW_ENCAP,
3365 sizeof(struct action_raw_encap_data)),
3366 .next = NEXT(action_raw_encap),
3367 .call = parse_vc_action_raw_encap,
3369 [ACTION_RAW_ENCAP_INDEX] = {
3371 .help = "the index of raw_encap_confs",
3372 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
3374 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
3377 .help = "unsigned integer value",
3378 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3379 .call = parse_vc_action_raw_encap_index,
3380 .comp = comp_set_raw_index,
3382 [ACTION_RAW_DECAP] = {
3383 .name = "raw_decap",
3384 .help = "decapsulation data, defined by set raw_decap",
3385 .priv = PRIV_ACTION(RAW_DECAP,
3386 sizeof(struct action_raw_decap_data)),
3387 .next = NEXT(action_raw_decap),
3388 .call = parse_vc_action_raw_decap,
3390 [ACTION_RAW_DECAP_INDEX] = {
3392 .help = "the index of raw_decap_confs",
3393 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
3395 [ACTION_RAW_DECAP_INDEX_VALUE] = {
3398 .help = "unsigned integer value",
3399 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3400 .call = parse_vc_action_raw_decap_index,
3401 .comp = comp_set_raw_index,
3403 /* Top-level command. */
3406 .help = "set raw encap/decap data",
3407 .type = "set raw_encap|raw_decap <index> <pattern>",
3408 .next = NEXT(NEXT_ENTRY
3411 .call = parse_set_init,
3413 /* Sub-level commands. */
3415 .name = "raw_encap",
3416 .help = "set raw encap data",
3417 .next = NEXT(next_set_raw),
3418 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3419 (offsetof(struct buffer, port),
3420 sizeof(((struct buffer *)0)->port),
3421 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3422 .call = parse_set_raw_encap_decap,
3425 .name = "raw_decap",
3426 .help = "set raw decap data",
3427 .next = NEXT(next_set_raw),
3428 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3429 (offsetof(struct buffer, port),
3430 sizeof(((struct buffer *)0)->port),
3431 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3432 .call = parse_set_raw_encap_decap,
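/*
 * Illustrative usage (assuming the usual testpmd syntax for these tokens):
 * a header stack is stored first, e.g.
 *   set raw_encap 3 eth src is 11:22:33:44:55:66 / ipv4 / udp / end_set
 * and referenced later from a flow rule with
 *   ... actions raw_encap index 3 / end
 */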
3437 .help = "index of raw_encap/raw_decap data",
3438 .next = NEXT(next_item),
3441 [ACTION_SET_TAG] = {
3444 .priv = PRIV_ACTION(SET_TAG,
3445 sizeof(struct rte_flow_action_set_tag)),
3446 .next = NEXT(action_set_tag),
3449 [ACTION_SET_TAG_INDEX] = {
3451 .help = "index of tag array",
3452 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3453 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
3454 .call = parse_vc_conf,
3456 [ACTION_SET_TAG_DATA] = {
3458 .help = "tag value",
3459 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3460 .args = ARGS(ARGS_ENTRY
3461 (struct rte_flow_action_set_tag, data)),
3462 .call = parse_vc_conf,
3464 [ACTION_SET_TAG_MASK] = {
3466 .help = "mask for tag value",
3467 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3468 .args = ARGS(ARGS_ENTRY
3469 (struct rte_flow_action_set_tag, mask)),
3470 .call = parse_vc_conf,
3472 [ACTION_SET_META] = {
3474 .help = "set metadata",
3475 .priv = PRIV_ACTION(SET_META,
3476 sizeof(struct rte_flow_action_set_meta)),
3477 .next = NEXT(action_set_meta),
3478 .call = parse_vc_action_set_meta,
3480 [ACTION_SET_META_DATA] = {
3482 .help = "metadata value",
3483 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3484 .args = ARGS(ARGS_ENTRY
3485 (struct rte_flow_action_set_meta, data)),
3486 .call = parse_vc_conf,
3488 [ACTION_SET_META_MASK] = {
3490 .help = "mask for metadata value",
3491 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3492 .args = ARGS(ARGS_ENTRY
3493 (struct rte_flow_action_set_meta, mask)),
3494 .call = parse_vc_conf,
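/*
 * For reference, a complete command exercising some of the action tokens
 * above might look like (illustrative only):
 *   flow create 0 ingress pattern eth / ipv4 / end
 *        actions set_ipv4_src ipv4_addr 10.0.0.1 / port_id id 1 / end
 */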
3498 /** Remove and return last entry from argument stack. */
3499 static const struct arg *
3500 pop_args(struct context *ctx)
3502 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3505 /** Add entry on top of the argument stack. */
3507 push_args(struct context *ctx, const struct arg *arg)
3509 if (ctx->args_num == CTX_STACK_SIZE)
3511 ctx->args[ctx->args_num++] = arg;
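/*
 * Note: parsing callbacks that consume an argument pop it on entry and push
 * it back on their error path (see parse_prefix() and parse_int() below),
 * so the argument stack stays balanced when parsing fails.
 */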
3515 /** Spread value into buffer according to bit-mask. */
3517 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3519 uint32_t i = arg->size;
3527 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3536 unsigned int shift = 0;
3537 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
3539 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3540 if (!(arg->mask[i] & (1 << shift)))
3545 *buf &= ~(1 << shift);
3546 *buf |= (val & 1) << shift;
3554 /** Compare a string with a partial one of a given length. */
3556 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3558 int r = strncmp(full, partial, partial_len);
3562 if (strlen(full) <= partial_len)
3564 return full[partial_len];
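/*
 * For example (illustrative): strcmp_partial("egress", "egress", 6) == 0,
 * whereas strcmp_partial("egress", "egr", 3) returns 'e' (non-zero): a
 * candidate that only covers a prefix of the full token name is rejected.
 */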
3568 * Parse a prefix length and generate a bit-mask.
3570 * Last argument (ctx->args) is retrieved to determine mask size, storage
3571 * location and whether the result must use network byte ordering.
3574 parse_prefix(struct context *ctx, const struct token *token,
3575 const char *str, unsigned int len,
3576 void *buf, unsigned int size)
3578 const struct arg *arg = pop_args(ctx);
3579 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3586 /* Argument is expected. */
3590 u = strtoumax(str, &end, 0);
3591 if (errno || (size_t)(end - str) != len)
3596 extra = arg_entry_bf_fill(NULL, 0, arg);
3605 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3606 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3613 if (bytes > size || bytes + !!extra > size)
3617 buf = (uint8_t *)ctx->object + arg->offset;
3618 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3620 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3621 memset(buf, 0x00, size - bytes);
3623 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
3627 memset(buf, 0xff, bytes);
3628 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3630 ((uint8_t *)buf)[bytes] = conv[extra];
3633 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3636 push_args(ctx, arg);
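/*
 * Examples of the masks parse_prefix() produces on a 4-byte field stored in
 * network order (illustrative): a prefix of 24 gives ff:ff:ff:00 and a
 * prefix of 20 gives ff:ff:f0:00; whole bytes are set to 0xff and the
 * partial byte comes from the conv[] table.
 */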
3640 /** Default parsing function for token name matching. */
3642 parse_default(struct context *ctx, const struct token *token,
3643 const char *str, unsigned int len,
3644 void *buf, unsigned int size)
3649 if (strcmp_partial(token->name, str, len))
3654 /** Parse flow command, initialize output buffer for subsequent tokens. */
3656 parse_init(struct context *ctx, const struct token *token,
3657 const char *str, unsigned int len,
3658 void *buf, unsigned int size)
3660 struct buffer *out = buf;
3662 /* Token name must match. */
3663 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3665 /* Nothing else to do if there is no buffer. */
3668 /* Make sure buffer is large enough. */
3669 if (size < sizeof(*out))
3671 /* Initialize buffer. */
3672 memset(out, 0x00, sizeof(*out));
3673 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3676 ctx->objmask = NULL;
3680 /** Parse tokens for validate/create commands. */
3682 parse_vc(struct context *ctx, const struct token *token,
3683 const char *str, unsigned int len,
3684 void *buf, unsigned int size)
3686 struct buffer *out = buf;
3690 /* Token name must match. */
3691 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3693 /* Nothing else to do if there is no buffer. */
3696 if (!out->command) {
3697 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3699 if (sizeof(*out) > size)
3701 out->command = ctx->curr;
3704 ctx->objmask = NULL;
3705 out->args.vc.data = (uint8_t *)out + size;
3709 ctx->object = &out->args.vc.attr;
3710 ctx->objmask = NULL;
3711 switch (ctx->curr) {
3716 out->args.vc.attr.ingress = 1;
3719 out->args.vc.attr.egress = 1;
3722 out->args.vc.attr.transfer = 1;
3725 out->args.vc.pattern =
3726 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3728 ctx->object = out->args.vc.pattern;
3729 ctx->objmask = NULL;
3732 out->args.vc.actions =
3733 (void *)RTE_ALIGN_CEIL((uintptr_t)
3734 (out->args.vc.pattern +
3735 out->args.vc.pattern_n),
3737 ctx->object = out->args.vc.actions;
3738 ctx->objmask = NULL;
3745 if (!out->args.vc.actions) {
3746 const struct parse_item_priv *priv = token->priv;
3747 struct rte_flow_item *item =
3748 out->args.vc.pattern + out->args.vc.pattern_n;
3750 data_size = priv->size * 3; /* spec, last, mask */
3751 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3752 (out->args.vc.data - data_size),
3754 if ((uint8_t *)item + sizeof(*item) > data)
3756 *item = (struct rte_flow_item){
3759 ++out->args.vc.pattern_n;
3761 ctx->objmask = NULL;
3763 const struct parse_action_priv *priv = token->priv;
3764 struct rte_flow_action *action =
3765 out->args.vc.actions + out->args.vc.actions_n;
3767 data_size = priv->size; /* configuration */
3768 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3769 (out->args.vc.data - data_size),
3771 if ((uint8_t *)action + sizeof(*action) > data)
3773 *action = (struct rte_flow_action){
3775 .conf = data_size ? data : NULL,
3777 ++out->args.vc.actions_n;
3778 ctx->object = action;
3779 ctx->objmask = NULL;
3781 memset(data, 0, data_size);
3782 out->args.vc.data = data;
3783 ctx->objdata = data_size;
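/*
 * Layout note: pattern items and actions are appended upward right after
 * the buffer header, while their data is carved downward from the end of
 * the buffer (out->args.vc.data); each pattern item reserves
 * priv->size * 3 bytes for spec, last and mask, and each action reserves
 * priv->size bytes for its configuration.
 */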
3787 /** Parse pattern item parameter type. */
3789 parse_vc_spec(struct context *ctx, const struct token *token,
3790 const char *str, unsigned int len,
3791 void *buf, unsigned int size)
3793 struct buffer *out = buf;
3794 struct rte_flow_item *item;
3800 /* Token name must match. */
3801 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3803 /* Parse parameter types. */
3804 switch (ctx->curr) {
3805 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3811 case ITEM_PARAM_SPEC:
3814 case ITEM_PARAM_LAST:
3817 case ITEM_PARAM_PREFIX:
3818 /* Modify next token to expect a prefix. */
3819 if (ctx->next_num < 2)
3821 ctx->next[ctx->next_num - 2] = prefix;
3823 case ITEM_PARAM_MASK:
3829 /* Nothing else to do if there is no buffer. */
3832 if (!out->args.vc.pattern_n)
3834 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3835 data_size = ctx->objdata / 3; /* spec, last, mask */
3836 /* Point to selected object. */
3837 ctx->object = out->args.vc.data + (data_size * index);
3839 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3840 item->mask = ctx->objmask;
3842 ctx->objmask = NULL;
3843 /* Update relevant item pointer. */
3844 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3849 /** Parse action configuration field. */
3851 parse_vc_conf(struct context *ctx, const struct token *token,
3852 const char *str, unsigned int len,
3853 void *buf, unsigned int size)
3855 struct buffer *out = buf;
3858 /* Token name must match. */
3859 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3861 /* Nothing else to do if there is no buffer. */
3864 /* Point to selected object. */
3865 ctx->object = out->args.vc.data;
3866 ctx->objmask = NULL;
3870 /** Parse RSS action. */
3872 parse_vc_action_rss(struct context *ctx, const struct token *token,
3873 const char *str, unsigned int len,
3874 void *buf, unsigned int size)
3876 struct buffer *out = buf;
3877 struct rte_flow_action *action;
3878 struct action_rss_data *action_rss_data;
3882 ret = parse_vc(ctx, token, str, len, buf, size);
3885 /* Nothing else to do if there is no buffer. */
3888 if (!out->args.vc.actions_n)
3890 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3891 /* Point to selected object. */
3892 ctx->object = out->args.vc.data;
3893 ctx->objmask = NULL;
3894 /* Set up default configuration. */
3895 action_rss_data = ctx->object;
3896 *action_rss_data = (struct action_rss_data){
3897 .conf = (struct rte_flow_action_rss){
3898 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3901 .key_len = sizeof(action_rss_data->key),
3902 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3903 .key = action_rss_data->key,
3904 .queue = action_rss_data->queue,
3906 .key = "testpmd's default RSS hash key, "
3907 "override it for better balancing",
3910 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3911 action_rss_data->queue[i] = i;
3912 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3913 ctx->port != (portid_t)RTE_PORT_ALL) {
3914 struct rte_eth_dev_info info;
3917 ret2 = rte_eth_dev_info_get(ctx->port, &info);
3921 action_rss_data->conf.key_len =
3922 RTE_MIN(sizeof(action_rss_data->key),
3923 info.hash_key_size);
3925 action->conf = &action_rss_data->conf;
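/*
 * Example (illustrative): "actions rss queues 0 1 end / end" relies on the
 * defaults set up above; the function, types, key, key_len and queue list
 * can be overridden by the corresponding tokens before the next "/".
 */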
3930 * Parse func field for RSS action.
3932 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3933 * ACTION_RSS_FUNC_* index that called this function.
3936 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3937 const char *str, unsigned int len,
3938 void *buf, unsigned int size)
3940 struct action_rss_data *action_rss_data;
3941 enum rte_eth_hash_function func;
3945 /* Token name must match. */
3946 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3948 switch (ctx->curr) {
3949 case ACTION_RSS_FUNC_DEFAULT:
3950 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3952 case ACTION_RSS_FUNC_TOEPLITZ:
3953 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3955 case ACTION_RSS_FUNC_SIMPLE_XOR:
3956 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3958 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
3959 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
3966 action_rss_data = ctx->object;
3967 action_rss_data->conf.func = func;
3972 * Parse type field for RSS action.
3974 * Valid tokens are type field names and the "end" token.
3977 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3978 const char *str, unsigned int len,
3979 void *buf, unsigned int size)
3981 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3982 struct action_rss_data *action_rss_data;
3988 if (ctx->curr != ACTION_RSS_TYPE)
3990 if (!(ctx->objdata >> 16) && ctx->object) {
3991 action_rss_data = ctx->object;
3992 action_rss_data->conf.types = 0;
3994 if (!strcmp_partial("end", str, len)) {
3995 ctx->objdata &= 0xffff;
3998 for (i = 0; rss_type_table[i].str; ++i)
3999 if (!strcmp_partial(rss_type_table[i].str, str, len))
4001 if (!rss_type_table[i].str)
4003 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
4005 if (ctx->next_num == RTE_DIM(ctx->next))
4007 ctx->next[ctx->next_num++] = next;
4010 action_rss_data = ctx->object;
4011 action_rss_data->conf.types |= rss_type_table[i].rss_type;
4016 * Parse queue field for RSS action.
4018 * Valid tokens are queue indices and the "end" token.
4021 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4022 const char *str, unsigned int len,
4023 void *buf, unsigned int size)
4025 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4026 struct action_rss_data *action_rss_data;
4027 const struct arg *arg;
4034 if (ctx->curr != ACTION_RSS_QUEUE)
4036 i = ctx->objdata >> 16;
4037 if (!strcmp_partial("end", str, len)) {
4038 ctx->objdata &= 0xffff;
4041 if (i >= ACTION_RSS_QUEUE_NUM)
4043 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4044 i * sizeof(action_rss_data->queue[i]),
4045 sizeof(action_rss_data->queue[i]));
4046 if (push_args(ctx, arg))
4048 ret = parse_int(ctx, token, str, len, NULL, 0);
4054 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
4056 if (ctx->next_num == RTE_DIM(ctx->next))
4058 ctx->next[ctx->next_num++] = next;
4062 action_rss_data = ctx->object;
4063 action_rss_data->conf.queue_num = i;
4064 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
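/*
 * The running queue count is kept in the upper 16 bits of ctx->objdata
 * between calls; e.g. (illustrative) "queues 2 3 end" ends up with
 * queue_num == 2 and queue[] == { 2, 3 }.
 */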
4068 /** Parse VXLAN encap action. */
4070 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
4071 const char *str, unsigned int len,
4072 void *buf, unsigned int size)
4074 struct buffer *out = buf;
4075 struct rte_flow_action *action;
4076 struct action_vxlan_encap_data *action_vxlan_encap_data;
4079 ret = parse_vc(ctx, token, str, len, buf, size);
4082 /* Nothing else to do if there is no buffer. */
4085 if (!out->args.vc.actions_n)
4087 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4088 /* Point to selected object. */
4089 ctx->object = out->args.vc.data;
4090 ctx->objmask = NULL;
4091 /* Set up default configuration. */
4092 action_vxlan_encap_data = ctx->object;
4093 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
4094 .conf = (struct rte_flow_action_vxlan_encap){
4095 .definition = action_vxlan_encap_data->items,
4099 .type = RTE_FLOW_ITEM_TYPE_ETH,
4100 .spec = &action_vxlan_encap_data->item_eth,
4101 .mask = &rte_flow_item_eth_mask,
4104 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4105 .spec = &action_vxlan_encap_data->item_vlan,
4106 .mask = &rte_flow_item_vlan_mask,
4109 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4110 .spec = &action_vxlan_encap_data->item_ipv4,
4111 .mask = &rte_flow_item_ipv4_mask,
4114 .type = RTE_FLOW_ITEM_TYPE_UDP,
4115 .spec = &action_vxlan_encap_data->item_udp,
4116 .mask = &rte_flow_item_udp_mask,
4119 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
4120 .spec = &action_vxlan_encap_data->item_vxlan,
4121 .mask = &rte_flow_item_vxlan_mask,
4124 .type = RTE_FLOW_ITEM_TYPE_END,
4129 .tci = vxlan_encap_conf.vlan_tci,
4133 .src_addr = vxlan_encap_conf.ipv4_src,
4134 .dst_addr = vxlan_encap_conf.ipv4_dst,
4137 .src_port = vxlan_encap_conf.udp_src,
4138 .dst_port = vxlan_encap_conf.udp_dst,
4140 .item_vxlan.flags = 0,
4142 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
4143 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4144 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
4145 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4146 if (!vxlan_encap_conf.select_ipv4) {
4147 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
4148 &vxlan_encap_conf.ipv6_src,
4149 sizeof(vxlan_encap_conf.ipv6_src));
4150 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
4151 &vxlan_encap_conf.ipv6_dst,
4152 sizeof(vxlan_encap_conf.ipv6_dst));
4153 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
4154 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4155 .spec = &action_vxlan_encap_data->item_ipv6,
4156 .mask = &rte_flow_item_ipv6_mask,
4159 if (!vxlan_encap_conf.select_vlan)
4160 action_vxlan_encap_data->items[1].type =
4161 RTE_FLOW_ITEM_TYPE_VOID;
4162 if (vxlan_encap_conf.select_tos_ttl) {
4163 if (vxlan_encap_conf.select_ipv4) {
4164 static struct rte_flow_item_ipv4 ipv4_mask_tos;
4166 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
4167 sizeof(ipv4_mask_tos));
4168 ipv4_mask_tos.hdr.type_of_service = 0xff;
4169 ipv4_mask_tos.hdr.time_to_live = 0xff;
4170 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
4171 vxlan_encap_conf.ip_tos;
4172 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
4173 vxlan_encap_conf.ip_ttl;
4174 action_vxlan_encap_data->items[2].mask =
4177 static struct rte_flow_item_ipv6 ipv6_mask_tos;
4179 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
4180 sizeof(ipv6_mask_tos));
4181 ipv6_mask_tos.hdr.vtc_flow |=
4182 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
4183 ipv6_mask_tos.hdr.hop_limits = 0xff;
4184 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
4186 ((uint32_t)vxlan_encap_conf.ip_tos <<
4187 RTE_IPV6_HDR_TC_SHIFT);
4188 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
4189 vxlan_encap_conf.ip_ttl;
4190 action_vxlan_encap_data->items[2].mask =
4194 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4195 RTE_DIM(vxlan_encap_conf.vni));
4196 action->conf = &action_vxlan_encap_data->conf;
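/*
 * The vxlan_encap_conf defaults used above come from the "set vxlan"
 * command, e.g. (illustrative):
 *   set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789
 *       ip-src 10.0.0.1 ip-dst 10.0.0.2
 *       eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 */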
4200 /** Parse NVGRE encap action. */
4202 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4203 const char *str, unsigned int len,
4204 void *buf, unsigned int size)
4206 struct buffer *out = buf;
4207 struct rte_flow_action *action;
4208 struct action_nvgre_encap_data *action_nvgre_encap_data;
4211 ret = parse_vc(ctx, token, str, len, buf, size);
4214 /* Nothing else to do if there is no buffer. */
4217 if (!out->args.vc.actions_n)
4219 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4220 /* Point to selected object. */
4221 ctx->object = out->args.vc.data;
4222 ctx->objmask = NULL;
4223 /* Set up default configuration. */
4224 action_nvgre_encap_data = ctx->object;
4225 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
4226 .conf = (struct rte_flow_action_nvgre_encap){
4227 .definition = action_nvgre_encap_data->items,
4231 .type = RTE_FLOW_ITEM_TYPE_ETH,
4232 .spec = &action_nvgre_encap_data->item_eth,
4233 .mask = &rte_flow_item_eth_mask,
4236 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4237 .spec = &action_nvgre_encap_data->item_vlan,
4238 .mask = &rte_flow_item_vlan_mask,
4241 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4242 .spec = &action_nvgre_encap_data->item_ipv4,
4243 .mask = &rte_flow_item_ipv4_mask,
4246 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
4247 .spec = &action_nvgre_encap_data->item_nvgre,
4248 .mask = &rte_flow_item_nvgre_mask,
4251 .type = RTE_FLOW_ITEM_TYPE_END,
4256 .tci = nvgre_encap_conf.vlan_tci,
4260 .src_addr = nvgre_encap_conf.ipv4_src,
4261 .dst_addr = nvgre_encap_conf.ipv4_dst,
4263 .item_nvgre.flow_id = 0,
4265 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4266 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4267 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4268 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4269 if (!nvgre_encap_conf.select_ipv4) {
4270 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4271 &nvgre_encap_conf.ipv6_src,
4272 sizeof(nvgre_encap_conf.ipv6_src));
4273 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4274 &nvgre_encap_conf.ipv6_dst,
4275 sizeof(nvgre_encap_conf.ipv6_dst));
4276 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4277 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4278 .spec = &action_nvgre_encap_data->item_ipv6,
4279 .mask = &rte_flow_item_ipv6_mask,
4282 if (!nvgre_encap_conf.select_vlan)
4283 action_nvgre_encap_data->items[1].type =
4284 RTE_FLOW_ITEM_TYPE_VOID;
4285 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4286 RTE_DIM(nvgre_encap_conf.tni));
4287 action->conf = &action_nvgre_encap_data->conf;
4291 /** Parse l2 encap action. */
4293 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4294 const char *str, unsigned int len,
4295 void *buf, unsigned int size)
4297 struct buffer *out = buf;
4298 struct rte_flow_action *action;
4299 struct action_raw_encap_data *action_encap_data;
4300 struct rte_flow_item_eth eth = { .type = 0, };
4301 struct rte_flow_item_vlan vlan = {
4302 .tci = mplsoudp_encap_conf.vlan_tci,
4308 ret = parse_vc(ctx, token, str, len, buf, size);
4311 /* Nothing else to do if there is no buffer. */
4314 if (!out->args.vc.actions_n)
4316 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4317 /* Point to selected object. */
4318 ctx->object = out->args.vc.data;
4319 ctx->objmask = NULL;
4320 /* Copy the headers to the buffer. */
4321 action_encap_data = ctx->object;
4322 *action_encap_data = (struct action_raw_encap_data) {
4323 .conf = (struct rte_flow_action_raw_encap){
4324 .data = action_encap_data->data,
4328 header = action_encap_data->data;
4329 if (l2_encap_conf.select_vlan)
4330 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4331 else if (l2_encap_conf.select_ipv4)
4332 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4334 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4335 memcpy(eth.dst.addr_bytes,
4336 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4337 memcpy(eth.src.addr_bytes,
4338 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4339 memcpy(header, ð, sizeof(eth));
4340 header += sizeof(eth);
4341 if (l2_encap_conf.select_vlan) {
4342 if (l2_encap_conf.select_ipv4)
4343 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4345 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4346 memcpy(header, &vlan, sizeof(vlan));
4347 header += sizeof(vlan);
4349 action_encap_data->conf.size = header -
4350 action_encap_data->data;
4351 action->conf = &action_encap_data->conf;
4355 /** Parse l2 decap action. */
4357 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4358 const char *str, unsigned int len,
4359 void *buf, unsigned int size)
4361 struct buffer *out = buf;
4362 struct rte_flow_action *action;
4363 struct action_raw_decap_data *action_decap_data;
4364 struct rte_flow_item_eth eth = { .type = 0, };
4365 struct rte_flow_item_vlan vlan = {
4366 .tci = mplsoudp_encap_conf.vlan_tci,
4372 ret = parse_vc(ctx, token, str, len, buf, size);
4375 /* Nothing else to do if there is no buffer. */
4378 if (!out->args.vc.actions_n)
4380 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4381 /* Point to selected object. */
4382 ctx->object = out->args.vc.data;
4383 ctx->objmask = NULL;
4384 /* Copy the headers to the buffer. */
4385 action_decap_data = ctx->object;
4386 *action_decap_data = (struct action_raw_decap_data) {
4387 .conf = (struct rte_flow_action_raw_decap){
4388 .data = action_decap_data->data,
4392 header = action_decap_data->data;
4393 if (l2_decap_conf.select_vlan)
4394 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4395 memcpy(header, ð, sizeof(eth));
4396 header += sizeof(eth);
4397 if (l2_decap_conf.select_vlan) {
4398 memcpy(header, &vlan, sizeof(vlan));
4399 header += sizeof(vlan);
4401 action_decap_data->conf.size = header -
4402 action_decap_data->data;
4403 action->conf = &action_decap_data->conf;
4407 #define ETHER_TYPE_MPLS_UNICAST 0x8847
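/* 0x8847 is the IEEE EtherType for MPLS unicast; it is written into the
 * GRE protocol field of the MPLS-over-GRE headers assembled below. */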
4409 /** Parse MPLSOGRE encap action. */
4411 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4412 const char *str, unsigned int len,
4413 void *buf, unsigned int size)
4415 struct buffer *out = buf;
4416 struct rte_flow_action *action;
4417 struct action_raw_encap_data *action_encap_data;
4418 struct rte_flow_item_eth eth = { .type = 0, };
4419 struct rte_flow_item_vlan vlan = {
4420 .tci = mplsogre_encap_conf.vlan_tci,
4423 struct rte_flow_item_ipv4 ipv4 = {
4425 .src_addr = mplsogre_encap_conf.ipv4_src,
4426 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4427 .next_proto_id = IPPROTO_GRE,
4428 .version_ihl = RTE_IPV4_VHL_DEF,
4429 .time_to_live = IPDEFTTL,
4432 struct rte_flow_item_ipv6 ipv6 = {
4434 .proto = IPPROTO_GRE,
4435 .hop_limits = IPDEFTTL,
4438 struct rte_flow_item_gre gre = {
4439 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4441 struct rte_flow_item_mpls mpls;
4445 ret = parse_vc(ctx, token, str, len, buf, size);
4448 /* Nothing else to do if there is no buffer. */
4451 if (!out->args.vc.actions_n)
4453 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4454 /* Point to selected object. */
4455 ctx->object = out->args.vc.data;
4456 ctx->objmask = NULL;
4457 /* Copy the headers to the buffer. */
4458 action_encap_data = ctx->object;
4459 *action_encap_data = (struct action_raw_encap_data) {
4460 .conf = (struct rte_flow_action_raw_encap){
4461 .data = action_encap_data->data,
4466 header = action_encap_data->data;
4467 if (mplsogre_encap_conf.select_vlan)
4468 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4469 else if (mplsogre_encap_conf.select_ipv4)
4470 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4472 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4473 memcpy(eth.dst.addr_bytes,
4474 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4475 memcpy(eth.src.addr_bytes,
4476 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4477 memcpy(header, ð, sizeof(eth));
4478 header += sizeof(eth);
4479 if (mplsogre_encap_conf.select_vlan) {
4480 if (mplsogre_encap_conf.select_ipv4)
4481 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4483 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4484 memcpy(header, &vlan, sizeof(vlan));
4485 header += sizeof(vlan);
4487 if (mplsogre_encap_conf.select_ipv4) {
4488 memcpy(header, &ipv4, sizeof(ipv4));
4489 header += sizeof(ipv4);
4491 memcpy(&ipv6.hdr.src_addr,
4492 &mplsogre_encap_conf.ipv6_src,
4493 sizeof(mplsogre_encap_conf.ipv6_src));
4494 memcpy(&ipv6.hdr.dst_addr,
4495 &mplsogre_encap_conf.ipv6_dst,
4496 sizeof(mplsogre_encap_conf.ipv6_dst));
4497 memcpy(header, &ipv6, sizeof(ipv6));
4498 header += sizeof(ipv6);
4500 memcpy(header, &gre, sizeof(gre));
4501 header += sizeof(gre);
4502 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4503 RTE_DIM(mplsogre_encap_conf.label));
4504 mpls.label_tc_s[2] |= 0x1;
4505 memcpy(header, &mpls, sizeof(mpls));
4506 header += sizeof(mpls);
4507 action_encap_data->conf.size = header -
4508 action_encap_data->data;
4509 action->conf = &action_encap_data->conf;
4513 /** Parse MPLSOGRE decap action. */
4515 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4516 const char *str, unsigned int len,
4517 void *buf, unsigned int size)
4519 struct buffer *out = buf;
4520 struct rte_flow_action *action;
4521 struct action_raw_decap_data *action_decap_data;
4522 struct rte_flow_item_eth eth = { .type = 0, };
4523 struct rte_flow_item_vlan vlan = {.tci = 0};
4524 struct rte_flow_item_ipv4 ipv4 = {
4526 .next_proto_id = IPPROTO_GRE,
4529 struct rte_flow_item_ipv6 ipv6 = {
4531 .proto = IPPROTO_GRE,
4534 struct rte_flow_item_gre gre = {
4535 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4537 struct rte_flow_item_mpls mpls;
4541 ret = parse_vc(ctx, token, str, len, buf, size);
4544 /* Nothing else to do if there is no buffer. */
4547 if (!out->args.vc.actions_n)
4549 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4550 /* Point to selected object. */
4551 ctx->object = out->args.vc.data;
4552 ctx->objmask = NULL;
4553 /* Copy the headers to the buffer. */
4554 action_decap_data = ctx->object;
4555 *action_decap_data = (struct action_raw_decap_data) {
4556 .conf = (struct rte_flow_action_raw_decap){
4557 .data = action_decap_data->data,
4561 header = action_decap_data->data;
4562 if (mplsogre_decap_conf.select_vlan)
4563 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4564 else if (mplsogre_encap_conf.select_ipv4)
4565 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4567 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4568 memcpy(eth.dst.addr_bytes,
4569 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4570 memcpy(eth.src.addr_bytes,
4571 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4572 memcpy(header, ð, sizeof(eth));
4573 header += sizeof(eth);
4574 if (mplsogre_encap_conf.select_vlan) {
4575 if (mplsogre_encap_conf.select_ipv4)
4576 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4578 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4579 memcpy(header, &vlan, sizeof(vlan));
4580 header += sizeof(vlan);
4582 if (mplsogre_encap_conf.select_ipv4) {
4583 memcpy(header, &ipv4, sizeof(ipv4));
4584 header += sizeof(ipv4);
4586 memcpy(header, &ipv6, sizeof(ipv6));
4587 header += sizeof(ipv6);
4589 memcpy(header, &gre, sizeof(gre));
4590 header += sizeof(gre);
4591 memset(&mpls, 0, sizeof(mpls));
4592 memcpy(header, &mpls, sizeof(mpls));
4593 header += sizeof(mpls);
4594 action_decap_data->conf.size = header -
4595 action_decap_data->data;
4596 action->conf = &action_decap_data->conf;
4600 /** Parse MPLSOUDP encap action. */
4602 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4603 const char *str, unsigned int len,
4604 void *buf, unsigned int size)
4606 struct buffer *out = buf;
4607 struct rte_flow_action *action;
4608 struct action_raw_encap_data *action_encap_data;
4609 struct rte_flow_item_eth eth = { .type = 0, };
4610 struct rte_flow_item_vlan vlan = {
4611 .tci = mplsoudp_encap_conf.vlan_tci,
4614 struct rte_flow_item_ipv4 ipv4 = {
4616 .src_addr = mplsoudp_encap_conf.ipv4_src,
4617 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4618 .next_proto_id = IPPROTO_UDP,
4619 .version_ihl = RTE_IPV4_VHL_DEF,
4620 .time_to_live = IPDEFTTL,
4623 struct rte_flow_item_ipv6 ipv6 = {
4625 .proto = IPPROTO_UDP,
4626 .hop_limits = IPDEFTTL,
4629 struct rte_flow_item_udp udp = {
4631 .src_port = mplsoudp_encap_conf.udp_src,
4632 .dst_port = mplsoudp_encap_conf.udp_dst,
4635 struct rte_flow_item_mpls mpls;
4639 ret = parse_vc(ctx, token, str, len, buf, size);
4642 /* Nothing else to do if there is no buffer. */
4645 if (!out->args.vc.actions_n)
4647 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4648 /* Point to selected object. */
4649 ctx->object = out->args.vc.data;
4650 ctx->objmask = NULL;
4651 /* Copy the headers to the buffer. */
4652 action_encap_data = ctx->object;
4653 *action_encap_data = (struct action_raw_encap_data) {
4654 .conf = (struct rte_flow_action_raw_encap){
4655 .data = action_encap_data->data,
4660 header = action_encap_data->data;
4661 if (mplsoudp_encap_conf.select_vlan)
4662 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4663 else if (mplsoudp_encap_conf.select_ipv4)
4664 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4666 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4667 memcpy(eth.dst.addr_bytes,
4668 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4669 memcpy(eth.src.addr_bytes,
4670 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4671 memcpy(header, ð, sizeof(eth));
4672 header += sizeof(eth);
4673 if (mplsoudp_encap_conf.select_vlan) {
4674 if (mplsoudp_encap_conf.select_ipv4)
4675 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4677 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4678 memcpy(header, &vlan, sizeof(vlan));
4679 header += sizeof(vlan);
4681 if (mplsoudp_encap_conf.select_ipv4) {
4682 memcpy(header, &ipv4, sizeof(ipv4));
4683 header += sizeof(ipv4);
4685 memcpy(&ipv6.hdr.src_addr,
4686 &mplsoudp_encap_conf.ipv6_src,
4687 sizeof(mplsoudp_encap_conf.ipv6_src));
4688 memcpy(&ipv6.hdr.dst_addr,
4689 &mplsoudp_encap_conf.ipv6_dst,
4690 sizeof(mplsoudp_encap_conf.ipv6_dst));
4691 memcpy(header, &ipv6, sizeof(ipv6));
4692 header += sizeof(ipv6);
4694 memcpy(header, &udp, sizeof(udp));
4695 header += sizeof(udp);
4696 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4697 RTE_DIM(mplsoudp_encap_conf.label));
4698 mpls.label_tc_s[2] |= 0x1;
4699 memcpy(header, &mpls, sizeof(mpls));
4700 header += sizeof(mpls);
4701 action_encap_data->conf.size = header -
4702 action_encap_data->data;
4703 action->conf = &action_encap_data->conf;
4707 /** Parse MPLSOUDP decap action. */
4709 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4710 const char *str, unsigned int len,
4711 void *buf, unsigned int size)
4713 struct buffer *out = buf;
4714 struct rte_flow_action *action;
4715 struct action_raw_decap_data *action_decap_data;
4716 struct rte_flow_item_eth eth = { .type = 0, };
4717 struct rte_flow_item_vlan vlan = {.tci = 0};
4718 struct rte_flow_item_ipv4 ipv4 = {
4720 .next_proto_id = IPPROTO_UDP,
4723 struct rte_flow_item_ipv6 ipv6 = {
4725 .proto = IPPROTO_UDP,
4728 struct rte_flow_item_udp udp = {
4730 .dst_port = rte_cpu_to_be_16(6635),
4733 struct rte_flow_item_mpls mpls;
4737 ret = parse_vc(ctx, token, str, len, buf, size);
4740 /* Nothing else to do if there is no buffer. */
4743 if (!out->args.vc.actions_n)
4745 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4746 /* Point to selected object. */
4747 ctx->object = out->args.vc.data;
4748 ctx->objmask = NULL;
4749 /* Copy the headers to the buffer. */
4750 action_decap_data = ctx->object;
4751 *action_decap_data = (struct action_raw_decap_data) {
4752 .conf = (struct rte_flow_action_raw_decap){
4753 .data = action_decap_data->data,
4757 header = action_decap_data->data;
4758 if (mplsoudp_decap_conf.select_vlan)
4759 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4760 else if (mplsoudp_encap_conf.select_ipv4)
4761 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4763 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4764 memcpy(eth.dst.addr_bytes,
4765 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4766 memcpy(eth.src.addr_bytes,
4767 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4768 memcpy(header, ð, sizeof(eth));
4769 header += sizeof(eth);
4770 if (mplsoudp_encap_conf.select_vlan) {
4771 if (mplsoudp_encap_conf.select_ipv4)
4772 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4774 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4775 memcpy(header, &vlan, sizeof(vlan));
4776 header += sizeof(vlan);
4778 if (mplsoudp_encap_conf.select_ipv4) {
4779 memcpy(header, &ipv4, sizeof(ipv4));
4780 header += sizeof(ipv4);
4782 memcpy(header, &ipv6, sizeof(ipv6));
4783 header += sizeof(ipv6);
4785 memcpy(header, &udp, sizeof(udp));
4786 header += sizeof(udp);
4787 memset(&mpls, 0, sizeof(mpls));
4788 memcpy(header, &mpls, sizeof(mpls));
4789 header += sizeof(mpls);
4790 action_decap_data->conf.size = header -
4791 action_decap_data->data;
4792 action->conf = &action_decap_data->conf;
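/*
 * As with the other decap variants, the headers assembled here mainly
 * convey the size and ordering of the stack to strip (hence the zeroed
 * MPLS header); how strictly a PMD interprets the contents is
 * driver-specific.
 */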
4797 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
4798 const char *str, unsigned int len, void *buf,
4801 struct action_raw_decap_data *action_raw_decap_data;
4802 struct rte_flow_action *action;
4803 const struct arg *arg;
4804 struct buffer *out = buf;
4808 RTE_SET_USED(token);
4811 arg = ARGS_ENTRY_ARB_BOUNDED
4812 (offsetof(struct action_raw_decap_data, idx),
4813 sizeof(((struct action_raw_decap_data *)0)->idx),
4814 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4815 if (push_args(ctx, arg))
4817 ret = parse_int(ctx, token, str, len, NULL, 0);
4824 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4825 action_raw_decap_data = ctx->object;
4826 idx = action_raw_decap_data->idx;
4827 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
4828 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
4829 action->conf = &action_raw_decap_data->conf;
4835 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
4836 const char *str, unsigned int len, void *buf,
4839 struct action_raw_encap_data *action_raw_encap_data;
4840 struct rte_flow_action *action;
4841 const struct arg *arg;
4842 struct buffer *out = buf;
4846 RTE_SET_USED(token);
4849 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
4851 arg = ARGS_ENTRY_ARB_BOUNDED
4852 (offsetof(struct action_raw_encap_data, idx),
4853 sizeof(((struct action_raw_encap_data *)0)->idx),
4854 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4855 if (push_args(ctx, arg))
4857 ret = parse_int(ctx, token, str, len, NULL, 0);
4864 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4865 action_raw_encap_data = ctx->object;
4866 idx = action_raw_encap_data->idx;
4867 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
4868 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
4869 action_raw_encap_data->conf.preserve = NULL;
4870 action->conf = &action_raw_encap_data->conf;
4875 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4876 const char *str, unsigned int len, void *buf,
4879 struct buffer *out = buf;
4880 struct rte_flow_action *action;
4881 struct action_raw_encap_data *action_raw_encap_data = NULL;
4884 ret = parse_vc(ctx, token, str, len, buf, size);
4887 /* Nothing else to do if there is no buffer. */
4890 if (!out->args.vc.actions_n)
4892 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4893 /* Point to selected object. */
4894 ctx->object = out->args.vc.data;
4895 ctx->objmask = NULL;
4896 /* Copy the headers to the buffer. */
4897 action_raw_encap_data = ctx->object;
4898 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
4899 action_raw_encap_data->conf.preserve = NULL;
4900 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
4901 action->conf = &action_raw_encap_data->conf;
4906 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4907 const char *str, unsigned int len, void *buf,
4910 struct buffer *out = buf;
4911 struct rte_flow_action *action;
4912 struct action_raw_decap_data *action_raw_decap_data = NULL;
4915 ret = parse_vc(ctx, token, str, len, buf, size);
4918 /* Nothing else to do if there is no buffer. */
4921 if (!out->args.vc.actions_n)
4923 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4924 /* Point to selected object. */
4925 ctx->object = out->args.vc.data;
4926 ctx->objmask = NULL;
4927 /* Copy the headers to the buffer. */
4928 action_raw_decap_data = ctx->object;
4929 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
4930 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
4931 action->conf = &action_raw_decap_data->conf;
4936 parse_vc_action_set_meta(struct context *ctx, const struct token *token,
4937 const char *str, unsigned int len, void *buf,
4942 ret = parse_vc(ctx, token, str, len, buf, size);
4945 ret = rte_flow_dynf_metadata_register();
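/* The dynamic metadata mbuf field (and its flag) generally must be
 * registered before SET_META can be used, hence the
 * rte_flow_dynf_metadata_register() call above. */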
4951 /** Parse tokens for destroy command. */
4953 parse_destroy(struct context *ctx, const struct token *token,
4954 const char *str, unsigned int len,
4955 void *buf, unsigned int size)
4957 struct buffer *out = buf;
4959 /* Token name must match. */
4960 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4962 /* Nothing else to do if there is no buffer. */
4965 if (!out->command) {
4966 if (ctx->curr != DESTROY)
4968 if (sizeof(*out) > size)
4970 out->command = ctx->curr;
4973 ctx->objmask = NULL;
4974 out->args.destroy.rule =
4975 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4979 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4980 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4983 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4984 ctx->objmask = NULL;
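/*
 * Each "rule" token appends one entry to args.destroy.rule[]; e.g.
 * (illustrative) "flow destroy 0 rule 2 rule 3" queues rules 2 and 3 of
 * port 0 for destruction.
 */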
4988 /** Parse tokens for flush command. */
4990 parse_flush(struct context *ctx, const struct token *token,
4991 const char *str, unsigned int len,
4992 void *buf, unsigned int size)
4994 struct buffer *out = buf;
4996 /* Token name must match. */
4997 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4999 /* Nothing else to do if there is no buffer. */
5002 if (!out->command) {
5003 if (ctx->curr != FLUSH)
5005 if (sizeof(*out) > size)
5007 out->command = ctx->curr;
5010 ctx->objmask = NULL;
5015 /** Parse tokens for query command. */
5017 parse_query(struct context *ctx, const struct token *token,
5018 const char *str, unsigned int len,
5019 void *buf, unsigned int size)
5021 struct buffer *out = buf;
5023 /* Token name must match. */
5024 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5026 /* Nothing else to do if there is no buffer. */
5029 if (!out->command) {
5030 if (ctx->curr != QUERY)
5032 if (sizeof(*out) > size)
5034 out->command = ctx->curr;
5037 ctx->objmask = NULL;
/** Parse action names. */
static int
parse_action(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
{
	struct buffer *out = buf;
	const struct arg *arg = pop_args(ctx);
	unsigned int i;

	(void)size;
	/* Argument is expected. */
	if (!arg)
		return -1;
	/* Parse action name. */
	for (i = 0; next_action[i]; ++i) {
		const struct parse_action_priv *priv;

		token = &token_list[next_action[i]];
		if (strcmp_partial(token->name, str, len))
			continue;
		priv = token->priv;
		if (!priv)
			goto error;
		if (out)
			memcpy((uint8_t *)ctx->object + arg->offset,
			       &priv->type,
			       arg->size);
		return len;
	}
error:
	push_args(ctx, arg);
	return -1;
}
/** Parse tokens for list command. */
static int
parse_list(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	if (!out->command) {
		if (ctx->curr != LIST)
			return -1;
		if (sizeof(*out) > size)
			return -1;
		out->command = ctx->curr;
		ctx->objdata = 0;
		ctx->object = out;
		ctx->objmask = NULL;
		out->args.list.group =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
					       sizeof(double));
		return len;
	}
	if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
	     sizeof(*out->args.list.group)) > (uint8_t *)out + size)
		return -1;
	ctx->objdata = 0;
	ctx->object = out->args.list.group + out->args.list.group_n++;
	ctx->objmask = NULL;
	return len;
}
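/*
 * Illustrative testpmd usage (restrict listing to groups 0 and 1):
 *   flow list 0 group 0 group 1
 */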
/** Parse tokens for isolate command. */
static int
parse_isolate(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	if (!out->command) {
		if (ctx->curr != ISOLATE)
			return -1;
		if (sizeof(*out) > size)
			return -1;
		out->command = ctx->curr;
		ctx->objdata = 0;
		ctx->object = out;
		ctx->objmask = NULL;
	}
	return len;
}
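/*
 * Illustrative testpmd usage (enable, then disable isolated mode on port 0):
 *   flow isolate 0 1
 *   flow isolate 0 0
 */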
/**
 * Parse signed/unsigned integers 8 to 64-bit long.
 *
 * Last argument (ctx->args) is retrieved to determine integer type and
 * storage location.
 */
static int
parse_int(struct context *ctx, const struct token *token,
	  const char *str, unsigned int len,
	  void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	uintmax_t u;
	char *end;

	(void)token;
	/* Argument is expected. */
	if (!arg)
		return -1;
	errno = 0;
	u = arg->sign ?
		(uintmax_t)strtoimax(str, &end, 0) :
		strtoumax(str, &end, 0);
	if (errno || (size_t)(end - str) != len)
		goto error;
	if (arg->bounded &&
	    ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
			    (intmax_t)u > (intmax_t)arg->max)) ||
	     (!arg->sign && (u < arg->min || u > arg->max))))
		goto error;
	if (!ctx->object)
		return len;
	if (arg->mask) {
		if (!arg_entry_bf_fill(ctx->object, u, arg) ||
		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
			goto error;
		return len;
	}
	buf = (uint8_t *)ctx->object + arg->offset;
	size = arg->size;
	if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
		return -1;
objmask:
	switch (size) {
	case sizeof(uint8_t):
		*(uint8_t *)buf = u;
		break;
	case sizeof(uint16_t):
		*(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
		break;
	case sizeof(uint8_t [3]):
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		if (!arg->hton) {
			((uint8_t *)buf)[0] = u;
			((uint8_t *)buf)[1] = u >> 8;
			((uint8_t *)buf)[2] = u >> 16;
			break;
		}
#endif
		((uint8_t *)buf)[0] = u >> 16;
		((uint8_t *)buf)[1] = u >> 8;
		((uint8_t *)buf)[2] = u;
		break;
	case sizeof(uint32_t):
		*(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
		break;
	case sizeof(uint64_t):
		*(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
		break;
	default:
		goto error;
	}
	if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
		u = -1;
		buf = (uint8_t *)ctx->objmask + arg->offset;
		goto objmask;
	}
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
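/*
 * Worked example for the 3-byte case above: a 24-bit field such as a VXLAN
 * VNI given as 0x123456 with arg->hton set is stored in network order as the
 * bytes {0x12, 0x34, 0x56}.
 */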
/**
 * Parse a string.
 *
 * Three arguments (ctx->args) are retrieved from the stack to store data,
 * its actual length and address (in that order).
 */
static int
parse_string(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
{
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	int ret;

	/* Arguments are expected. */
	if (!arg_data)
		return -1;
	if (!arg_len) {
		push_args(ctx, arg_data);
		return -1;
	}
	if (!arg_addr) {
		push_args(ctx, arg_len);
		push_args(ctx, arg_data);
		return -1;
	}
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	if (arg_data->mask || size < len)
		goto error;
	if (!ctx->object)
		return len;
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", len);
	if (ret < 0)
		goto error;
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	if (ret < 0) {
		pop_args(ctx);
		goto error;
	}
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, str, len);
	memset((uint8_t *)buf + len, 0x00, size - len);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (void *[]){
			(uint8_t *)ctx->object + arg_data->offset
		       },
		       arg_addr->size);
		if (ctx->objmask)
			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
			       (void *[]){
				(uint8_t *)ctx->objmask + arg_data->offset
			       },
			       arg_addr->size);
	}
	return len;
error:
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	return -1;
}
static int
parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
{
	char *c = NULL;
	uint32_t i, len;
	char tmp[3];

	/* Check input parameters */
	if ((src == NULL) ||
		(dst == NULL) ||
		(size == NULL) ||
		(*size == 0))
		return -1;

	/* Convert chars to bytes */
	for (i = 0, len = 0; i < *size; i += 2) {
		snprintf(tmp, 3, "%s", src + i);
		dst[len++] = strtoul(tmp, &c, 16);
		if (*c != 0) {
			len--;
			dst[len] = 0;
			*size = len;
			return -1;
		}
	}
	dst[len] = 0;
	*size = len;

	return 0;
}

static int
parse_hex(struct context *ctx, const struct token *token,
	  const char *str, unsigned int len,
	  void *buf, unsigned int size)
{
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	int ret;
	unsigned int hexlen = len;
	unsigned int length = 256;
	uint8_t hex_tmp[length];

	/* Arguments are expected. */
	if (!arg_data)
		return -1;
	if (!arg_len) {
		push_args(ctx, arg_data);
		return -1;
	}
	if (!arg_addr) {
		push_args(ctx, arg_len);
		push_args(ctx, arg_data);
		return -1;
	}
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	if (arg_data->mask)
		goto error;
	/* translate bytes string to array. */
	if (str[0] == '0' && ((str[1] == 'x') ||
			(str[1] == 'X'))) {
		str += 2;
		hexlen -= 2;
	}
	if (hexlen > length)
		goto error;
	ret = parse_hex_string(str, hex_tmp, &hexlen);
	if (ret < 0)
		goto error;
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
	if (ret < 0)
		goto error;
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	if (ret < 0) {
		pop_args(ctx);
		goto error;
	}
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, hex_tmp, hexlen);
	memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg_data->offset,
		       0xff, hexlen);
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (void *[]){
			(uint8_t *)ctx->object + arg_data->offset
		       },
		       arg_addr->size);
		if (ctx->objmask)
			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
			       (void *[]){
				(uint8_t *)ctx->objmask + arg_data->offset
			       },
			       arg_addr->size);
	}
	return len;
error:
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	return -1;
}
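/*
 * Worked example for the two helpers above: the token "0x0a1b" is stripped
 * of its "0x" prefix, then parse_hex_string() converts it two characters at
 * a time into the bytes {0x0a, 0x1b} and reports a length of 2.
 */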
/**
 * Parse a MAC address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_mac_addr(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	struct rte_ether_addr tmp;
	int ret;

	(void)token;
	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
	if (ret < 0 || (unsigned int)ret != len)
		goto error;
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
/**
 * Parse an IPv4 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_ipv4_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	char str2[len + 1];
	struct in_addr tmp;
	int ret;

	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	memcpy(str2, str, len);
	str2[len] = '\0';
	ret = inet_pton(AF_INET, str2, &tmp);
	if (ret != 1) {
		/* Attempt integer parsing. */
		push_args(ctx, arg);
		return parse_int(ctx, token, str, len, buf, size);
	}
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
/**
 * Parse an IPv6 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_ipv6_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	char str2[len + 1];
	struct in6_addr tmp;
	int ret;

	(void)token;
	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	memcpy(str2, str, len);
	str2[len] = '\0';
	ret = inet_pton(AF_INET6, str2, &tmp);
	if (ret != 1)
		goto error;
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
/** Boolean values (even indices stand for false). */
static const char *const boolean_name[] = {
	"0", "1",
	"false", "true",
	"no", "yes",
	"N", "Y",
	"off", "on",
	NULL,
};

/**
 * Parse a boolean value.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_boolean(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	unsigned int i;
	int ret;

	/* Argument is expected. */
	if (!arg)
		return -1;
	for (i = 0; boolean_name[i]; ++i)
		if (!strcmp_partial(boolean_name[i], str, len))
			break;
	/* Process token as integer. */
	if (boolean_name[i])
		str = i & 1 ? "1" : "0";
	push_args(ctx, arg);
	ret = parse_int(ctx, token, str, strlen(str), buf, size);
	return ret > 0 ? (int)len : ret;
}
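/*
 * For instance, the token "on" matches an odd index in boolean_name[] and is
 * therefore rewritten to "1" before being handed over to parse_int().
 */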
/** Parse port and update context. */
static int
parse_port(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
{
	struct buffer *out = &(struct buffer){ .port = 0 };
	int ret;

	if (buf)
		out = buf;
	else {
		ctx->objdata = 0;
		ctx->object = out;
		ctx->objmask = NULL;
		size = sizeof(*out);
	}
	ret = parse_int(ctx, token, str, len, out, size);
	if (ret >= 0)
		ctx->port = out->port;
	if (!buf)
		ctx->object = NULL;
	return ret;
}
/** Parse set command, initialize output buffer for subsequent tokens. */
static int
parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len,
			  void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
		return -1;
	ctx->objdata = 0;
	ctx->object = out;
	ctx->objmask = NULL;
	if (!out->command)
		return -1;
	out->command = ctx->curr;
	return len;
}
/**
 * Parse set raw_encap/raw_decap command,
 * initialize output buffer for subsequent tokens.
 */
static int
parse_set_init(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
		return -1;
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objdata = 0;
	ctx->object = out;
	ctx->objmask = NULL;
	if (!out->command) {
		if (ctx->curr != SET)
			return -1;
		if (sizeof(*out) > size)
			return -1;
		out->command = ctx->curr;
		out->args.vc.data = (uint8_t *)out + size;
		/* All we need is pattern */
		out->args.vc.pattern =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
					       sizeof(double));
		ctx->object = out->args.vc.pattern;
	}
	return len;
}
/** No completion. */
static int
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	(void)ctx;
	(void)token;
	(void)ent;
	(void)buf;
	(void)size;
	return 0;
}

/** Complete boolean values. */
static int
comp_boolean(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; boolean_name[i]; ++i)
		if (buf && i == ent)
			return strlcpy(buf, boolean_name[i], size);
	if (buf)
		return -1;
	return i;
}

/** Complete action names. */
static int
comp_action(struct context *ctx, const struct token *token,
	    unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; next_action[i]; ++i)
		if (buf && i == ent)
			return strlcpy(buf, token_list[next_action[i]].name,
				       size);
	if (buf)
		return -1;
	return i;
}

/** Complete available ports. */
static int
comp_port(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i = 0;
	portid_t p;

	(void)ctx;
	(void)token;
	RTE_ETH_FOREACH_DEV(p) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", p);
		++i;
	}
	if (buf)
		return -1;
	return i;
}

/** Complete available rule IDs. */
static int
comp_rule_id(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i = 0;
	struct rte_port *port;
	struct port_flow *pf;

	(void)token;
	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
	    ctx->port == (portid_t)RTE_PORT_ALL)
		return -1;
	port = &ports[ctx->port];
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", pf->id);
		++i;
	}
	if (buf)
		return -1;
	return i;
}

/** Complete type field for RSS action. */
static int
comp_vc_action_rss_type(struct context *ctx, const struct token *token,
			unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; rss_type_table[i].str; ++i)
		;
	if (!buf)
		return i + 1;
	if (ent < i)
		return strlcpy(buf, rss_type_table[ent].str, size);
	if (ent == i)
		return snprintf(buf, size, "end");
	return -1;
}

/** Complete queue field for RSS action. */
static int
comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
			 unsigned int ent, char *buf, unsigned int size)
{
	(void)ctx;
	(void)token;
	if (!buf)
		return nb_rxq + 1;
	if (ent < nb_rxq)
		return snprintf(buf, size, "%u", ent);
	if (ent == nb_rxq)
		return snprintf(buf, size, "end");
	return -1;
}

/** Complete index number for set raw_encap/raw_decap commands. */
static int
comp_set_raw_index(struct context *ctx, const struct token *token,
		   unsigned int ent, char *buf, unsigned int size)
{
	uint16_t idx = 0;
	uint16_t nb = 0;

	RTE_SET_USED(ctx);
	RTE_SET_USED(token);
	for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
		if (buf && idx == ent)
			return snprintf(buf, size, "%u", idx);
		++nb;
	}
	return nb;
}
/** Internal context. */
static struct context cmd_flow_context;

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_flow;
cmdline_parse_inst_t cmd_set_raw;

/** Initialize context. */
static void
cmd_flow_context_init(struct context *ctx)
{
	/* A full memset() is not necessary. */
	ctx->curr = ZERO;
	ctx->prev = ZERO;
	ctx->next_num = 0;
	ctx->args_num = 0;
	ctx->eol = 0;
	ctx->last = 0;
	ctx->port = 0;
	ctx->objdata = 0;
	ctx->object = NULL;
	ctx->objmask = NULL;
}
/** Parse a token (cmdline API). */
static int
cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
	       unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token;
	const enum index *list;
	int len;
	int i;

	(void)hdr;
	token = &token_list[ctx->curr];
	/* Check argument length. */
	ctx->eol = 0;
	ctx->last = 1;
	for (len = 0; src[len]; ++len)
		if (src[len] == '#' || isspace(src[len]))
			break;
	if (!len)
		return -1;
	/* Last argument and EOL detection. */
	for (i = len; src[i]; ++i)
		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
			break;
		else if (!isspace(src[i])) {
			ctx->last = 0;
			break;
		}
	for (; src[i]; ++i)
		if (src[i] == '\r' || src[i] == '\n') {
			ctx->eol = 1;
			break;
		}
	/* Initialize context if necessary. */
	if (!ctx->next_num) {
		if (!token->next)
			return 0;
		ctx->next[ctx->next_num++] = token->next[0];
	}
	/* Process argument through candidates. */
	ctx->prev = ctx->curr;
	list = ctx->next[ctx->next_num - 1];
	for (i = 0; list[i]; ++i) {
		const struct token *next = &token_list[list[i]];
		int tmp;

		ctx->curr = list[i];
		if (next->call)
			tmp = next->call(ctx, next, src, len, result, size);
		else
			tmp = parse_default(ctx, next, src, len, result, size);
		if (tmp == -1 || tmp != len)
			continue;
		token = next;
		break;
	}
	if (!list[i])
		return -1;
	--ctx->next_num;
	/* Push subsequent tokens if any. */
	if (token->next)
		for (i = 0; token->next[i]; ++i) {
			if (ctx->next_num == RTE_DIM(ctx->next))
				return -1;
			ctx->next[ctx->next_num++] = token->next[i];
		}
	/* Push arguments if any. */
	if (token->args)
		for (i = 0; token->args[i]; ++i) {
			if (ctx->args_num == RTE_DIM(ctx->args))
				return -1;
			ctx->args[ctx->args_num++] = token->args[i];
		}
	return len;
}
/** Return number of completion entries (cmdline API). */
static int
cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return 0;
	/*
	 * If there is a single token, use its completion callback, otherwise
	 * return the number of entries.
	 */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, 0, NULL, 0);
	}
	return i;
}

/** Return a completion entry (cmdline API). */
static int
cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
			  char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return -1;
	/* If there is a single token, use its completion callback. */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
	}
	/* Otherwise make sure the index is valid and use defaults. */
	if (index >= i)
		return -1;
	token = &token_list[list[index]];
	strlcpy(dst, token->name, size);
	/* Save index for cmd_flow_get_help(). */
	ctx->prev = list[index];
	return 0;
}
/** Populate help strings for current token (cmdline API). */
static int
cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
	if (token->help)
		cmd_flow.help_str = token->help;
	else
		cmd_flow.help_str = token->name;
	return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_flow_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_flow_get_help,
	},
	.offset = 0,
};
/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
	     cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_flow.tokens))
		cmd_flow_context_init(ctx);
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && ctx->curr) {
		*hdr = NULL;
		return;
	}
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		for (i = 0; list[i]; ++i) {
			if (list[i] != END)
				continue;
			*hdr = NULL;
			return;
		}
	}
	*hdr = &cmd_flow_token_hdr;
}
/** Dispatch parsed buffer to function calls. */
static void
cmd_flow_parsed(const struct buffer *in)
{
	switch (in->command) {
	case VALIDATE:
		port_flow_validate(in->port, &in->args.vc.attr,
				   in->args.vc.pattern, in->args.vc.actions);
		break;
	case CREATE:
		port_flow_create(in->port, &in->args.vc.attr,
				 in->args.vc.pattern, in->args.vc.actions);
		break;
	case DESTROY:
		port_flow_destroy(in->port, in->args.destroy.rule_n,
				  in->args.destroy.rule);
		break;
	case FLUSH:
		port_flow_flush(in->port);
		break;
	case QUERY:
		port_flow_query(in->port, in->args.query.rule,
				&in->args.query.action);
		break;
	case LIST:
		port_flow_list(in->port, in->args.list.group_n,
			       in->args.list.group);
		break;
	case ISOLATE:
		port_flow_isolate(in->port, in->args.isolate.set);
		break;
	default:
		break;
	}
}
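/*
 * Illustrative testpmd commands dispatched through the switch above,
 * assuming a simple MARK + QUEUE rule on port 0:
 *   flow validate 0 ingress pattern eth / ipv4 / end
 *        actions mark id 42 / queue index 0 / end
 *   flow create 0 ingress pattern eth / ipv4 / end
 *        actions mark id 42 / queue index 0 / end
 */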
/** Token generator and output processing callback (cmdline API). */
static void
cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	if (cl == NULL)
		cmd_flow_tok(arg0, arg2);
	else
		cmd_flow_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_flow = {
	.f = cmd_flow_cb,
	.data = NULL, /**< Unused. */
	.help_str = NULL, /**< Updated by cmd_flow_get_help(). */
	.tokens = {
		NULL,
	}, /**< Tokens are returned by cmd_flow_tok(). */
};
/** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */

static void
update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
{
	struct rte_flow_item_ipv4 *ipv4;
	struct rte_flow_item_eth *eth;
	struct rte_flow_item_ipv6 *ipv6;
	struct rte_flow_item_vxlan *vxlan;
	struct rte_flow_item_vxlan_gpe *gpe;
	struct rte_flow_item_nvgre *nvgre;
	uint32_t ipv6_vtc_flow;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		eth = (struct rte_flow_item_eth *)buf;
		if (next_proto)
			eth->type = rte_cpu_to_be_16(next_proto);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ipv4 = (struct rte_flow_item_ipv4 *)buf;
		ipv4->hdr.version_ihl = 0x45;
		if (next_proto && ipv4->hdr.next_proto_id == 0)
			ipv4->hdr.next_proto_id = (uint8_t)next_proto;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ipv6 = (struct rte_flow_item_ipv6 *)buf;
		if (next_proto && ipv6->hdr.proto == 0)
			ipv6->hdr.proto = (uint8_t)next_proto;
		ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
		ipv6_vtc_flow &= 0x0FFFFFFF; /* Reset version bits. */
		ipv6_vtc_flow |= 0x60000000; /* Set IPv6 version. */
		ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		vxlan = (struct rte_flow_item_vxlan *)buf;
		vxlan->flags = 0x08;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		gpe = (struct rte_flow_item_vxlan_gpe *)buf;
		gpe->flags = 0x0C;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		nvgre = (struct rte_flow_item_nvgre *)buf;
		nvgre->protocol = rte_cpu_to_be_16(0x6558);
		nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
		break;
	default:
		break;
	}
}
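/*
 * Example of the fixups above: an IPv4 header taken from a bare "ipv4" item
 * gets version_ihl forced to 0x45, and its next_proto_id is filled with the
 * protocol implied by the following (inner) item, e.g. 0x11 for UDP, when
 * the user did not set one explicitly.
 */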
/** Helper of get item's default mask. */
static const void *
flow_item_default_mask(const struct rte_flow_item *item)
{
	const void *mask = NULL;
	static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ANY:
		mask = &rte_flow_item_any_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VF:
		mask = &rte_flow_item_vf_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_PORT_ID:
		mask = &rte_flow_item_port_id_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_RAW:
		mask = &rte_flow_item_raw_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ETH:
		mask = &rte_flow_item_eth_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		mask = &rte_flow_item_vlan_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		mask = &rte_flow_item_ipv4_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		mask = &rte_flow_item_ipv6_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ICMP:
		mask = &rte_flow_item_icmp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		mask = &rte_flow_item_udp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		mask = &rte_flow_item_tcp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		mask = &rte_flow_item_sctp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		mask = &rte_flow_item_vxlan_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		mask = &rte_flow_item_vxlan_gpe_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_E_TAG:
		mask = &rte_flow_item_e_tag_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		mask = &rte_flow_item_nvgre_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		mask = &rte_flow_item_mpls_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		mask = &rte_flow_item_gre_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		mask = &gre_key_default_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_META:
		mask = &rte_flow_item_meta_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_FUZZY:
		mask = &rte_flow_item_fuzzy_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GTP:
		mask = &rte_flow_item_gtp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		mask = &rte_flow_item_esp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GTP_PSC:
		mask = &rte_flow_item_gtp_psc_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
		mask = &rte_flow_item_pppoe_proto_id_mask;
		break;
	default:
		break;
	}
	return mask;
}
/** Dispatch parsed buffer to function calls. */
static void
cmd_set_raw_parsed(const struct buffer *in)
{
	uint32_t n = in->args.vc.pattern_n;
	int i = 0;
	struct rte_flow_item *item = NULL;
	size_t size = 0;
	uint8_t *data = NULL;
	uint8_t *data_tail = NULL;
	size_t *total_size = NULL;
	uint16_t upper_layer = 0;
	uint16_t proto = 0;
	uint16_t idx = in->port; /* We borrow port field as index */

	RTE_ASSERT(in->command == SET_RAW_ENCAP ||
		   in->command == SET_RAW_DECAP);
	if (in->command == SET_RAW_ENCAP) {
		total_size = &raw_encap_confs[idx].size;
		data = (uint8_t *)&raw_encap_confs[idx].data;
	} else {
		total_size = &raw_decap_confs[idx].size;
		data = (uint8_t *)&raw_decap_confs[idx].data;
	}
	*total_size = 0;
	memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
	/* process hdr from upper layer to low layer (L3/L4 -> L2). */
	data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
	for (i = n - 1 ; i >= 0; --i) {
		item = in->args.vc.pattern + i;
		if (item->spec == NULL)
			item->spec = flow_item_default_mask(item);
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			size = sizeof(struct rte_flow_item_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size = sizeof(struct rte_flow_item_vlan);
			proto = RTE_ETHER_TYPE_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size = sizeof(struct rte_flow_item_ipv4);
			proto = RTE_ETHER_TYPE_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size = sizeof(struct rte_flow_item_ipv6);
			proto = RTE_ETHER_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size = sizeof(struct rte_flow_item_udp);
			proto = 0x11;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size = sizeof(struct rte_flow_item_tcp);
			proto = 0x06;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size = sizeof(struct rte_flow_item_vxlan);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size = sizeof(struct rte_flow_item_vxlan_gpe);
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			size = sizeof(struct rte_flow_item_gre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			size = sizeof(rte_be32_t);
			proto = 0x0;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size = sizeof(struct rte_flow_item_mpls);
			proto = 0x0;
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			size = sizeof(struct rte_flow_item_nvgre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			size = sizeof(struct rte_flow_item_geneve);
			break;
		default:
			printf("Error - Not supported item\n");
			*total_size = 0;
			memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
			return;
		}
		*total_size += size;
		rte_memcpy(data_tail - (*total_size), item->spec, size);
		/* update some fields which cannot be set by cmdline */
		update_fields((data_tail - (*total_size)), item,
			      upper_layer);
		upper_layer = proto;
	}
	if (verbose_level & 0x1)
		printf("total data size is %zu\n", (*total_size));
	RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
	memmove(data, (data_tail - (*total_size)), *total_size);
}
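/*
 * Illustrative testpmd usage filling raw_encap_confs[4] with a VXLAN
 * encapsulation (headers are typed outermost first; the loop above copies
 * them innermost first and fixes up the inherited protocol fields):
 *   set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1 /
 *       ipv4 src is 1.1.1.1 dst is 2.2.2.2 / udp dst is 4789 /
 *       vxlan vni is 2 / end_set
 */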
/** Populate help strings for current token (cmdline API). */
static int
cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
		     unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	if (token->help)
		cmd_set_raw.help_str = token->help;
	else
		cmd_set_raw.help_str = token->name;
	return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_set_raw_get_help,
	},
	.offset = 0,
};
/** Populate the next dynamic token. */
static void
cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
		cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_set_raw.tokens)) {
		cmd_flow_context_init(ctx);
		ctx->curr = START_SET;
	}
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && (ctx->curr != START_SET)) {
		*hdr = NULL;
		return;
	}
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		for (i = 0; list[i]; ++i) {
			if (list[i] != END_SET)
				continue;
			*hdr = NULL;
			return;
		}
	}
	*hdr = &cmd_set_raw_token_hdr;
}
/** Token generator and output processing callback (cmdline API). */
static void
cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	if (cl == NULL)
		cmd_set_raw_tok(arg0, arg2);
	else
		cmd_set_raw_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_set_raw = {
	.f = cmd_set_raw_cb,
	.data = NULL, /**< Unused. */
	.help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
	.tokens = {
		NULL,
	}, /**< Tokens are returned by cmd_set_raw_tok(). */
};
/* *** display raw_encap/raw_decap buf */
struct cmd_show_set_raw_result {
	cmdline_fixed_string_t cmd_show;
	cmdline_fixed_string_t cmd_what;
	cmdline_fixed_string_t cmd_all;
	uint16_t cmd_index;
};

static void
cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
{
	struct cmd_show_set_raw_result *res = parsed_result;
	uint16_t index = res->cmd_index;
	uint8_t all = 0;
	uint8_t *raw_data = NULL;
	size_t raw_size = 0;
	char title[16] = {0};

	RTE_SET_USED(cl);
	RTE_SET_USED(data);
	if (!strcmp(res->cmd_all, "all")) {
		all = 1;
		index = 0;
	} else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
		printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
		return;
	}
	do {
		if (!strcmp(res->cmd_what, "raw_encap")) {
			raw_data = (uint8_t *)&raw_encap_confs[index].data;
			raw_size = raw_encap_confs[index].size;
			snprintf(title, 16, "\nindex: %u", index);
			rte_hexdump(stdout, title, raw_data, raw_size);
		} else {
			raw_data = (uint8_t *)&raw_decap_confs[index].data;
			raw_size = raw_decap_confs[index].size;
			snprintf(title, 16, "\nindex: %u", index);
			rte_hexdump(stdout, title, raw_data, raw_size);
		}
	} while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
}
cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_show, "show");
cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_what, "raw_encap#raw_decap");
cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
	TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_index, UINT16);
cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
	TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
			cmd_all, "all");
cmdline_parse_inst_t cmd_show_set_raw = {
	.f = cmd_show_set_raw_parsed,
	.data = NULL,
	.help_str = "show <raw_encap|raw_decap> <index>",
	.tokens = {
		(void *)&cmd_show_set_raw_cmd_show,
		(void *)&cmd_show_set_raw_cmd_what,
		(void *)&cmd_show_set_raw_cmd_index,
		NULL,
	},
};
cmdline_parse_inst_t cmd_show_set_raw_all = {
	.f = cmd_show_set_raw_parsed,
	.data = NULL,
	.help_str = "show <raw_encap|raw_decap> all",
	.tokens = {
		(void *)&cmd_show_set_raw_cmd_show,
		(void *)&cmd_show_set_raw_cmd_what,
		(void *)&cmd_show_set_raw_cmd_all,
		NULL,
	},
};
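/*
 * Illustrative testpmd usage: "show raw_encap 4" hexdumps a single buffer,
 * while "show raw_encap all" iterates over every configured index.
 */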