1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
29 /** Parser token indices. */
53 /* Top-level command. */
55 /* Sub-level commands. */
60 /* Top-level command. */
62 /* Sub-level commands. */
72 /* Destroy arguments. */
75 /* Query arguments. */
81 /* Validate/create arguments. */
88 /* Validate/create pattern. */
125 ITEM_VLAN_INNER_TYPE,
157 ITEM_E_TAG_GRP_ECID_B,
166 ITEM_GRE_C_RSVD0_VER,
184 ITEM_ARP_ETH_IPV4_SHA,
185 ITEM_ARP_ETH_IPV4_SPA,
186 ITEM_ARP_ETH_IPV4_THA,
187 ITEM_ARP_ETH_IPV4_TPA,
189 ITEM_IPV6_EXT_NEXT_HDR,
194 ITEM_ICMP6_ND_NS_TARGET_ADDR,
196 ITEM_ICMP6_ND_NA_TARGET_ADDR,
198 ITEM_ICMP6_ND_OPT_TYPE,
199 ITEM_ICMP6_ND_OPT_SLA_ETH,
200 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
201 ITEM_ICMP6_ND_OPT_TLA_ETH,
202 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
215 ITEM_HIGIG2_CLASSIFICATION,
221 ITEM_L2TPV3OIP_SESSION_ID,
230 /* Validate/create actions. */
250 ACTION_RSS_FUNC_DEFAULT,
251 ACTION_RSS_FUNC_TOEPLITZ,
252 ACTION_RSS_FUNC_SIMPLE_XOR,
253 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
265 ACTION_PHY_PORT_ORIGINAL,
266 ACTION_PHY_PORT_INDEX,
268 ACTION_PORT_ID_ORIGINAL,
272 ACTION_OF_SET_MPLS_TTL,
273 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
274 ACTION_OF_DEC_MPLS_TTL,
275 ACTION_OF_SET_NW_TTL,
276 ACTION_OF_SET_NW_TTL_NW_TTL,
277 ACTION_OF_DEC_NW_TTL,
278 ACTION_OF_COPY_TTL_OUT,
279 ACTION_OF_COPY_TTL_IN,
282 ACTION_OF_PUSH_VLAN_ETHERTYPE,
283 ACTION_OF_SET_VLAN_VID,
284 ACTION_OF_SET_VLAN_VID_VLAN_VID,
285 ACTION_OF_SET_VLAN_PCP,
286 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
288 ACTION_OF_POP_MPLS_ETHERTYPE,
290 ACTION_OF_PUSH_MPLS_ETHERTYPE,
297 ACTION_MPLSOGRE_ENCAP,
298 ACTION_MPLSOGRE_DECAP,
299 ACTION_MPLSOUDP_ENCAP,
300 ACTION_MPLSOUDP_DECAP,
302 ACTION_SET_IPV4_SRC_IPV4_SRC,
304 ACTION_SET_IPV4_DST_IPV4_DST,
306 ACTION_SET_IPV6_SRC_IPV6_SRC,
308 ACTION_SET_IPV6_DST_IPV6_DST,
310 ACTION_SET_TP_SRC_TP_SRC,
312 ACTION_SET_TP_DST_TP_DST,
318 ACTION_SET_MAC_SRC_MAC_SRC,
320 ACTION_SET_MAC_DST_MAC_DST,
322 ACTION_INC_TCP_SEQ_VALUE,
324 ACTION_DEC_TCP_SEQ_VALUE,
326 ACTION_INC_TCP_ACK_VALUE,
328 ACTION_DEC_TCP_ACK_VALUE,
331 ACTION_RAW_ENCAP_INDEX,
332 ACTION_RAW_ENCAP_INDEX_VALUE,
333 ACTION_RAW_DECAP_INDEX,
334 ACTION_RAW_DECAP_INDEX_VALUE,
337 ACTION_SET_TAG_INDEX,
340 ACTION_SET_META_DATA,
341 ACTION_SET_META_MASK,
342 ACTION_SET_IPV4_DSCP,
343 ACTION_SET_IPV4_DSCP_VALUE,
344 ACTION_SET_IPV6_DSCP,
345 ACTION_SET_IPV6_DSCP_VALUE,
350 /** Maximum size for pattern in struct rte_flow_item_raw. */
351 #define ITEM_RAW_PATTERN_SIZE 40
353 /** Storage size for struct rte_flow_item_raw including pattern. */
354 #define ITEM_RAW_SIZE \
355 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
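/*
 * For example, the ITEM_RAW_PATTERN token defined below writes the byte
 * string into that trailing area through an arbitrary-offset argument:
 *
 *   ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw), ITEM_RAW_PATTERN_SIZE)
 *
 * which is why ITEM_RAW_SIZE reserves the structure plus pattern storage.
 */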
357 /** Maximum number of queue indices in struct rte_flow_action_rss. */
358 #define ACTION_RSS_QUEUE_NUM 128
360 /** Storage for struct rte_flow_action_rss including external data. */
361 struct action_rss_data {
362 struct rte_flow_action_rss conf;
363 uint8_t key[RSS_HASH_KEY_LENGTH];
364 uint16_t queue[ACTION_RSS_QUEUE_NUM];
367 /** Maximum data size in struct rte_flow_action_raw_encap. */
368 #define ACTION_RAW_ENCAP_MAX_DATA 128
369 #define RAW_ENCAP_CONFS_MAX_NUM 8
371 /** Storage for struct rte_flow_action_raw_encap. */
372 struct raw_encap_conf {
373 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
374 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
378 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
380 /** Storage for struct rte_flow_action_raw_encap including external data. */
381 struct action_raw_encap_data {
382 struct rte_flow_action_raw_encap conf;
383 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
384 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
388 /** Storage for struct rte_flow_action_raw_decap. */
389 struct raw_decap_conf {
390 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
394 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
396 /** Storage for struct rte_flow_action_raw_decap including external data. */
397 struct action_raw_decap_data {
398 struct rte_flow_action_raw_decap conf;
399 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
403 struct vxlan_encap_conf vxlan_encap_conf = {
407 .vni = "\x00\x00\x00",
409 .udp_dst = RTE_BE16(4789),
410 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
411 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
412 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
413 "\x00\x00\x00\x00\x00\x00\x00\x01",
414 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
415 "\x00\x00\x00\x00\x00\x00\x11\x11",
419 .eth_src = "\x00\x00\x00\x00\x00\x00",
420 .eth_dst = "\xff\xff\xff\xff\xff\xff",
423 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
424 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
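/*
 * The six entries are expected to cover Ethernet, VLAN, IPv4 or IPv6
 * (depending on vxlan_encap_conf), UDP and VXLAN headers plus the
 * terminating END item, as assembled by parse_vc_action_vxlan_encap().
 */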
426 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
427 struct action_vxlan_encap_data {
428 struct rte_flow_action_vxlan_encap conf;
429 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
430 struct rte_flow_item_eth item_eth;
431 struct rte_flow_item_vlan item_vlan;
433 struct rte_flow_item_ipv4 item_ipv4;
434 struct rte_flow_item_ipv6 item_ipv6;
436 struct rte_flow_item_udp item_udp;
437 struct rte_flow_item_vxlan item_vxlan;
440 struct nvgre_encap_conf nvgre_encap_conf = {
443 .tni = "\x00\x00\x00",
444 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
445 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
446 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
447 "\x00\x00\x00\x00\x00\x00\x00\x01",
448 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
449 "\x00\x00\x00\x00\x00\x00\x11\x11",
451 .eth_src = "\x00\x00\x00\x00\x00\x00",
452 .eth_dst = "\xff\xff\xff\xff\xff\xff",
455 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
456 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
458 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
459 struct action_nvgre_encap_data {
460 struct rte_flow_action_nvgre_encap conf;
461 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
462 struct rte_flow_item_eth item_eth;
463 struct rte_flow_item_vlan item_vlan;
465 struct rte_flow_item_ipv4 item_ipv4;
466 struct rte_flow_item_ipv6 item_ipv6;
468 struct rte_flow_item_nvgre item_nvgre;
471 struct l2_encap_conf l2_encap_conf;
473 struct l2_decap_conf l2_decap_conf;
475 struct mplsogre_encap_conf mplsogre_encap_conf;
477 struct mplsogre_decap_conf mplsogre_decap_conf;
479 struct mplsoudp_encap_conf mplsoudp_encap_conf;
481 struct mplsoudp_decap_conf mplsoudp_decap_conf;
483 /** Maximum number of subsequent tokens and arguments on the stack. */
484 #define CTX_STACK_SIZE 16
486 /** Parser context. */
488 /** Stack of subsequent token lists to process. */
489 const enum index *next[CTX_STACK_SIZE];
490 /** Arguments for stacked tokens. */
491 const void *args[CTX_STACK_SIZE];
492 enum index curr; /**< Current token index. */
493 enum index prev; /**< Index of the last token seen. */
494 int next_num; /**< Number of entries in next[]. */
495 int args_num; /**< Number of entries in args[]. */
496 uint32_t eol:1; /**< EOL has been detected. */
497 uint32_t last:1; /**< No more arguments. */
498 portid_t port; /**< Current port ID (for completions). */
499 uint32_t objdata; /**< Object-specific data. */
500 void *object; /**< Address of current object for relative offsets. */
501 void *objmask; /**< Object a full mask must be written to. */
504 /** Token argument. */
506 uint32_t hton:1; /**< Use network byte ordering. */
507 uint32_t sign:1; /**< Value is signed. */
508 uint32_t bounded:1; /**< Value is bounded. */
509 uintmax_t min; /**< Minimum value if bounded. */
510 uintmax_t max; /**< Maximum value if bounded. */
511 uint32_t offset; /**< Relative offset from ctx->object. */
512 uint32_t size; /**< Field size. */
513 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
516 /** Parser token definition. */
518 /** Type displayed during completion (defaults to "TOKEN"). */
520 /** Help displayed during completion (defaults to token name). */
522 /** Private data used by parser functions. */
525 * Lists of subsequent tokens to push on the stack. Each call to the
526 * parser consumes the last entry of that stack.
528 const enum index *const *next;
529 /** Arguments stack for subsequent tokens that need them. */
530 const struct arg *const *args;
532 * Token-processing callback, returns -1 in case of error, the
533 * length of the matched string otherwise. If NULL, attempts to
534 * match the token name.
536 * If buf is not NULL, the result should be stored in it according
537 * to context. An error is returned if not large enough.
539 int (*call)(struct context *ctx, const struct token *token,
540 const char *str, unsigned int len,
541 void *buf, unsigned int size);
543 * Callback that provides possible values for this token, used for
544 * completion. Returns -1 in case of error, the number of possible
545 * values otherwise. If NULL, the token name is used.
547 * If buf is not NULL, entry index ent is written to buf and the
548 * full length of the entry is returned (same behavior as
551 int (*comp)(struct context *ctx, const struct token *token,
552 unsigned int ent, char *buf, unsigned int size);
553 /** Mandatory token name, no default value. */
557 /** Static initializer for the next field. */
558 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
560 /** Static initializer for a NEXT() entry. */
561 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
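/*
 * For example, the "validate" and "create" commands below declare
 *
 *   .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
 *
 * Each inner list is pushed on the parser stack and consumed last-in,
 * first-out (see struct token), so the PORT_ID token is expected before
 * the validate/create attributes.
 */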
563 /** Static initializer for the args field. */
564 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
566 /** Static initializer for ARGS() to target a field. */
567 #define ARGS_ENTRY(s, f) \
568 (&(const struct arg){ \
569 .offset = offsetof(s, f), \
570 .size = sizeof(((s *)0)->f), \
573 /** Static initializer for ARGS() to target a bit-field. */
574 #define ARGS_ENTRY_BF(s, f, b) \
575 (&(const struct arg){ \
577 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
580 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
581 #define ARGS_ENTRY_MASK(s, f, m) \
582 (&(const struct arg){ \
583 .offset = offsetof(s, f), \
584 .size = sizeof(((s *)0)->f), \
585 .mask = (const void *)(m), \
588 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
589 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
590 (&(const struct arg){ \
592 .offset = offsetof(s, f), \
593 .size = sizeof(((s *)0)->f), \
594 .mask = (const void *)(m), \
597 /** Static initializer for ARGS() to target a pointer. */
598 #define ARGS_ENTRY_PTR(s, f) \
599 (&(const struct arg){ \
600 .size = sizeof(*((s *)0)->f), \
603 /** Static initializer for ARGS() with arbitrary offset and size. */
604 #define ARGS_ENTRY_ARB(o, s) \
605 (&(const struct arg){ \
610 /** Same as ARGS_ENTRY_ARB() with bounded values. */
611 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
612 (&(const struct arg){ \
620 /** Same as ARGS_ENTRY() using network byte ordering. */
621 #define ARGS_ENTRY_HTON(s, f) \
622 (&(const struct arg){ \
624 .offset = offsetof(s, f), \
625 .size = sizeof(((s *)0)->f), \
628 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
629 #define ARG_ENTRY_HTON(s) \
630 (&(const struct arg){ \
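/*
 * For example, a token filling the 16-bit EtherType of an Ethernet item
 * in network byte order declares
 *
 *   .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
 *
 * while ARG_ENTRY_HTON(rte_be32_t) is used for standalone values such as
 * the GRE key, where no enclosing structure field exists.
 */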
636 /** Parser output buffer layout expected by cmd_flow_parsed(). */
638 enum index command; /**< Flow command. */
639 portid_t port; /**< Affected port ID. */
642 struct rte_flow_attr attr;
643 struct rte_flow_item *pattern;
644 struct rte_flow_action *actions;
648 } vc; /**< Validate/create arguments. */
652 } destroy; /**< Destroy arguments. */
655 } dump; /**< Dump arguments. */
658 struct rte_flow_action action;
659 } query; /**< Query arguments. */
663 } list; /**< List arguments. */
666 } isolate; /**< Isolated mode arguments. */
667 } args; /**< Command arguments. */
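/*
 * A "flow create" line is thus expected to leave buf->command set to the
 * CREATE token, buf->port set to the target port and buf->args.vc holding
 * the parsed attributes, pattern items and actions for cmd_flow_parsed()
 * to submit.
 */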
670 /** Private data for pattern items. */
671 struct parse_item_priv {
672 enum rte_flow_item_type type; /**< Item type. */
673 uint32_t size; /**< Size of item specification structure. */
676 #define PRIV_ITEM(t, s) \
677 (&(const struct parse_item_priv){ \
678 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
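/*
 * For example, the "eth" pattern item below is declared with
 *
 *   .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
 *
 * so the parser knows both the rte_flow item type and how much
 * specification storage to reserve for its spec/last/mask objects.
 */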
682 /** Private data for actions. */
683 struct parse_action_priv {
684 enum rte_flow_action_type type; /**< Action type. */
685 uint32_t size; /**< Size of action configuration structure. */
688 #define PRIV_ACTION(t, s) \
689 (&(const struct parse_action_priv){ \
690 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
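/*
 * For example, the "queue" action below is declared with
 *
 *   .priv = PRIV_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
 *
 * so the parser can allocate the action's configuration object before
 * parse_vc_conf() fills in its fields.
 */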
694 static const enum index next_vc_attr[] = {
704 static const enum index next_destroy_attr[] = {
710 static const enum index next_dump_attr[] = {
716 static const enum index next_list_attr[] = {
722 static const enum index item_param[] = {
731 static const enum index next_item[] = {
767 ITEM_ICMP6_ND_OPT_SLA_ETH,
768 ITEM_ICMP6_ND_OPT_TLA_ETH,
785 static const enum index item_fuzzy[] = {
791 static const enum index item_any[] = {
797 static const enum index item_vf[] = {
803 static const enum index item_phy_port[] = {
809 static const enum index item_port_id[] = {
815 static const enum index item_mark[] = {
821 static const enum index item_raw[] = {
831 static const enum index item_eth[] = {
839 static const enum index item_vlan[] = {
844 ITEM_VLAN_INNER_TYPE,
849 static const enum index item_ipv4[] = {
859 static const enum index item_ipv6[] = {
870 static const enum index item_icmp[] = {
877 static const enum index item_udp[] = {
884 static const enum index item_tcp[] = {
892 static const enum index item_sctp[] = {
901 static const enum index item_vxlan[] = {
907 static const enum index item_e_tag[] = {
908 ITEM_E_TAG_GRP_ECID_B,
913 static const enum index item_nvgre[] = {
919 static const enum index item_mpls[] = {
927 static const enum index item_gre[] = {
929 ITEM_GRE_C_RSVD0_VER,
937 static const enum index item_gre_key[] = {
943 static const enum index item_gtp[] = {
951 static const enum index item_geneve[] = {
958 static const enum index item_vxlan_gpe[] = {
964 static const enum index item_arp_eth_ipv4[] = {
965 ITEM_ARP_ETH_IPV4_SHA,
966 ITEM_ARP_ETH_IPV4_SPA,
967 ITEM_ARP_ETH_IPV4_THA,
968 ITEM_ARP_ETH_IPV4_TPA,
973 static const enum index item_ipv6_ext[] = {
974 ITEM_IPV6_EXT_NEXT_HDR,
979 static const enum index item_icmp6[] = {
986 static const enum index item_icmp6_nd_ns[] = {
987 ITEM_ICMP6_ND_NS_TARGET_ADDR,
992 static const enum index item_icmp6_nd_na[] = {
993 ITEM_ICMP6_ND_NA_TARGET_ADDR,
998 static const enum index item_icmp6_nd_opt[] = {
999 ITEM_ICMP6_ND_OPT_TYPE,
1004 static const enum index item_icmp6_nd_opt_sla_eth[] = {
1005 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
1010 static const enum index item_icmp6_nd_opt_tla_eth[] = {
1011 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
1016 static const enum index item_meta[] = {
1022 static const enum index item_gtp_psc[] = {
1029 static const enum index item_pppoed[] = {
1035 static const enum index item_pppoes[] = {
1041 static const enum index item_pppoe_proto_id[] = {
1046 static const enum index item_higig2[] = {
1047 ITEM_HIGIG2_CLASSIFICATION,
1053 static const enum index item_esp[] = {
1059 static const enum index item_ah[] = {
1065 static const enum index item_pfcp[] = {
1072 static const enum index next_set_raw[] = {
1078 static const enum index item_tag[] = {
1085 static const enum index item_l2tpv3oip[] = {
1086 ITEM_L2TPV3OIP_SESSION_ID,
1091 static const enum index next_action[] = {
1107 ACTION_OF_SET_MPLS_TTL,
1108 ACTION_OF_DEC_MPLS_TTL,
1109 ACTION_OF_SET_NW_TTL,
1110 ACTION_OF_DEC_NW_TTL,
1111 ACTION_OF_COPY_TTL_OUT,
1112 ACTION_OF_COPY_TTL_IN,
1114 ACTION_OF_PUSH_VLAN,
1115 ACTION_OF_SET_VLAN_VID,
1116 ACTION_OF_SET_VLAN_PCP,
1118 ACTION_OF_PUSH_MPLS,
1125 ACTION_MPLSOGRE_ENCAP,
1126 ACTION_MPLSOGRE_DECAP,
1127 ACTION_MPLSOUDP_ENCAP,
1128 ACTION_MPLSOUDP_DECAP,
1129 ACTION_SET_IPV4_SRC,
1130 ACTION_SET_IPV4_DST,
1131 ACTION_SET_IPV6_SRC,
1132 ACTION_SET_IPV6_DST,
1148 ACTION_SET_IPV4_DSCP,
1149 ACTION_SET_IPV6_DSCP,
1154 static const enum index action_mark[] = {
1160 static const enum index action_queue[] = {
1166 static const enum index action_count[] = {
1168 ACTION_COUNT_SHARED,
1173 static const enum index action_rss[] = {
1184 static const enum index action_vf[] = {
1191 static const enum index action_phy_port[] = {
1192 ACTION_PHY_PORT_ORIGINAL,
1193 ACTION_PHY_PORT_INDEX,
1198 static const enum index action_port_id[] = {
1199 ACTION_PORT_ID_ORIGINAL,
1205 static const enum index action_meter[] = {
1211 static const enum index action_of_set_mpls_ttl[] = {
1212 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1217 static const enum index action_of_set_nw_ttl[] = {
1218 ACTION_OF_SET_NW_TTL_NW_TTL,
1223 static const enum index action_of_push_vlan[] = {
1224 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1229 static const enum index action_of_set_vlan_vid[] = {
1230 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1235 static const enum index action_of_set_vlan_pcp[] = {
1236 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1241 static const enum index action_of_pop_mpls[] = {
1242 ACTION_OF_POP_MPLS_ETHERTYPE,
1247 static const enum index action_of_push_mpls[] = {
1248 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1253 static const enum index action_set_ipv4_src[] = {
1254 ACTION_SET_IPV4_SRC_IPV4_SRC,
1259 static const enum index action_set_mac_src[] = {
1260 ACTION_SET_MAC_SRC_MAC_SRC,
1265 static const enum index action_set_ipv4_dst[] = {
1266 ACTION_SET_IPV4_DST_IPV4_DST,
1271 static const enum index action_set_ipv6_src[] = {
1272 ACTION_SET_IPV6_SRC_IPV6_SRC,
1277 static const enum index action_set_ipv6_dst[] = {
1278 ACTION_SET_IPV6_DST_IPV6_DST,
1283 static const enum index action_set_tp_src[] = {
1284 ACTION_SET_TP_SRC_TP_SRC,
1289 static const enum index action_set_tp_dst[] = {
1290 ACTION_SET_TP_DST_TP_DST,
1295 static const enum index action_set_ttl[] = {
1301 static const enum index action_jump[] = {
1307 static const enum index action_set_mac_dst[] = {
1308 ACTION_SET_MAC_DST_MAC_DST,
1313 static const enum index action_inc_tcp_seq[] = {
1314 ACTION_INC_TCP_SEQ_VALUE,
1319 static const enum index action_dec_tcp_seq[] = {
1320 ACTION_DEC_TCP_SEQ_VALUE,
1325 static const enum index action_inc_tcp_ack[] = {
1326 ACTION_INC_TCP_ACK_VALUE,
1331 static const enum index action_dec_tcp_ack[] = {
1332 ACTION_DEC_TCP_ACK_VALUE,
1337 static const enum index action_raw_encap[] = {
1338 ACTION_RAW_ENCAP_INDEX,
1343 static const enum index action_raw_decap[] = {
1344 ACTION_RAW_DECAP_INDEX,
1349 static const enum index action_set_tag[] = {
1350 ACTION_SET_TAG_DATA,
1351 ACTION_SET_TAG_INDEX,
1352 ACTION_SET_TAG_MASK,
1357 static const enum index action_set_meta[] = {
1358 ACTION_SET_META_DATA,
1359 ACTION_SET_META_MASK,
1364 static const enum index action_set_ipv4_dscp[] = {
1365 ACTION_SET_IPV4_DSCP_VALUE,
1370 static const enum index action_set_ipv6_dscp[] = {
1371 ACTION_SET_IPV6_DSCP_VALUE,
1376 static const enum index action_age[] = {
1383 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1384 const char *, unsigned int,
1385 void *, unsigned int);
1386 static int parse_set_init(struct context *, const struct token *,
1387 const char *, unsigned int,
1388 void *, unsigned int);
1389 static int parse_init(struct context *, const struct token *,
1390 const char *, unsigned int,
1391 void *, unsigned int);
1392 static int parse_vc(struct context *, const struct token *,
1393 const char *, unsigned int,
1394 void *, unsigned int);
1395 static int parse_vc_spec(struct context *, const struct token *,
1396 const char *, unsigned int, void *, unsigned int);
1397 static int parse_vc_conf(struct context *, const struct token *,
1398 const char *, unsigned int, void *, unsigned int);
1399 static int parse_vc_action_rss(struct context *, const struct token *,
1400 const char *, unsigned int, void *,
1402 static int parse_vc_action_rss_func(struct context *, const struct token *,
1403 const char *, unsigned int, void *,
1405 static int parse_vc_action_rss_type(struct context *, const struct token *,
1406 const char *, unsigned int, void *,
1408 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1409 const char *, unsigned int, void *,
1411 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1412 const char *, unsigned int, void *,
1414 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1415 const char *, unsigned int, void *,
1417 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1418 const char *, unsigned int, void *,
1420 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1421 const char *, unsigned int, void *,
1423 static int parse_vc_action_mplsogre_encap(struct context *,
1424 const struct token *, const char *,
1425 unsigned int, void *, unsigned int);
1426 static int parse_vc_action_mplsogre_decap(struct context *,
1427 const struct token *, const char *,
1428 unsigned int, void *, unsigned int);
1429 static int parse_vc_action_mplsoudp_encap(struct context *,
1430 const struct token *, const char *,
1431 unsigned int, void *, unsigned int);
1432 static int parse_vc_action_mplsoudp_decap(struct context *,
1433 const struct token *, const char *,
1434 unsigned int, void *, unsigned int);
1435 static int parse_vc_action_raw_encap(struct context *,
1436 const struct token *, const char *,
1437 unsigned int, void *, unsigned int);
1438 static int parse_vc_action_raw_decap(struct context *,
1439 const struct token *, const char *,
1440 unsigned int, void *, unsigned int);
1441 static int parse_vc_action_raw_encap_index(struct context *,
1442 const struct token *, const char *,
1443 unsigned int, void *, unsigned int);
1444 static int parse_vc_action_raw_decap_index(struct context *,
1445 const struct token *, const char *,
1446 unsigned int, void *, unsigned int);
1447 static int parse_vc_action_set_meta(struct context *ctx,
1448 const struct token *token, const char *str,
1449 unsigned int len, void *buf,
1451 static int parse_destroy(struct context *, const struct token *,
1452 const char *, unsigned int,
1453 void *, unsigned int);
1454 static int parse_flush(struct context *, const struct token *,
1455 const char *, unsigned int,
1456 void *, unsigned int);
1457 static int parse_dump(struct context *, const struct token *,
1458 const char *, unsigned int,
1459 void *, unsigned int);
1460 static int parse_query(struct context *, const struct token *,
1461 const char *, unsigned int,
1462 void *, unsigned int);
1463 static int parse_action(struct context *, const struct token *,
1464 const char *, unsigned int,
1465 void *, unsigned int);
1466 static int parse_list(struct context *, const struct token *,
1467 const char *, unsigned int,
1468 void *, unsigned int);
1469 static int parse_isolate(struct context *, const struct token *,
1470 const char *, unsigned int,
1471 void *, unsigned int);
1472 static int parse_int(struct context *, const struct token *,
1473 const char *, unsigned int,
1474 void *, unsigned int);
1475 static int parse_prefix(struct context *, const struct token *,
1476 const char *, unsigned int,
1477 void *, unsigned int);
1478 static int parse_boolean(struct context *, const struct token *,
1479 const char *, unsigned int,
1480 void *, unsigned int);
1481 static int parse_string(struct context *, const struct token *,
1482 const char *, unsigned int,
1483 void *, unsigned int);
1484 static int parse_hex(struct context *ctx, const struct token *token,
1485 const char *str, unsigned int len,
1486 void *buf, unsigned int size);
1487 static int parse_string0(struct context *, const struct token *,
1488 const char *, unsigned int,
1489 void *, unsigned int);
1490 static int parse_mac_addr(struct context *, const struct token *,
1491 const char *, unsigned int,
1492 void *, unsigned int);
1493 static int parse_ipv4_addr(struct context *, const struct token *,
1494 const char *, unsigned int,
1495 void *, unsigned int);
1496 static int parse_ipv6_addr(struct context *, const struct token *,
1497 const char *, unsigned int,
1498 void *, unsigned int);
1499 static int parse_port(struct context *, const struct token *,
1500 const char *, unsigned int,
1501 void *, unsigned int);
1502 static int comp_none(struct context *, const struct token *,
1503 unsigned int, char *, unsigned int);
1504 static int comp_boolean(struct context *, const struct token *,
1505 unsigned int, char *, unsigned int);
1506 static int comp_action(struct context *, const struct token *,
1507 unsigned int, char *, unsigned int);
1508 static int comp_port(struct context *, const struct token *,
1509 unsigned int, char *, unsigned int);
1510 static int comp_rule_id(struct context *, const struct token *,
1511 unsigned int, char *, unsigned int);
1512 static int comp_vc_action_rss_type(struct context *, const struct token *,
1513 unsigned int, char *, unsigned int);
1514 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1515 unsigned int, char *, unsigned int);
1516 static int comp_set_raw_index(struct context *, const struct token *,
1517 unsigned int, char *, unsigned int);
1519 /** Token definitions. */
1520 static const struct token token_list[] = {
1521 /* Special tokens. */
1524 .help = "null entry, abused as the entry point",
1525 .next = NEXT(NEXT_ENTRY(FLOW)),
1530 .help = "command may end here",
1533 .name = "START_SET",
1534 .help = "null entry, abused as the entry point for set",
1535 .next = NEXT(NEXT_ENTRY(SET)),
1540 .help = "set command may end here",
1542 /* Common tokens. */
1546 .help = "integer value",
1551 .name = "{unsigned}",
1553 .help = "unsigned integer value",
1560 .help = "prefix length for bit-mask",
1561 .call = parse_prefix,
1565 .name = "{boolean}",
1567 .help = "any boolean value",
1568 .call = parse_boolean,
1569 .comp = comp_boolean,
1574 .help = "fixed string",
1575 .call = parse_string,
1581 .help = "fixed string",
1585 .name = "{file path}",
1587 .help = "file path",
1588 .call = parse_string0,
1592 .name = "{MAC address}",
1594 .help = "standard MAC address notation",
1595 .call = parse_mac_addr,
1599 .name = "{IPv4 address}",
1600 .type = "IPV4 ADDRESS",
1601 .help = "standard IPv4 address notation",
1602 .call = parse_ipv4_addr,
1606 .name = "{IPv6 address}",
1607 .type = "IPV6 ADDRESS",
1608 .help = "standard IPv6 address notation",
1609 .call = parse_ipv6_addr,
1613 .name = "{rule id}",
1615 .help = "rule identifier",
1617 .comp = comp_rule_id,
1620 .name = "{port_id}",
1622 .help = "port identifier",
1627 .name = "{group_id}",
1629 .help = "group identifier",
1633 [PRIORITY_LEVEL] = {
1636 .help = "priority level",
1640 /* Top-level command. */
1643 .type = "{command} {port_id} [{arg} [...]]",
1644 .help = "manage ingress/egress flow rules",
1645 .next = NEXT(NEXT_ENTRY
1656 /* Sub-level commands. */
1659 .help = "check whether a flow rule can be created",
1660 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1661 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1666 .help = "create a flow rule",
1667 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1668 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1673 .help = "destroy specific flow rules",
1674 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1675 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1676 .call = parse_destroy,
1680 .help = "destroy all flow rules",
1681 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1682 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1683 .call = parse_flush,
1687 .help = "dump all flow rules to file",
1688 .next = NEXT(next_dump_attr, NEXT_ENTRY(PORT_ID)),
1689 .args = ARGS(ARGS_ENTRY(struct buffer, args.dump.file),
1690 ARGS_ENTRY(struct buffer, port)),
1695 .help = "query an existing flow rule",
1696 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1697 NEXT_ENTRY(RULE_ID),
1698 NEXT_ENTRY(PORT_ID)),
1699 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1700 ARGS_ENTRY(struct buffer, args.query.rule),
1701 ARGS_ENTRY(struct buffer, port)),
1702 .call = parse_query,
1706 .help = "list existing flow rules",
1707 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1708 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1713 .help = "restrict ingress traffic to the defined flow rules",
1714 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1715 NEXT_ENTRY(PORT_ID)),
1716 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1717 ARGS_ENTRY(struct buffer, port)),
1718 .call = parse_isolate,
1720 /* Destroy arguments. */
1723 .help = "specify a rule identifier",
1724 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1725 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1726 .call = parse_destroy,
1728 /* Query arguments. */
1732 .help = "action to query, must be part of the rule",
1733 .call = parse_action,
1734 .comp = comp_action,
1736 /* List arguments. */
1739 .help = "specify a group",
1740 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1741 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1744 /* Validate/create attributes. */
1747 .help = "specify a group",
1748 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1749 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1754 .help = "specify a priority level",
1755 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1756 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1761 .help = "affect rule to ingress",
1762 .next = NEXT(next_vc_attr),
1767 .help = "affect rule to egress",
1768 .next = NEXT(next_vc_attr),
1773 .help = "apply rule directly to endpoints found in pattern",
1774 .next = NEXT(next_vc_attr),
1777 /* Validate/create pattern. */
1780 .help = "submit a list of pattern items",
1781 .next = NEXT(next_item),
1786 .help = "match value perfectly (with full bit-mask)",
1787 .call = parse_vc_spec,
1789 [ITEM_PARAM_SPEC] = {
1791 .help = "match value according to configured bit-mask",
1792 .call = parse_vc_spec,
1794 [ITEM_PARAM_LAST] = {
1796 .help = "specify upper bound to establish a range",
1797 .call = parse_vc_spec,
1799 [ITEM_PARAM_MASK] = {
1801 .help = "specify bit-mask with relevant bits set to one",
1802 .call = parse_vc_spec,
1804 [ITEM_PARAM_PREFIX] = {
1806 .help = "generate bit-mask from a prefix length",
1807 .call = parse_vc_spec,
1811 .help = "specify next pattern item",
1812 .next = NEXT(next_item),
1816 .help = "end list of pattern items",
1817 .priv = PRIV_ITEM(END, 0),
1818 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1823 .help = "no-op pattern item",
1824 .priv = PRIV_ITEM(VOID, 0),
1825 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1830 .help = "perform actions when pattern does not match",
1831 .priv = PRIV_ITEM(INVERT, 0),
1832 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1837 .help = "match any protocol for the current layer",
1838 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1839 .next = NEXT(item_any),
1844 .help = "number of layers covered",
1845 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1846 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1850 .help = "match traffic from/to the physical function",
1851 .priv = PRIV_ITEM(PF, 0),
1852 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1857 .help = "match traffic from/to a virtual function ID",
1858 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1859 .next = NEXT(item_vf),
1865 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1866 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1870 .help = "match traffic from/to a specific physical port",
1871 .priv = PRIV_ITEM(PHY_PORT,
1872 sizeof(struct rte_flow_item_phy_port)),
1873 .next = NEXT(item_phy_port),
1876 [ITEM_PHY_PORT_INDEX] = {
1878 .help = "physical port index",
1879 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1880 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1884 .help = "match traffic from/to a given DPDK port ID",
1885 .priv = PRIV_ITEM(PORT_ID,
1886 sizeof(struct rte_flow_item_port_id)),
1887 .next = NEXT(item_port_id),
1890 [ITEM_PORT_ID_ID] = {
1892 .help = "DPDK port ID",
1893 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1894 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1898 .help = "match traffic against value set in previously matched rule",
1899 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1900 .next = NEXT(item_mark),
1905 .help = "Integer value to match against",
1906 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1907 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1911 .help = "match an arbitrary byte string",
1912 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1913 .next = NEXT(item_raw),
1916 [ITEM_RAW_RELATIVE] = {
1918 .help = "look for pattern after the previous item",
1919 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1920 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1923 [ITEM_RAW_SEARCH] = {
1925 .help = "search pattern from offset (see also limit)",
1926 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1927 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1930 [ITEM_RAW_OFFSET] = {
1932 .help = "absolute or relative offset for pattern",
1933 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1934 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1936 [ITEM_RAW_LIMIT] = {
1938 .help = "search area limit for start of pattern",
1939 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1940 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1942 [ITEM_RAW_PATTERN] = {
1944 .help = "byte string to look for",
1945 .next = NEXT(item_raw,
1947 NEXT_ENTRY(ITEM_PARAM_IS,
1950 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1951 ARGS_ENTRY(struct rte_flow_item_raw, length),
1952 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1953 ITEM_RAW_PATTERN_SIZE)),
1957 .help = "match Ethernet header",
1958 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1959 .next = NEXT(item_eth),
1964 .help = "destination MAC",
1965 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1966 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1970 .help = "source MAC",
1971 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1972 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1976 .help = "EtherType",
1977 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1978 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1982 .help = "match 802.1Q/ad VLAN tag",
1983 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1984 .next = NEXT(item_vlan),
1989 .help = "tag control information",
1990 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1991 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1995 .help = "priority code point",
1996 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1997 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2002 .help = "drop eligible indicator",
2003 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2004 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2009 .help = "VLAN identifier",
2010 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2011 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
2014 [ITEM_VLAN_INNER_TYPE] = {
2015 .name = "inner_type",
2016 .help = "inner EtherType",
2017 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
2018 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
2023 .help = "match IPv4 header",
2024 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
2025 .next = NEXT(item_ipv4),
2030 .help = "type of service",
2031 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2032 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2033 hdr.type_of_service)),
2037 .help = "time to live",
2038 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2039 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2042 [ITEM_IPV4_PROTO] = {
2044 .help = "next protocol ID",
2045 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
2046 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2047 hdr.next_proto_id)),
2051 .help = "source address",
2052 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2053 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2058 .help = "destination address",
2059 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
2060 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
2065 .help = "match IPv6 header",
2066 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
2067 .next = NEXT(item_ipv6),
2072 .help = "traffic class",
2073 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2074 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2076 "\x0f\xf0\x00\x00")),
2078 [ITEM_IPV6_FLOW] = {
2080 .help = "flow label",
2081 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2082 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
2084 "\x00\x0f\xff\xff")),
2086 [ITEM_IPV6_PROTO] = {
2088 .help = "protocol (next header)",
2089 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2090 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2095 .help = "hop limit",
2096 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
2097 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2102 .help = "source address",
2103 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2104 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2109 .help = "destination address",
2110 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
2111 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2116 .help = "match ICMP header",
2117 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2118 .next = NEXT(item_icmp),
2121 [ITEM_ICMP_TYPE] = {
2123 .help = "ICMP packet type",
2124 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2125 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2128 [ITEM_ICMP_CODE] = {
2130 .help = "ICMP packet code",
2131 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2132 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2137 .help = "match UDP header",
2138 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2139 .next = NEXT(item_udp),
2144 .help = "UDP source port",
2145 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2146 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2151 .help = "UDP destination port",
2152 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2153 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2158 .help = "match TCP header",
2159 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2160 .next = NEXT(item_tcp),
2165 .help = "TCP source port",
2166 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2167 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2172 .help = "TCP destination port",
2173 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2174 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2177 [ITEM_TCP_FLAGS] = {
2179 .help = "TCP flags",
2180 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2181 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2186 .help = "match SCTP header",
2187 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2188 .next = NEXT(item_sctp),
2193 .help = "SCTP source port",
2194 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2195 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2200 .help = "SCTP destination port",
2201 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2202 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2207 .help = "validation tag",
2208 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2209 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2212 [ITEM_SCTP_CKSUM] = {
2215 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2216 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2221 .help = "match VXLAN header",
2222 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2223 .next = NEXT(item_vxlan),
2226 [ITEM_VXLAN_VNI] = {
2228 .help = "VXLAN identifier",
2229 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2234 .help = "match E-Tag header",
2235 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2236 .next = NEXT(item_e_tag),
2239 [ITEM_E_TAG_GRP_ECID_B] = {
2240 .name = "grp_ecid_b",
2241 .help = "GRP and E-CID base",
2242 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2243 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2249 .help = "match NVGRE header",
2250 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2251 .next = NEXT(item_nvgre),
2254 [ITEM_NVGRE_TNI] = {
2256 .help = "virtual subnet ID",
2257 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2258 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2262 .help = "match MPLS header",
2263 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2264 .next = NEXT(item_mpls),
2267 [ITEM_MPLS_LABEL] = {
2269 .help = "MPLS label",
2270 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2271 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2277 .help = "MPLS Traffic Class",
2278 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2279 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2285 .help = "MPLS Bottom-of-Stack",
2286 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2287 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2293 .help = "match GRE header",
2294 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2295 .next = NEXT(item_gre),
2298 [ITEM_GRE_PROTO] = {
2300 .help = "GRE protocol type",
2301 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2302 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2305 [ITEM_GRE_C_RSVD0_VER] = {
2306 .name = "c_rsvd0_ver",
2308 "checksum (1b), undefined (1b), key bit (1b),"
2309 " sequence number (1b), reserved 0 (9b),"
2311 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2312 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2315 [ITEM_GRE_C_BIT] = {
2317 .help = "checksum bit (C)",
2318 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2319 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2321 "\x80\x00\x00\x00")),
2323 [ITEM_GRE_S_BIT] = {
2325 .help = "sequence number bit (S)",
2326 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2327 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2329 "\x10\x00\x00\x00")),
2331 [ITEM_GRE_K_BIT] = {
2333 .help = "key bit (K)",
2334 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2335 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2337 "\x20\x00\x00\x00")),
2341 .help = "fuzzy pattern match, expect faster than default",
2342 .priv = PRIV_ITEM(FUZZY,
2343 sizeof(struct rte_flow_item_fuzzy)),
2344 .next = NEXT(item_fuzzy),
2347 [ITEM_FUZZY_THRESH] = {
2349 .help = "match accuracy threshold",
2350 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2351 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2356 .help = "match GTP header",
2357 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2358 .next = NEXT(item_gtp),
2361 [ITEM_GTP_FLAGS] = {
2362 .name = "v_pt_rsv_flags",
2363 .help = "GTP flags",
2364 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2365 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp,
2368 [ITEM_GTP_MSG_TYPE] = {
2370 .help = "GTP message type",
2371 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2372 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_gtp, msg_type)),
2376 .help = "tunnel endpoint identifier",
2377 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2378 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2382 .help = "match GTP header",
2383 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2384 .next = NEXT(item_gtp),
2389 .help = "match GTP header",
2390 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2391 .next = NEXT(item_gtp),
2396 .help = "match GENEVE header",
2397 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2398 .next = NEXT(item_geneve),
2401 [ITEM_GENEVE_VNI] = {
2403 .help = "virtual network identifier",
2404 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2405 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2407 [ITEM_GENEVE_PROTO] = {
2409 .help = "GENEVE protocol type",
2410 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2411 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2414 [ITEM_VXLAN_GPE] = {
2415 .name = "vxlan-gpe",
2416 .help = "match VXLAN-GPE header",
2417 .priv = PRIV_ITEM(VXLAN_GPE,
2418 sizeof(struct rte_flow_item_vxlan_gpe)),
2419 .next = NEXT(item_vxlan_gpe),
2422 [ITEM_VXLAN_GPE_VNI] = {
2424 .help = "VXLAN-GPE identifier",
2425 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2426 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2429 [ITEM_ARP_ETH_IPV4] = {
2430 .name = "arp_eth_ipv4",
2431 .help = "match ARP header for Ethernet/IPv4",
2432 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2433 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2434 .next = NEXT(item_arp_eth_ipv4),
2437 [ITEM_ARP_ETH_IPV4_SHA] = {
2439 .help = "sender hardware address",
2440 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2442 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2445 [ITEM_ARP_ETH_IPV4_SPA] = {
2447 .help = "sender IPv4 address",
2448 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2450 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2453 [ITEM_ARP_ETH_IPV4_THA] = {
2455 .help = "target hardware address",
2456 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2458 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2461 [ITEM_ARP_ETH_IPV4_TPA] = {
2463 .help = "target IPv4 address",
2464 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2466 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2471 .help = "match presence of any IPv6 extension header",
2472 .priv = PRIV_ITEM(IPV6_EXT,
2473 sizeof(struct rte_flow_item_ipv6_ext)),
2474 .next = NEXT(item_ipv6_ext),
2477 [ITEM_IPV6_EXT_NEXT_HDR] = {
2479 .help = "next header",
2480 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2481 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2486 .help = "match any ICMPv6 header",
2487 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2488 .next = NEXT(item_icmp6),
2491 [ITEM_ICMP6_TYPE] = {
2493 .help = "ICMPv6 type",
2494 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2495 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2498 [ITEM_ICMP6_CODE] = {
2500 .help = "ICMPv6 code",
2501 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2502 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2505 [ITEM_ICMP6_ND_NS] = {
2506 .name = "icmp6_nd_ns",
2507 .help = "match ICMPv6 neighbor discovery solicitation",
2508 .priv = PRIV_ITEM(ICMP6_ND_NS,
2509 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2510 .next = NEXT(item_icmp6_nd_ns),
2513 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2514 .name = "target_addr",
2515 .help = "target address",
2516 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2518 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2521 [ITEM_ICMP6_ND_NA] = {
2522 .name = "icmp6_nd_na",
2523 .help = "match ICMPv6 neighbor discovery advertisement",
2524 .priv = PRIV_ITEM(ICMP6_ND_NA,
2525 sizeof(struct rte_flow_item_icmp6_nd_na)),
2526 .next = NEXT(item_icmp6_nd_na),
2529 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2530 .name = "target_addr",
2531 .help = "target address",
2532 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2534 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2537 [ITEM_ICMP6_ND_OPT] = {
2538 .name = "icmp6_nd_opt",
2539 .help = "match presence of any ICMPv6 neighbor discovery"
2541 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2542 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2543 .next = NEXT(item_icmp6_nd_opt),
2546 [ITEM_ICMP6_ND_OPT_TYPE] = {
2548 .help = "ND option type",
2549 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2551 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2554 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2555 .name = "icmp6_nd_opt_sla_eth",
2556 .help = "match ICMPv6 neighbor discovery source Ethernet"
2557 " link-layer address option",
2559 (ICMP6_ND_OPT_SLA_ETH,
2560 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2561 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2564 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2566 .help = "source Ethernet LLA",
2567 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2569 .args = ARGS(ARGS_ENTRY_HTON
2570 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2572 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2573 .name = "icmp6_nd_opt_tla_eth",
2574 .help = "match ICMPv6 neighbor discovery target Ethernet"
2575 " link-layer address option",
2577 (ICMP6_ND_OPT_TLA_ETH,
2578 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2579 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2582 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2584 .help = "target Ethernet LLA",
2585 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2587 .args = ARGS(ARGS_ENTRY_HTON
2588 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2592 .help = "match metadata header",
2593 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2594 .next = NEXT(item_meta),
2597 [ITEM_META_DATA] = {
2599 .help = "metadata value",
2600 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2601 .args = ARGS(ARGS_ENTRY_MASK(struct rte_flow_item_meta,
2602 data, "\xff\xff\xff\xff")),
2606 .help = "match GRE key",
2607 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2608 .next = NEXT(item_gre_key),
2611 [ITEM_GRE_KEY_VALUE] = {
2613 .help = "key value",
2614 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2615 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2619 .help = "match GTP extension header with type 0x85",
2620 .priv = PRIV_ITEM(GTP_PSC,
2621 sizeof(struct rte_flow_item_gtp_psc)),
2622 .next = NEXT(item_gtp_psc),
2625 [ITEM_GTP_PSC_QFI] = {
2627 .help = "QoS flow identifier",
2628 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2629 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2632 [ITEM_GTP_PSC_PDU_T] = {
2635 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2636 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2641 .help = "match PPPoE session header",
2642 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2643 .next = NEXT(item_pppoes),
2648 .help = "match PPPoE discovery header",
2649 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2650 .next = NEXT(item_pppoed),
2653 [ITEM_PPPOE_SEID] = {
2655 .help = "session identifier",
2656 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2657 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2660 [ITEM_PPPOE_PROTO_ID] = {
2661 .name = "pppoe_proto_id",
2662 .help = "match PPPoE session protocol identifier",
2663 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2664 sizeof(struct rte_flow_item_pppoe_proto_id)),
2665 .next = NEXT(item_pppoe_proto_id, NEXT_ENTRY(UNSIGNED),
2667 .args = ARGS(ARGS_ENTRY_HTON
2668 (struct rte_flow_item_pppoe_proto_id, proto_id)),
2673 .help = "matches higig2 header",
2674 .priv = PRIV_ITEM(HIGIG2,
2675 sizeof(struct rte_flow_item_higig2_hdr)),
2676 .next = NEXT(item_higig2),
2679 [ITEM_HIGIG2_CLASSIFICATION] = {
2680 .name = "classification",
2681 .help = "matches classification of higig2 header",
2682 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2683 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2684 hdr.ppt1.classification)),
2686 [ITEM_HIGIG2_VID] = {
2688 .help = "matches vid of higig2 header",
2689 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2690 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2695 .help = "match tag value",
2696 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
2697 .next = NEXT(item_tag),
2702 .help = "tag value to match",
2703 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
2704 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, data)),
2706 [ITEM_TAG_INDEX] = {
2708 .help = "index of tag array to match",
2709 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
2710 NEXT_ENTRY(ITEM_PARAM_IS)),
2711 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
2713 [ITEM_L2TPV3OIP] = {
2714 .name = "l2tpv3oip",
2715 .help = "match L2TPv3 over IP header",
2716 .priv = PRIV_ITEM(L2TPV3OIP,
2717 sizeof(struct rte_flow_item_l2tpv3oip)),
2718 .next = NEXT(item_l2tpv3oip),
2721 [ITEM_L2TPV3OIP_SESSION_ID] = {
2722 .name = "session_id",
2723 .help = "session identifier",
2724 .next = NEXT(item_l2tpv3oip, NEXT_ENTRY(UNSIGNED), item_param),
2725 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_l2tpv3oip,
2730 .help = "match ESP header",
2731 .priv = PRIV_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
2732 .next = NEXT(item_esp),
2737 .help = "security policy index",
2738 .next = NEXT(item_esp, NEXT_ENTRY(UNSIGNED), item_param),
2739 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_esp,
2744 .help = "match AH header",
2745 .priv = PRIV_ITEM(AH, sizeof(struct rte_flow_item_ah)),
2746 .next = NEXT(item_ah),
2751 .help = "security parameters index",
2752 .next = NEXT(item_ah, NEXT_ENTRY(UNSIGNED), item_param),
2753 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ah, spi)),
2757 .help = "match pfcp header",
2758 .priv = PRIV_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
2759 .next = NEXT(item_pfcp),
2762 [ITEM_PFCP_S_FIELD] = {
2765 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
2766 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp,
2769 [ITEM_PFCP_SEID] = {
2771 .help = "session endpoint identifier",
2772 .next = NEXT(item_pfcp, NEXT_ENTRY(UNSIGNED), item_param),
2773 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pfcp, seid)),
2775 /* Validate/create actions. */
2778 .help = "submit a list of associated actions",
2779 .next = NEXT(next_action),
2784 .help = "specify next action",
2785 .next = NEXT(next_action),
2789 .help = "end list of actions",
2790 .priv = PRIV_ACTION(END, 0),
2795 .help = "no-op action",
2796 .priv = PRIV_ACTION(VOID, 0),
2797 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2800 [ACTION_PASSTHRU] = {
2802 .help = "let subsequent rule process matched packets",
2803 .priv = PRIV_ACTION(PASSTHRU, 0),
2804 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2809 .help = "redirect traffic to a given group",
2810 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2811 .next = NEXT(action_jump),
2814 [ACTION_JUMP_GROUP] = {
2816 .help = "group to redirect traffic to",
2817 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2818 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2819 .call = parse_vc_conf,
2823 .help = "attach 32 bit value to packets",
2824 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2825 .next = NEXT(action_mark),
2828 [ACTION_MARK_ID] = {
2830 .help = "32 bit value to return with packets",
2831 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2832 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2833 .call = parse_vc_conf,
2837 .help = "flag packets",
2838 .priv = PRIV_ACTION(FLAG, 0),
2839 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2844 .help = "assign packets to a given queue index",
2845 .priv = PRIV_ACTION(QUEUE,
2846 sizeof(struct rte_flow_action_queue)),
2847 .next = NEXT(action_queue),
2850 [ACTION_QUEUE_INDEX] = {
2852 .help = "queue index to use",
2853 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2854 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2855 .call = parse_vc_conf,
2859 .help = "drop packets (note: passthru has priority)",
2860 .priv = PRIV_ACTION(DROP, 0),
2861 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2866 .help = "enable counters for this rule",
2867 .priv = PRIV_ACTION(COUNT,
2868 sizeof(struct rte_flow_action_count)),
2869 .next = NEXT(action_count),
2872 [ACTION_COUNT_ID] = {
2873 .name = "identifier",
2874 .help = "counter identifier to use",
2875 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2876 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2877 .call = parse_vc_conf,
2879 [ACTION_COUNT_SHARED] = {
2881 .help = "shared counter",
2882 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2883 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2885 .call = parse_vc_conf,
2889 .help = "spread packets among several queues",
2890 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2891 .next = NEXT(action_rss),
2892 .call = parse_vc_action_rss,
2894 [ACTION_RSS_FUNC] = {
2896 .help = "RSS hash function to apply",
2897 .next = NEXT(action_rss,
2898 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2899 ACTION_RSS_FUNC_TOEPLITZ,
2900 ACTION_RSS_FUNC_SIMPLE_XOR,
2901 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2903 [ACTION_RSS_FUNC_DEFAULT] = {
2905 .help = "default hash function",
2906 .call = parse_vc_action_rss_func,
2908 [ACTION_RSS_FUNC_TOEPLITZ] = {
2910 .help = "Toeplitz hash function",
2911 .call = parse_vc_action_rss_func,
2913 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2914 .name = "simple_xor",
2915 .help = "simple XOR hash function",
2916 .call = parse_vc_action_rss_func,
2918 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2919 .name = "symmetric_toeplitz",
2920 .help = "Symmetric Toeplitz hash function",
2921 .call = parse_vc_action_rss_func,
2923 [ACTION_RSS_LEVEL] = {
2925 .help = "encapsulation level for \"types\"",
2926 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2927 .args = ARGS(ARGS_ENTRY_ARB
2928 (offsetof(struct action_rss_data, conf) +
2929 offsetof(struct rte_flow_action_rss, level),
2930 sizeof(((struct rte_flow_action_rss *)0)->
2933 [ACTION_RSS_TYPES] = {
2935 .help = "specific RSS hash types",
2936 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2938 [ACTION_RSS_TYPE] = {
2940 .help = "RSS hash type",
2941 .call = parse_vc_action_rss_type,
2942 .comp = comp_vc_action_rss_type,
2944 [ACTION_RSS_KEY] = {
2946 .help = "RSS hash key",
2947 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2948 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2950 (offsetof(struct action_rss_data, conf) +
2951 offsetof(struct rte_flow_action_rss, key_len),
2952 sizeof(((struct rte_flow_action_rss *)0)->
2954 ARGS_ENTRY(struct action_rss_data, key)),
2956 [ACTION_RSS_KEY_LEN] = {
2958 .help = "RSS hash key length in bytes",
2959 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2960 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2961 (offsetof(struct action_rss_data, conf) +
2962 offsetof(struct rte_flow_action_rss, key_len),
2963 sizeof(((struct rte_flow_action_rss *)0)->
2966 RSS_HASH_KEY_LENGTH)),
2968 [ACTION_RSS_QUEUES] = {
2970 .help = "queue indices to use",
2971 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2972 .call = parse_vc_conf,
2974 [ACTION_RSS_QUEUE] = {
2976 .help = "queue index",
2977 .call = parse_vc_action_rss_queue,
2978 .comp = comp_vc_action_rss_queue,
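	/*
	 * Illustrative usage of the RSS action (queues 0-3 assumed to be
	 * configured); fields left out fall back to the defaults filled in by
	 * parse_vc_action_rss():
	 *   flow create 0 ingress pattern eth / end
	 *        actions rss types ipv4-udp end queues 0 1 2 3 end / end
	 */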
2982 .help = "direct traffic to physical function",
2983 .priv = PRIV_ACTION(PF, 0),
2984 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2989 .help = "direct traffic to a virtual function ID",
2990 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2991 .next = NEXT(action_vf),
2994 [ACTION_VF_ORIGINAL] = {
2996 .help = "use original VF ID if possible",
2997 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2998 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
3000 .call = parse_vc_conf,
3005 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
3006 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
3007 .call = parse_vc_conf,
3009 [ACTION_PHY_PORT] = {
3011 .help = "direct packets to physical port index",
3012 .priv = PRIV_ACTION(PHY_PORT,
3013 sizeof(struct rte_flow_action_phy_port)),
3014 .next = NEXT(action_phy_port),
3017 [ACTION_PHY_PORT_ORIGINAL] = {
3019 .help = "use original port index if possible",
3020 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
3021 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
3023 .call = parse_vc_conf,
3025 [ACTION_PHY_PORT_INDEX] = {
3027 .help = "physical port index",
3028 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
3029 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
3031 .call = parse_vc_conf,
3033 [ACTION_PORT_ID] = {
3035 .help = "direct matching traffic to a given DPDK port ID",
3036 .priv = PRIV_ACTION(PORT_ID,
3037 sizeof(struct rte_flow_action_port_id)),
3038 .next = NEXT(action_port_id),
3041 [ACTION_PORT_ID_ORIGINAL] = {
3043 .help = "use original DPDK port ID if possible",
3044 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
3045 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
3047 .call = parse_vc_conf,
3049 [ACTION_PORT_ID_ID] = {
3051 .help = "DPDK port ID",
3052 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
3053 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
3054 .call = parse_vc_conf,
3058 .help = "meter matched packets using the given meter id",
3059 .priv = PRIV_ACTION(METER,
3060 sizeof(struct rte_flow_action_meter)),
3061 .next = NEXT(action_meter),
3064 [ACTION_METER_ID] = {
3066 .help = "meter id to use",
3067 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
3068 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
3069 .call = parse_vc_conf,
3071 [ACTION_OF_SET_MPLS_TTL] = {
3072 .name = "of_set_mpls_ttl",
3073 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
3076 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
3077 .next = NEXT(action_of_set_mpls_ttl),
3080 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
3083 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
3084 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
3086 .call = parse_vc_conf,
3088 [ACTION_OF_DEC_MPLS_TTL] = {
3089 .name = "of_dec_mpls_ttl",
3090 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
3091 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
3092 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3095 [ACTION_OF_SET_NW_TTL] = {
3096 .name = "of_set_nw_ttl",
3097 .help = "OpenFlow's OFPAT_SET_NW_TTL",
3100 sizeof(struct rte_flow_action_of_set_nw_ttl)),
3101 .next = NEXT(action_of_set_nw_ttl),
3104 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
3107 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
3108 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
3110 .call = parse_vc_conf,
3112 [ACTION_OF_DEC_NW_TTL] = {
3113 .name = "of_dec_nw_ttl",
3114 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
3115 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
3116 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3119 [ACTION_OF_COPY_TTL_OUT] = {
3120 .name = "of_copy_ttl_out",
3121 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
3122 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
3123 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3126 [ACTION_OF_COPY_TTL_IN] = {
3127 .name = "of_copy_ttl_in",
3128 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
3129 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
3130 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3133 [ACTION_OF_POP_VLAN] = {
3134 .name = "of_pop_vlan",
3135 .help = "OpenFlow's OFPAT_POP_VLAN",
3136 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
3137 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3140 [ACTION_OF_PUSH_VLAN] = {
3141 .name = "of_push_vlan",
3142 .help = "OpenFlow's OFPAT_PUSH_VLAN",
3145 sizeof(struct rte_flow_action_of_push_vlan)),
3146 .next = NEXT(action_of_push_vlan),
3149 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
3150 .name = "ethertype",
3151 .help = "EtherType",
3152 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
3153 .args = ARGS(ARGS_ENTRY_HTON
3154 (struct rte_flow_action_of_push_vlan,
3156 .call = parse_vc_conf,
3158 [ACTION_OF_SET_VLAN_VID] = {
3159 .name = "of_set_vlan_vid",
3160 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
3163 sizeof(struct rte_flow_action_of_set_vlan_vid)),
3164 .next = NEXT(action_of_set_vlan_vid),
3167 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
3170 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
3171 .args = ARGS(ARGS_ENTRY_HTON
3172 (struct rte_flow_action_of_set_vlan_vid,
3174 .call = parse_vc_conf,
3176 [ACTION_OF_SET_VLAN_PCP] = {
3177 .name = "of_set_vlan_pcp",
3178 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
3181 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
3182 .next = NEXT(action_of_set_vlan_pcp),
3185 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
3187 .help = "VLAN priority",
3188 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
3189 .args = ARGS(ARGS_ENTRY_HTON
3190 (struct rte_flow_action_of_set_vlan_pcp,
3192 .call = parse_vc_conf,
3194 [ACTION_OF_POP_MPLS] = {
3195 .name = "of_pop_mpls",
3196 .help = "OpenFlow's OFPAT_POP_MPLS",
3197 .priv = PRIV_ACTION(OF_POP_MPLS,
3198 sizeof(struct rte_flow_action_of_pop_mpls)),
3199 .next = NEXT(action_of_pop_mpls),
3202 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3203 .name = "ethertype",
3204 .help = "EtherType",
3205 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3206 .args = ARGS(ARGS_ENTRY_HTON
3207 (struct rte_flow_action_of_pop_mpls,
3209 .call = parse_vc_conf,
3211 [ACTION_OF_PUSH_MPLS] = {
3212 .name = "of_push_mpls",
3213 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3216 sizeof(struct rte_flow_action_of_push_mpls)),
3217 .next = NEXT(action_of_push_mpls),
3220 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3221 .name = "ethertype",
3222 .help = "EtherType",
3223 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3224 .args = ARGS(ARGS_ENTRY_HTON
3225 (struct rte_flow_action_of_push_mpls,
3227 .call = parse_vc_conf,
3229 [ACTION_VXLAN_ENCAP] = {
3230 .name = "vxlan_encap",
3231 .help = "VXLAN encapsulation, uses configuration set by \"set"
3233 .priv = PRIV_ACTION(VXLAN_ENCAP,
3234 sizeof(struct action_vxlan_encap_data)),
3235 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3236 .call = parse_vc_action_vxlan_encap,
3238 [ACTION_VXLAN_DECAP] = {
3239 .name = "vxlan_decap",
3240 .help = "Performs a decapsulation action by stripping all"
3241 " headers of the VXLAN tunnel network overlay from the"
3243 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3244 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
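	/*
	 * Illustrative usage of vxlan_encap/vxlan_decap, assuming the outer
	 * headers were configured beforehand with the "set vxlan" command:
	 *   flow create 0 ingress pattern eth / end
	 *        actions vxlan_encap / queue index 0 / end
	 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan / end
	 *        actions vxlan_decap / queue index 0 / end
	 */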
3247 [ACTION_NVGRE_ENCAP] = {
3248 .name = "nvgre_encap",
3249 .help = "NVGRE encapsulation, uses configuration set by \"set"
3251 .priv = PRIV_ACTION(NVGRE_ENCAP,
3252 sizeof(struct action_nvgre_encap_data)),
3253 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3254 .call = parse_vc_action_nvgre_encap,
3256 [ACTION_NVGRE_DECAP] = {
3257 .name = "nvgre_decap",
3258 .help = "Performs a decapsulation action by stripping all"
3259 " headers of the NVGRE tunnel network overlay from the"
3261 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3262 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3265 [ACTION_L2_ENCAP] = {
3267 .help = "l2 encap, uses configuration set by"
3268 " \"set l2_encap\"",
3269 .priv = PRIV_ACTION(RAW_ENCAP,
3270 sizeof(struct action_raw_encap_data)),
3271 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3272 .call = parse_vc_action_l2_encap,
3274 [ACTION_L2_DECAP] = {
3276 .help = "l2 decap, uses configuration set by"
3277 " \"set l2_decap\"",
3278 .priv = PRIV_ACTION(RAW_DECAP,
3279 sizeof(struct action_raw_decap_data)),
3280 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3281 .call = parse_vc_action_l2_decap,
3283 [ACTION_MPLSOGRE_ENCAP] = {
3284 .name = "mplsogre_encap",
3285 .help = "mplsogre encapsulation, uses configuration set by"
3286 " \"set mplsogre_encap\"",
3287 .priv = PRIV_ACTION(RAW_ENCAP,
3288 sizeof(struct action_raw_encap_data)),
3289 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3290 .call = parse_vc_action_mplsogre_encap,
3292 [ACTION_MPLSOGRE_DECAP] = {
3293 .name = "mplsogre_decap",
3294 .help = "mplsogre decapsulation, uses configuration set by"
3295 " \"set mplsogre_decap\"",
3296 .priv = PRIV_ACTION(RAW_DECAP,
3297 sizeof(struct action_raw_decap_data)),
3298 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3299 .call = parse_vc_action_mplsogre_decap,
3301 [ACTION_MPLSOUDP_ENCAP] = {
3302 .name = "mplsoudp_encap",
3303 .help = "mplsoudp encapsulation, uses configuration set by"
3304 " \"set mplsoudp_encap\"",
3305 .priv = PRIV_ACTION(RAW_ENCAP,
3306 sizeof(struct action_raw_encap_data)),
3307 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3308 .call = parse_vc_action_mplsoudp_encap,
3310 [ACTION_MPLSOUDP_DECAP] = {
3311 .name = "mplsoudp_decap",
3312 .help = "mplsoudp decapsulation, uses configuration set by"
3313 " \"set mplsoudp_decap\"",
3314 .priv = PRIV_ACTION(RAW_DECAP,
3315 sizeof(struct action_raw_decap_data)),
3316 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3317 .call = parse_vc_action_mplsoudp_decap,
3319 [ACTION_SET_IPV4_SRC] = {
3320 .name = "set_ipv4_src",
3321 .help = "Set a new IPv4 source address in the outermost"
3323 .priv = PRIV_ACTION(SET_IPV4_SRC,
3324 sizeof(struct rte_flow_action_set_ipv4)),
3325 .next = NEXT(action_set_ipv4_src),
3328 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3329 .name = "ipv4_addr",
3330 .help = "new IPv4 source address to set",
3331 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3332 .args = ARGS(ARGS_ENTRY_HTON
3333 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3334 .call = parse_vc_conf,
3336 [ACTION_SET_IPV4_DST] = {
3337 .name = "set_ipv4_dst",
3338 .help = "Set a new IPv4 destination address in the outermost"
3340 .priv = PRIV_ACTION(SET_IPV4_DST,
3341 sizeof(struct rte_flow_action_set_ipv4)),
3342 .next = NEXT(action_set_ipv4_dst),
3345 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3346 .name = "ipv4_addr",
3347 .help = "new IPv4 destination address to set",
3348 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3349 .args = ARGS(ARGS_ENTRY_HTON
3350 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3351 .call = parse_vc_conf,
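	/*
	 * Illustrative usage of the IPv4 address rewrite actions (addresses
	 * are arbitrary):
	 *   flow create 0 egress pattern eth / ipv4 / end
	 *        actions set_ipv4_src ipv4_addr 172.16.0.10 /
	 *        set_ipv4_dst ipv4_addr 172.16.0.20 / end
	 */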
3353 [ACTION_SET_IPV6_SRC] = {
3354 .name = "set_ipv6_src",
3355 .help = "Set a new IPv6 source address in the outermost"
3357 .priv = PRIV_ACTION(SET_IPV6_SRC,
3358 sizeof(struct rte_flow_action_set_ipv6)),
3359 .next = NEXT(action_set_ipv6_src),
3362 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3363 .name = "ipv6_addr",
3364 .help = "new IPv6 source address to set",
3365 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3366 .args = ARGS(ARGS_ENTRY_HTON
3367 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3368 .call = parse_vc_conf,
3370 [ACTION_SET_IPV6_DST] = {
3371 .name = "set_ipv6_dst",
3372 .help = "Set a new IPv6 destination address in the outermost"
3374 .priv = PRIV_ACTION(SET_IPV6_DST,
3375 sizeof(struct rte_flow_action_set_ipv6)),
3376 .next = NEXT(action_set_ipv6_dst),
3379 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3380 .name = "ipv6_addr",
3381 .help = "new IPv6 destination address to set",
3382 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3383 .args = ARGS(ARGS_ENTRY_HTON
3384 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3385 .call = parse_vc_conf,
3387 [ACTION_SET_TP_SRC] = {
3388 .name = "set_tp_src",
3389 .help = "set a new source port number in the outermost"
3391 .priv = PRIV_ACTION(SET_TP_SRC,
3392 sizeof(struct rte_flow_action_set_tp)),
3393 .next = NEXT(action_set_tp_src),
3396 [ACTION_SET_TP_SRC_TP_SRC] = {
3398 .help = "new source port number to set",
3399 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3400 .args = ARGS(ARGS_ENTRY_HTON
3401 (struct rte_flow_action_set_tp, port)),
3402 .call = parse_vc_conf,
3404 [ACTION_SET_TP_DST] = {
3405 .name = "set_tp_dst",
3406 .help = "set a new destination port number in the outermost"
3408 .priv = PRIV_ACTION(SET_TP_DST,
3409 sizeof(struct rte_flow_action_set_tp)),
3410 .next = NEXT(action_set_tp_dst),
3413 [ACTION_SET_TP_DST_TP_DST] = {
3415 .help = "new destination port number to set",
3416 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3417 .args = ARGS(ARGS_ENTRY_HTON
3418 (struct rte_flow_action_set_tp, port)),
3419 .call = parse_vc_conf,
3421 [ACTION_MAC_SWAP] = {
3423 .help = "Swap the source and destination MAC addresses"
3424 " in the outermost Ethernet header",
3425 .priv = PRIV_ACTION(MAC_SWAP, 0),
3426 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3429 [ACTION_DEC_TTL] = {
3431 .help = "decrease network TTL if available",
3432 .priv = PRIV_ACTION(DEC_TTL, 0),
3433 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3436 [ACTION_SET_TTL] = {
3438 .help = "set ttl value",
3439 .priv = PRIV_ACTION(SET_TTL,
3440 sizeof(struct rte_flow_action_set_ttl)),
3441 .next = NEXT(action_set_ttl),
3444 [ACTION_SET_TTL_TTL] = {
3445 .name = "ttl_value",
3446 .help = "new ttl value to set",
3447 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3448 .args = ARGS(ARGS_ENTRY_HTON
3449 (struct rte_flow_action_set_ttl, ttl_value)),
3450 .call = parse_vc_conf,
3452 [ACTION_SET_MAC_SRC] = {
3453 .name = "set_mac_src",
3454 .help = "set source mac address",
3455 .priv = PRIV_ACTION(SET_MAC_SRC,
3456 sizeof(struct rte_flow_action_set_mac)),
3457 .next = NEXT(action_set_mac_src),
3460 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3462 .help = "new source mac address",
3463 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3464 .args = ARGS(ARGS_ENTRY_HTON
3465 (struct rte_flow_action_set_mac, mac_addr)),
3466 .call = parse_vc_conf,
3468 [ACTION_SET_MAC_DST] = {
3469 .name = "set_mac_dst",
3470 .help = "set destination mac address",
3471 .priv = PRIV_ACTION(SET_MAC_DST,
3472 sizeof(struct rte_flow_action_set_mac)),
3473 .next = NEXT(action_set_mac_dst),
3476 [ACTION_SET_MAC_DST_MAC_DST] = {
3478 .help = "new destination mac address to set",
3479 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3480 .args = ARGS(ARGS_ENTRY_HTON
3481 (struct rte_flow_action_set_mac, mac_addr)),
3482 .call = parse_vc_conf,
3484 [ACTION_INC_TCP_SEQ] = {
3485 .name = "inc_tcp_seq",
3486 .help = "increase TCP sequence number",
3487 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3488 .next = NEXT(action_inc_tcp_seq),
3491 [ACTION_INC_TCP_SEQ_VALUE] = {
3493 .help = "the value to increase TCP sequence number by",
3494 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3495 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3496 .call = parse_vc_conf,
3498 [ACTION_DEC_TCP_SEQ] = {
3499 .name = "dec_tcp_seq",
3500 .help = "decrease TCP sequence number",
3501 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3502 .next = NEXT(action_dec_tcp_seq),
3505 [ACTION_DEC_TCP_SEQ_VALUE] = {
3507 .help = "the value to decrease TCP sequence number by",
3508 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3509 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3510 .call = parse_vc_conf,
3512 [ACTION_INC_TCP_ACK] = {
3513 .name = "inc_tcp_ack",
3514 .help = "increase TCP acknowledgment number",
3515 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3516 .next = NEXT(action_inc_tcp_ack),
3519 [ACTION_INC_TCP_ACK_VALUE] = {
3521 .help = "the value to increase TCP acknowledgment number by",
3522 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3523 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3524 .call = parse_vc_conf,
3526 [ACTION_DEC_TCP_ACK] = {
3527 .name = "dec_tcp_ack",
3528 .help = "decrease TCP acknowledgment number",
3529 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3530 .next = NEXT(action_dec_tcp_ack),
3533 [ACTION_DEC_TCP_ACK_VALUE] = {
3535 .help = "the value to decrease TCP acknowledgment number by",
3536 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3537 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3538 .call = parse_vc_conf,
3540 [ACTION_RAW_ENCAP] = {
3541 .name = "raw_encap",
3542 .help = "encapsulation data, defined by set raw_encap",
3543 .priv = PRIV_ACTION(RAW_ENCAP,
3544 sizeof(struct action_raw_encap_data)),
3545 .next = NEXT(action_raw_encap),
3546 .call = parse_vc_action_raw_encap,
3548 [ACTION_RAW_ENCAP_INDEX] = {
3550 .help = "the index of raw_encap_confs",
3551 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
3553 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
3556 .help = "unsigned integer value",
3557 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3558 .call = parse_vc_action_raw_encap_index,
3559 .comp = comp_set_raw_index,
3561 [ACTION_RAW_DECAP] = {
3562 .name = "raw_decap",
3563 .help = "decapsulation data, defined by set raw_decap",
3564 .priv = PRIV_ACTION(RAW_DECAP,
3565 sizeof(struct action_raw_decap_data)),
3566 .next = NEXT(action_raw_decap),
3567 .call = parse_vc_action_raw_decap,
3569 [ACTION_RAW_DECAP_INDEX] = {
3571 .help = "the index of raw_decap_confs",
3572 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
3574 [ACTION_RAW_DECAP_INDEX_VALUE] = {
3577 .help = "unsigned integer value",
3578 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3579 .call = parse_vc_action_raw_decap_index,
3580 .comp = comp_set_raw_index,
3582 /* Top-level command. */
3585 .help = "set raw encap/decap data",
3586 .type = "set raw_encap|raw_decap <index> <pattern>",
3587 .next = NEXT(NEXT_ENTRY
3590 .call = parse_set_init,
3592 /* Sub-level commands. */
3594 .name = "raw_encap",
3595 .help = "set raw encap data",
3596 .next = NEXT(next_set_raw),
3597 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3598 (offsetof(struct buffer, port),
3599 sizeof(((struct buffer *)0)->port),
3600 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3601 .call = parse_set_raw_encap_decap,
3604 .name = "raw_decap",
3605 .help = "set raw decap data",
3606 .next = NEXT(next_set_raw),
3607 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3608 (offsetof(struct buffer, port),
3609 sizeof(((struct buffer *)0)->port),
3610 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3611 .call = parse_set_raw_encap_decap,
3616 .help = "index of raw_encap/raw_decap data",
3617 .next = NEXT(next_item),
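	/*
	 * Illustrative raw encap/decap workflow (values are arbitrary; the
	 * pattern given to "set raw_encap" is terminated by "end_set"):
	 *   set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1 /
	 *       ipv4 / udp / end_set
	 *   flow create 0 egress pattern eth / ipv4 / end
	 *        actions raw_encap index 4 / end
	 */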
3620 [ACTION_SET_TAG] = {
3623 .priv = PRIV_ACTION(SET_TAG,
3624 sizeof(struct rte_flow_action_set_tag)),
3625 .next = NEXT(action_set_tag),
3628 [ACTION_SET_TAG_INDEX] = {
3630 .help = "index of tag array",
3631 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3632 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
3633 .call = parse_vc_conf,
3635 [ACTION_SET_TAG_DATA] = {
3637 .help = "tag value",
3638 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3639 .args = ARGS(ARGS_ENTRY
3640 (struct rte_flow_action_set_tag, data)),
3641 .call = parse_vc_conf,
3643 [ACTION_SET_TAG_MASK] = {
3645 .help = "mask for tag value",
3646 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3647 .args = ARGS(ARGS_ENTRY
3648 (struct rte_flow_action_set_tag, mask)),
3649 .call = parse_vc_conf,
3651 [ACTION_SET_META] = {
3653 .help = "set metadata",
3654 .priv = PRIV_ACTION(SET_META,
3655 sizeof(struct rte_flow_action_set_meta)),
3656 .next = NEXT(action_set_meta),
3657 .call = parse_vc_action_set_meta,
3659 [ACTION_SET_META_DATA] = {
3661 .help = "metadata value",
3662 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3663 .args = ARGS(ARGS_ENTRY
3664 (struct rte_flow_action_set_meta, data)),
3665 .call = parse_vc_conf,
3667 [ACTION_SET_META_MASK] = {
3669 .help = "mask for metadata value",
3670 .next = NEXT(action_set_meta, NEXT_ENTRY(UNSIGNED)),
3671 .args = ARGS(ARGS_ENTRY
3672 (struct rte_flow_action_set_meta, mask)),
3673 .call = parse_vc_conf,
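	/*
	 * Illustrative usage of set_tag/set_meta (sub-token names "index",
	 * "data" and "mask" are assumed; values are arbitrary):
	 *   flow create 0 ingress pattern eth / end
	 *        actions set_tag index 0 data 0xabc mask 0xfff /
	 *        set_meta data 0x1 mask 0xf / end
	 */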
3675 [ACTION_SET_IPV4_DSCP] = {
3676 .name = "set_ipv4_dscp",
3677 .help = "set IPv4 DSCP value",
3678 .priv = PRIV_ACTION(SET_IPV4_DSCP,
3679 sizeof(struct rte_flow_action_set_dscp)),
3680 .next = NEXT(action_set_ipv4_dscp),
3683 [ACTION_SET_IPV4_DSCP_VALUE] = {
3684 .name = "dscp_value",
3685 .help = "new IPv4 DSCP value to set",
3686 .next = NEXT(action_set_ipv4_dscp, NEXT_ENTRY(UNSIGNED)),
3687 .args = ARGS(ARGS_ENTRY
3688 (struct rte_flow_action_set_dscp, dscp)),
3689 .call = parse_vc_conf,
3691 [ACTION_SET_IPV6_DSCP] = {
3692 .name = "set_ipv6_dscp",
3693 .help = "set IPv6 DSCP value",
3694 .priv = PRIV_ACTION(SET_IPV6_DSCP,
3695 sizeof(struct rte_flow_action_set_dscp)),
3696 .next = NEXT(action_set_ipv6_dscp),
3699 [ACTION_SET_IPV6_DSCP_VALUE] = {
3700 .name = "dscp_value",
3701 .help = "new IPv6 DSCP value to set",
3702 .next = NEXT(action_set_ipv6_dscp, NEXT_ENTRY(UNSIGNED)),
3703 .args = ARGS(ARGS_ENTRY
3704 (struct rte_flow_action_set_dscp, dscp)),
3705 .call = parse_vc_conf,
3709 .help = "set the flow aging timeout for this rule",
3710 .next = NEXT(action_age),
3711 .priv = PRIV_ACTION(AGE,
3712 sizeof(struct rte_flow_action_age)),
3715 [ACTION_AGE_TIMEOUT] = {
3717 .help = "flow age timeout value",
3718 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_age,
3720 .next = NEXT(action_age, NEXT_ENTRY(UNSIGNED)),
3721 .call = parse_vc_conf,
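	/*
	 * Illustrative usage of the age action (timeout in seconds; the
	 * "timeout" sub-token name is assumed); aged-out rules can then be
	 * listed with "flow aged <port>":
	 *   flow create 0 ingress pattern eth / end
	 *        actions age timeout 30 / queue index 0 / end
	 */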
3725 /** Remove and return last entry from argument stack. */
3726 static const struct arg *
3727 pop_args(struct context *ctx)
3729 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3732 /** Add entry on top of the argument stack. */
3734 push_args(struct context *ctx, const struct arg *arg)
3736 if (ctx->args_num == CTX_STACK_SIZE)
3738 ctx->args[ctx->args_num++] = arg;
3742 /** Spread value into buffer according to bit-mask; when dst is NULL nothing is written and only the number of bits set in arg->mask is counted and returned. */
3744 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3746 uint32_t i = arg->size;
3754 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3763 unsigned int shift = 0;
3764 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
3766 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3767 if (!(arg->mask[i] & (1 << shift)))
3772 *buf &= ~(1 << shift);
3773 *buf |= (val & 1) << shift;
3781 /** Compare a string with a partial one of a given length. */
3783 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3785 int r = strncmp(full, partial, partial_len);
3789 if (strlen(full) <= partial_len)
3791 return full[partial_len];
3795 * Parse a prefix length and generate a bit-mask.
3797 * Last argument (ctx->args) is retrieved to determine mask size, storage
3798 * location and whether the result must use network byte ordering.
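 *
 * For example, with a 4-byte field stored in network byte order and no
 * bit-mask, a prefix of 24 yields the mask ff ff ff 00; partial bytes come
 * from conv[] below (e.g. a prefix of 20 yields ff ff f0 00).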
3801 parse_prefix(struct context *ctx, const struct token *token,
3802 const char *str, unsigned int len,
3803 void *buf, unsigned int size)
3805 const struct arg *arg = pop_args(ctx);
3806 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3813 /* Argument is expected. */
3817 u = strtoumax(str, &end, 0);
3818 if (errno || (size_t)(end - str) != len)
3823 extra = arg_entry_bf_fill(NULL, 0, arg);
3832 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3833 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3840 if (bytes > size || bytes + !!extra > size)
3844 buf = (uint8_t *)ctx->object + arg->offset;
3845 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3847 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3848 memset(buf, 0x00, size - bytes);
3850 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
3854 memset(buf, 0xff, bytes);
3855 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3857 ((uint8_t *)buf)[bytes] = conv[extra];
3860 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3863 push_args(ctx, arg);
3867 /** Default parsing function for token name matching. */
3869 parse_default(struct context *ctx, const struct token *token,
3870 const char *str, unsigned int len,
3871 void *buf, unsigned int size)
3876 if (strcmp_partial(token->name, str, len))
3881 /** Parse flow command, initialize output buffer for subsequent tokens. */
3883 parse_init(struct context *ctx, const struct token *token,
3884 const char *str, unsigned int len,
3885 void *buf, unsigned int size)
3887 struct buffer *out = buf;
3889 /* Token name must match. */
3890 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3892 /* Nothing else to do if there is no buffer. */
3895 /* Make sure buffer is large enough. */
3896 if (size < sizeof(*out))
3898 /* Initialize buffer. */
3899 memset(out, 0x00, sizeof(*out));
3900 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3903 ctx->objmask = NULL;
3907 /** Parse tokens for validate/create commands. */
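/*
 * Note: pattern items and actions are appended upward from the start of the
 * output buffer, while their spec/conf storage is carved downward from
 * args.vc.data (initially the end of the buffer); parsing fails once the two
 * regions would overlap.
 */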
3909 parse_vc(struct context *ctx, const struct token *token,
3910 const char *str, unsigned int len,
3911 void *buf, unsigned int size)
3913 struct buffer *out = buf;
3917 /* Token name must match. */
3918 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3920 /* Nothing else to do if there is no buffer. */
3923 if (!out->command) {
3924 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3926 if (sizeof(*out) > size)
3928 out->command = ctx->curr;
3931 ctx->objmask = NULL;
3932 out->args.vc.data = (uint8_t *)out + size;
3936 ctx->object = &out->args.vc.attr;
3937 ctx->objmask = NULL;
3938 switch (ctx->curr) {
3943 out->args.vc.attr.ingress = 1;
3946 out->args.vc.attr.egress = 1;
3949 out->args.vc.attr.transfer = 1;
3952 out->args.vc.pattern =
3953 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3955 ctx->object = out->args.vc.pattern;
3956 ctx->objmask = NULL;
3959 out->args.vc.actions =
3960 (void *)RTE_ALIGN_CEIL((uintptr_t)
3961 (out->args.vc.pattern +
3962 out->args.vc.pattern_n),
3964 ctx->object = out->args.vc.actions;
3965 ctx->objmask = NULL;
3972 if (!out->args.vc.actions) {
3973 const struct parse_item_priv *priv = token->priv;
3974 struct rte_flow_item *item =
3975 out->args.vc.pattern + out->args.vc.pattern_n;
3977 data_size = priv->size * 3; /* spec, last, mask */
3978 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3979 (out->args.vc.data - data_size),
3981 if ((uint8_t *)item + sizeof(*item) > data)
3983 *item = (struct rte_flow_item){
3986 ++out->args.vc.pattern_n;
3988 ctx->objmask = NULL;
3990 const struct parse_action_priv *priv = token->priv;
3991 struct rte_flow_action *action =
3992 out->args.vc.actions + out->args.vc.actions_n;
3994 data_size = priv->size; /* configuration */
3995 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3996 (out->args.vc.data - data_size),
3998 if ((uint8_t *)action + sizeof(*action) > data)
4000 *action = (struct rte_flow_action){
4002 .conf = data_size ? data : NULL,
4004 ++out->args.vc.actions_n;
4005 ctx->object = action;
4006 ctx->objmask = NULL;
4008 memset(data, 0, data_size);
4009 out->args.vc.data = data;
4010 ctx->objdata = data_size;
4014 /** Parse pattern item parameter type. */
4016 parse_vc_spec(struct context *ctx, const struct token *token,
4017 const char *str, unsigned int len,
4018 void *buf, unsigned int size)
4020 struct buffer *out = buf;
4021 struct rte_flow_item *item;
4027 /* Token name must match. */
4028 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4030 /* Parse parameter types. */
4031 switch (ctx->curr) {
4032 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
4038 case ITEM_PARAM_SPEC:
4041 case ITEM_PARAM_LAST:
4044 case ITEM_PARAM_PREFIX:
4045 /* Modify next token to expect a prefix. */
4046 if (ctx->next_num < 2)
4048 ctx->next[ctx->next_num - 2] = prefix;
4050 case ITEM_PARAM_MASK:
4056 /* Nothing else to do if there is no buffer. */
4059 if (!out->args.vc.pattern_n)
4061 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
4062 data_size = ctx->objdata / 3; /* spec, last, mask */
4063 /* Point to selected object. */
4064 ctx->object = out->args.vc.data + (data_size * index);
4066 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
4067 item->mask = ctx->objmask;
4069 ctx->objmask = NULL;
4070 /* Update relevant item pointer. */
4071 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
4076 /** Parse action configuration field. */
4078 parse_vc_conf(struct context *ctx, const struct token *token,
4079 const char *str, unsigned int len,
4080 void *buf, unsigned int size)
4082 struct buffer *out = buf;
4085 /* Token name must match. */
4086 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4088 /* Nothing else to do if there is no buffer. */
4091 /* Point to selected object. */
4092 ctx->object = out->args.vc.data;
4093 ctx->objmask = NULL;
4097 /** Parse RSS action. */
4099 parse_vc_action_rss(struct context *ctx, const struct token *token,
4100 const char *str, unsigned int len,
4101 void *buf, unsigned int size)
4103 struct buffer *out = buf;
4104 struct rte_flow_action *action;
4105 struct action_rss_data *action_rss_data;
4109 ret = parse_vc(ctx, token, str, len, buf, size);
4112 /* Nothing else to do if there is no buffer. */
4115 if (!out->args.vc.actions_n)
4117 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4118 /* Point to selected object. */
4119 ctx->object = out->args.vc.data;
4120 ctx->objmask = NULL;
4121 /* Set up default configuration. */
4122 action_rss_data = ctx->object;
4123 *action_rss_data = (struct action_rss_data){
4124 .conf = (struct rte_flow_action_rss){
4125 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4128 .key_len = sizeof(action_rss_data->key),
4129 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
4130 .key = action_rss_data->key,
4131 .queue = action_rss_data->queue,
4133 .key = "testpmd's default RSS hash key, "
4134 "override it for better balancing",
4137 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
4138 action_rss_data->queue[i] = i;
4139 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
4140 ctx->port != (portid_t)RTE_PORT_ALL) {
4141 struct rte_eth_dev_info info;
4144 ret2 = rte_eth_dev_info_get(ctx->port, &info);
4148 action_rss_data->conf.key_len =
4149 RTE_MIN(sizeof(action_rss_data->key),
4150 info.hash_key_size);
4152 action->conf = &action_rss_data->conf;
4157 * Parse func field for RSS action.
4159 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
4160 * ACTION_RSS_FUNC_* index that called this function.
4163 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
4164 const char *str, unsigned int len,
4165 void *buf, unsigned int size)
4167 struct action_rss_data *action_rss_data;
4168 enum rte_eth_hash_function func;
4172 /* Token name must match. */
4173 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4175 switch (ctx->curr) {
4176 case ACTION_RSS_FUNC_DEFAULT:
4177 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4179 case ACTION_RSS_FUNC_TOEPLITZ:
4180 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
4182 case ACTION_RSS_FUNC_SIMPLE_XOR:
4183 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
4185 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
4186 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
4193 action_rss_data = ctx->object;
4194 action_rss_data->conf.func = func;
4199 * Parse type field for RSS action.
4201 * Valid tokens are type field names and the "end" token.
4204 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
4205 const char *str, unsigned int len,
4206 void *buf, unsigned int size)
4208 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
4209 struct action_rss_data *action_rss_data;
4215 if (ctx->curr != ACTION_RSS_TYPE)
4217 if (!(ctx->objdata >> 16) && ctx->object) {
4218 action_rss_data = ctx->object;
4219 action_rss_data->conf.types = 0;
4221 if (!strcmp_partial("end", str, len)) {
4222 ctx->objdata &= 0xffff;
4225 for (i = 0; rss_type_table[i].str; ++i)
4226 if (!strcmp_partial(rss_type_table[i].str, str, len))
4228 if (!rss_type_table[i].str)
4230 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
4232 if (ctx->next_num == RTE_DIM(ctx->next))
4234 ctx->next[ctx->next_num++] = next;
4237 action_rss_data = ctx->object;
4238 action_rss_data->conf.types |= rss_type_table[i].rss_type;
4243 * Parse queue field for RSS action.
4245 * Valid tokens are queue indices and the "end" token.
4248 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
4249 const char *str, unsigned int len,
4250 void *buf, unsigned int size)
4252 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
4253 struct action_rss_data *action_rss_data;
4254 const struct arg *arg;
4261 if (ctx->curr != ACTION_RSS_QUEUE)
4263 i = ctx->objdata >> 16;
4264 if (!strcmp_partial("end", str, len)) {
4265 ctx->objdata &= 0xffff;
4268 if (i >= ACTION_RSS_QUEUE_NUM)
4270 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4271 i * sizeof(action_rss_data->queue[i]),
4272 sizeof(action_rss_data->queue[i]));
4273 if (push_args(ctx, arg))
4275 ret = parse_int(ctx, token, str, len, NULL, 0);
4281 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
4283 if (ctx->next_num == RTE_DIM(ctx->next))
4285 ctx->next[ctx->next_num++] = next;
4289 action_rss_data = ctx->object;
4290 action_rss_data->conf.queue_num = i;
4291 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
4295 /** Parse VXLAN encap action. */
4297 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
4298 const char *str, unsigned int len,
4299 void *buf, unsigned int size)
4301 struct buffer *out = buf;
4302 struct rte_flow_action *action;
4303 struct action_vxlan_encap_data *action_vxlan_encap_data;
4306 ret = parse_vc(ctx, token, str, len, buf, size);
4309 /* Nothing else to do if there is no buffer. */
4312 if (!out->args.vc.actions_n)
4314 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4315 /* Point to selected object. */
4316 ctx->object = out->args.vc.data;
4317 ctx->objmask = NULL;
4318 /* Set up default configuration. */
4319 action_vxlan_encap_data = ctx->object;
4320 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
4321 .conf = (struct rte_flow_action_vxlan_encap){
4322 .definition = action_vxlan_encap_data->items,
4326 .type = RTE_FLOW_ITEM_TYPE_ETH,
4327 .spec = &action_vxlan_encap_data->item_eth,
4328 .mask = &rte_flow_item_eth_mask,
4331 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4332 .spec = &action_vxlan_encap_data->item_vlan,
4333 .mask = &rte_flow_item_vlan_mask,
4336 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4337 .spec = &action_vxlan_encap_data->item_ipv4,
4338 .mask = &rte_flow_item_ipv4_mask,
4341 .type = RTE_FLOW_ITEM_TYPE_UDP,
4342 .spec = &action_vxlan_encap_data->item_udp,
4343 .mask = &rte_flow_item_udp_mask,
4346 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
4347 .spec = &action_vxlan_encap_data->item_vxlan,
4348 .mask = &rte_flow_item_vxlan_mask,
4351 .type = RTE_FLOW_ITEM_TYPE_END,
4356 .tci = vxlan_encap_conf.vlan_tci,
4360 .src_addr = vxlan_encap_conf.ipv4_src,
4361 .dst_addr = vxlan_encap_conf.ipv4_dst,
4364 .src_port = vxlan_encap_conf.udp_src,
4365 .dst_port = vxlan_encap_conf.udp_dst,
4367 .item_vxlan.flags = 0,
4369 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
4370 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4371 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
4372 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4373 if (!vxlan_encap_conf.select_ipv4) {
4374 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
4375 &vxlan_encap_conf.ipv6_src,
4376 sizeof(vxlan_encap_conf.ipv6_src));
4377 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
4378 &vxlan_encap_conf.ipv6_dst,
4379 sizeof(vxlan_encap_conf.ipv6_dst));
4380 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
4381 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4382 .spec = &action_vxlan_encap_data->item_ipv6,
4383 .mask = &rte_flow_item_ipv6_mask,
4386 if (!vxlan_encap_conf.select_vlan)
4387 action_vxlan_encap_data->items[1].type =
4388 RTE_FLOW_ITEM_TYPE_VOID;
4389 if (vxlan_encap_conf.select_tos_ttl) {
4390 if (vxlan_encap_conf.select_ipv4) {
4391 static struct rte_flow_item_ipv4 ipv4_mask_tos;
4393 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
4394 sizeof(ipv4_mask_tos));
4395 ipv4_mask_tos.hdr.type_of_service = 0xff;
4396 ipv4_mask_tos.hdr.time_to_live = 0xff;
4397 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
4398 vxlan_encap_conf.ip_tos;
4399 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
4400 vxlan_encap_conf.ip_ttl;
4401 action_vxlan_encap_data->items[2].mask =
4404 static struct rte_flow_item_ipv6 ipv6_mask_tos;
4406 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
4407 sizeof(ipv6_mask_tos));
4408 ipv6_mask_tos.hdr.vtc_flow |=
4409 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
4410 ipv6_mask_tos.hdr.hop_limits = 0xff;
4411 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
4413 ((uint32_t)vxlan_encap_conf.ip_tos <<
4414 RTE_IPV6_HDR_TC_SHIFT);
4415 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
4416 vxlan_encap_conf.ip_ttl;
4417 action_vxlan_encap_data->items[2].mask =
4421 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4422 RTE_DIM(vxlan_encap_conf.vni));
4423 action->conf = &action_vxlan_encap_data->conf;
4427 /** Parse NVGRE encap action. */
4429 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4430 const char *str, unsigned int len,
4431 void *buf, unsigned int size)
4433 struct buffer *out = buf;
4434 struct rte_flow_action *action;
4435 struct action_nvgre_encap_data *action_nvgre_encap_data;
4438 ret = parse_vc(ctx, token, str, len, buf, size);
4441 /* Nothing else to do if there is no buffer. */
4444 if (!out->args.vc.actions_n)
4446 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4447 /* Point to selected object. */
4448 ctx->object = out->args.vc.data;
4449 ctx->objmask = NULL;
4450 /* Set up default configuration. */
4451 action_nvgre_encap_data = ctx->object;
4452 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
4453 .conf = (struct rte_flow_action_nvgre_encap){
4454 .definition = action_nvgre_encap_data->items,
4458 .type = RTE_FLOW_ITEM_TYPE_ETH,
4459 .spec = &action_nvgre_encap_data->item_eth,
4460 .mask = &rte_flow_item_eth_mask,
4463 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4464 .spec = &action_nvgre_encap_data->item_vlan,
4465 .mask = &rte_flow_item_vlan_mask,
4468 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4469 .spec = &action_nvgre_encap_data->item_ipv4,
4470 .mask = &rte_flow_item_ipv4_mask,
4473 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
4474 .spec = &action_nvgre_encap_data->item_nvgre,
4475 .mask = &rte_flow_item_nvgre_mask,
4478 .type = RTE_FLOW_ITEM_TYPE_END,
4483 .tci = nvgre_encap_conf.vlan_tci,
4487 .src_addr = nvgre_encap_conf.ipv4_src,
4488 .dst_addr = nvgre_encap_conf.ipv4_dst,
4490 .item_nvgre.flow_id = 0,
4492 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4493 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4494 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4495 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4496 if (!nvgre_encap_conf.select_ipv4) {
4497 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4498 &nvgre_encap_conf.ipv6_src,
4499 sizeof(nvgre_encap_conf.ipv6_src));
4500 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4501 &nvgre_encap_conf.ipv6_dst,
4502 sizeof(nvgre_encap_conf.ipv6_dst));
4503 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4504 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4505 .spec = &action_nvgre_encap_data->item_ipv6,
4506 .mask = &rte_flow_item_ipv6_mask,
4509 if (!nvgre_encap_conf.select_vlan)
4510 action_nvgre_encap_data->items[1].type =
4511 RTE_FLOW_ITEM_TYPE_VOID;
4512 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4513 RTE_DIM(nvgre_encap_conf.tni));
4514 action->conf = &action_nvgre_encap_data->conf;
4518 /** Parse l2 encap action. */
4520 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4521 const char *str, unsigned int len,
4522 void *buf, unsigned int size)
4524 struct buffer *out = buf;
4525 struct rte_flow_action *action;
4526 struct action_raw_encap_data *action_encap_data;
4527 struct rte_flow_item_eth eth = { .type = 0, };
4528 struct rte_flow_item_vlan vlan = {
4529 .tci = mplsoudp_encap_conf.vlan_tci,
4535 ret = parse_vc(ctx, token, str, len, buf, size);
4538 /* Nothing else to do if there is no buffer. */
4541 if (!out->args.vc.actions_n)
4543 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4544 /* Point to selected object. */
4545 ctx->object = out->args.vc.data;
4546 ctx->objmask = NULL;
4547 /* Copy the headers to the buffer. */
4548 action_encap_data = ctx->object;
4549 *action_encap_data = (struct action_raw_encap_data) {
4550 .conf = (struct rte_flow_action_raw_encap){
4551 .data = action_encap_data->data,
4555 header = action_encap_data->data;
4556 if (l2_encap_conf.select_vlan)
4557 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4558 else if (l2_encap_conf.select_ipv4)
4559 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4561 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4562 memcpy(eth.dst.addr_bytes,
4563 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4564 memcpy(eth.src.addr_bytes,
4565 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4566 memcpy(header, ð, sizeof(eth));
4567 header += sizeof(eth);
4568 if (l2_encap_conf.select_vlan) {
4569 if (l2_encap_conf.select_ipv4)
4570 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4572 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4573 memcpy(header, &vlan, sizeof(vlan));
4574 header += sizeof(vlan);
4576 action_encap_data->conf.size = header -
4577 action_encap_data->data;
4578 action->conf = &action_encap_data->conf;
4582 /** Parse l2 decap action. */
4584 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4585 const char *str, unsigned int len,
4586 void *buf, unsigned int size)
4588 struct buffer *out = buf;
4589 struct rte_flow_action *action;
4590 struct action_raw_decap_data *action_decap_data;
4591 struct rte_flow_item_eth eth = { .type = 0, };
4592 struct rte_flow_item_vlan vlan = {
4593 .tci = mplsoudp_encap_conf.vlan_tci,
4599 ret = parse_vc(ctx, token, str, len, buf, size);
4602 /* Nothing else to do if there is no buffer. */
4605 if (!out->args.vc.actions_n)
4607 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4608 /* Point to selected object. */
4609 ctx->object = out->args.vc.data;
4610 ctx->objmask = NULL;
4611 /* Copy the headers to the buffer. */
4612 action_decap_data = ctx->object;
4613 *action_decap_data = (struct action_raw_decap_data) {
4614 .conf = (struct rte_flow_action_raw_decap){
4615 .data = action_decap_data->data,
4619 header = action_decap_data->data;
4620 if (l2_decap_conf.select_vlan)
4621 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4622 memcpy(header, ð, sizeof(eth));
4623 header += sizeof(eth);
4624 if (l2_decap_conf.select_vlan) {
4625 memcpy(header, &vlan, sizeof(vlan));
4626 header += sizeof(vlan);
4628 action_decap_data->conf.size = header -
4629 action_decap_data->data;
4630 action->conf = &action_decap_data->conf;
4634 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4636 /** Parse MPLSOGRE encap action. */
4638 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4639 const char *str, unsigned int len,
4640 void *buf, unsigned int size)
4642 struct buffer *out = buf;
4643 struct rte_flow_action *action;
4644 struct action_raw_encap_data *action_encap_data;
4645 struct rte_flow_item_eth eth = { .type = 0, };
4646 struct rte_flow_item_vlan vlan = {
4647 .tci = mplsogre_encap_conf.vlan_tci,
4650 struct rte_flow_item_ipv4 ipv4 = {
4652 .src_addr = mplsogre_encap_conf.ipv4_src,
4653 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4654 .next_proto_id = IPPROTO_GRE,
4655 .version_ihl = RTE_IPV4_VHL_DEF,
4656 .time_to_live = IPDEFTTL,
4659 struct rte_flow_item_ipv6 ipv6 = {
4661 .proto = IPPROTO_GRE,
4662 .hop_limits = IPDEFTTL,
4665 struct rte_flow_item_gre gre = {
4666 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4668 struct rte_flow_item_mpls mpls = {
4674 ret = parse_vc(ctx, token, str, len, buf, size);
4677 /* Nothing else to do if there is no buffer. */
4680 if (!out->args.vc.actions_n)
4682 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4683 /* Point to selected object. */
4684 ctx->object = out->args.vc.data;
4685 ctx->objmask = NULL;
4686 /* Copy the headers to the buffer. */
4687 action_encap_data = ctx->object;
4688 *action_encap_data = (struct action_raw_encap_data) {
4689 .conf = (struct rte_flow_action_raw_encap){
4690 .data = action_encap_data->data,
4695 header = action_encap_data->data;
4696 if (mplsogre_encap_conf.select_vlan)
4697 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4698 else if (mplsogre_encap_conf.select_ipv4)
4699 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4701 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4702 memcpy(eth.dst.addr_bytes,
4703 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4704 memcpy(eth.src.addr_bytes,
4705 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4706 memcpy(header, ð, sizeof(eth));
4707 header += sizeof(eth);
4708 if (mplsogre_encap_conf.select_vlan) {
4709 if (mplsogre_encap_conf.select_ipv4)
4710 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4712 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4713 memcpy(header, &vlan, sizeof(vlan));
4714 header += sizeof(vlan);
4716 if (mplsogre_encap_conf.select_ipv4) {
4717 memcpy(header, &ipv4, sizeof(ipv4));
4718 header += sizeof(ipv4);
4720 memcpy(&ipv6.hdr.src_addr,
4721 &mplsogre_encap_conf.ipv6_src,
4722 sizeof(mplsogre_encap_conf.ipv6_src));
4723 memcpy(&ipv6.hdr.dst_addr,
4724 &mplsogre_encap_conf.ipv6_dst,
4725 sizeof(mplsogre_encap_conf.ipv6_dst));
4726 memcpy(header, &ipv6, sizeof(ipv6));
4727 header += sizeof(ipv6);
4729 memcpy(header, &gre, sizeof(gre));
4730 header += sizeof(gre);
4731 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4732 RTE_DIM(mplsogre_encap_conf.label));
4733 mpls.label_tc_s[2] |= 0x1;
4734 memcpy(header, &mpls, sizeof(mpls));
4735 header += sizeof(mpls);
4736 action_encap_data->conf.size = header -
4737 action_encap_data->data;
4738 action->conf = &action_encap_data->conf;
4742 /** Parse MPLSOGRE decap action. */
4744 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4745 const char *str, unsigned int len,
4746 void *buf, unsigned int size)
4748 struct buffer *out = buf;
4749 struct rte_flow_action *action;
4750 struct action_raw_decap_data *action_decap_data;
4751 struct rte_flow_item_eth eth = { .type = 0, };
4752 struct rte_flow_item_vlan vlan = {.tci = 0};
4753 struct rte_flow_item_ipv4 ipv4 = {
4755 .next_proto_id = IPPROTO_GRE,
4758 struct rte_flow_item_ipv6 ipv6 = {
4760 .proto = IPPROTO_GRE,
4763 struct rte_flow_item_gre gre = {
4764 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4766 struct rte_flow_item_mpls mpls;
4770 ret = parse_vc(ctx, token, str, len, buf, size);
4773 /* Nothing else to do if there is no buffer. */
4776 if (!out->args.vc.actions_n)
4778 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4779 /* Point to selected object. */
4780 ctx->object = out->args.vc.data;
4781 ctx->objmask = NULL;
4782 /* Copy the headers to the buffer. */
4783 action_decap_data = ctx->object;
4784 *action_decap_data = (struct action_raw_decap_data) {
4785 .conf = (struct rte_flow_action_raw_decap){
4786 .data = action_decap_data->data,
4790 header = action_decap_data->data;
4791 if (mplsogre_decap_conf.select_vlan)
4792 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4793 else if (mplsogre_encap_conf.select_ipv4)
4794 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4796 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4797 memcpy(eth.dst.addr_bytes,
4798 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4799 memcpy(eth.src.addr_bytes,
4800 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4801 memcpy(header, ð, sizeof(eth));
4802 header += sizeof(eth);
4803 if (mplsogre_encap_conf.select_vlan) {
4804 if (mplsogre_encap_conf.select_ipv4)
4805 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4807 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4808 memcpy(header, &vlan, sizeof(vlan));
4809 header += sizeof(vlan);
4811 if (mplsogre_encap_conf.select_ipv4) {
4812 memcpy(header, &ipv4, sizeof(ipv4));
4813 header += sizeof(ipv4);
4815 memcpy(header, &ipv6, sizeof(ipv6));
4816 header += sizeof(ipv6);
4818 memcpy(header, &gre, sizeof(gre));
4819 header += sizeof(gre);
4820 memset(&mpls, 0, sizeof(mpls));
4821 memcpy(header, &mpls, sizeof(mpls));
4822 header += sizeof(mpls);
4823 action_decap_data->conf.size = header -
4824 action_decap_data->data;
4825 action->conf = &action_decap_data->conf;
4829 /** Parse MPLSOUDP encap action. */
4831 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4832 const char *str, unsigned int len,
4833 void *buf, unsigned int size)
4835 struct buffer *out = buf;
4836 struct rte_flow_action *action;
4837 struct action_raw_encap_data *action_encap_data;
4838 struct rte_flow_item_eth eth = { .type = 0, };
4839 struct rte_flow_item_vlan vlan = {
4840 .tci = mplsoudp_encap_conf.vlan_tci,
4843 struct rte_flow_item_ipv4 ipv4 = {
4845 .src_addr = mplsoudp_encap_conf.ipv4_src,
4846 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4847 .next_proto_id = IPPROTO_UDP,
4848 .version_ihl = RTE_IPV4_VHL_DEF,
4849 .time_to_live = IPDEFTTL,
4852 struct rte_flow_item_ipv6 ipv6 = {
4854 .proto = IPPROTO_UDP,
4855 .hop_limits = IPDEFTTL,
4858 struct rte_flow_item_udp udp = {
4860 .src_port = mplsoudp_encap_conf.udp_src,
4861 .dst_port = mplsoudp_encap_conf.udp_dst,
4864 struct rte_flow_item_mpls mpls;
4868 ret = parse_vc(ctx, token, str, len, buf, size);
4871 /* Nothing else to do if there is no buffer. */
4874 if (!out->args.vc.actions_n)
4876 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4877 /* Point to selected object. */
4878 ctx->object = out->args.vc.data;
4879 ctx->objmask = NULL;
4880 /* Copy the headers to the buffer. */
4881 action_encap_data = ctx->object;
4882 *action_encap_data = (struct action_raw_encap_data) {
4883 .conf = (struct rte_flow_action_raw_encap){
4884 .data = action_encap_data->data,
4889 header = action_encap_data->data;
4890 if (mplsoudp_encap_conf.select_vlan)
4891 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4892 else if (mplsoudp_encap_conf.select_ipv4)
4893 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4895 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4896 memcpy(eth.dst.addr_bytes,
4897 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4898 memcpy(eth.src.addr_bytes,
4899 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4900 memcpy(header, ð, sizeof(eth));
4901 header += sizeof(eth);
4902 if (mplsoudp_encap_conf.select_vlan) {
4903 if (mplsoudp_encap_conf.select_ipv4)
4904 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4906 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4907 memcpy(header, &vlan, sizeof(vlan));
4908 header += sizeof(vlan);
4910 if (mplsoudp_encap_conf.select_ipv4) {
4911 memcpy(header, &ipv4, sizeof(ipv4));
4912 header += sizeof(ipv4);
4914 memcpy(&ipv6.hdr.src_addr,
4915 &mplsoudp_encap_conf.ipv6_src,
4916 sizeof(mplsoudp_encap_conf.ipv6_src));
4917 memcpy(&ipv6.hdr.dst_addr,
4918 &mplsoudp_encap_conf.ipv6_dst,
4919 sizeof(mplsoudp_encap_conf.ipv6_dst));
4920 memcpy(header, &ipv6, sizeof(ipv6));
4921 header += sizeof(ipv6);
4923 memcpy(header, &udp, sizeof(udp));
4924 header += sizeof(udp);
4925 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4926 RTE_DIM(mplsoudp_encap_conf.label));
4927 mpls.label_tc_s[2] |= 0x1;
4928 memcpy(header, &mpls, sizeof(mpls));
4929 header += sizeof(mpls);
4930 action_encap_data->conf.size = header -
4931 action_encap_data->data;
4932 action->conf = &action_encap_data->conf;
4936 /** Parse MPLSOUDP decap action. */
4938 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4939 const char *str, unsigned int len,
4940 void *buf, unsigned int size)
4942 struct buffer *out = buf;
4943 struct rte_flow_action *action;
4944 struct action_raw_decap_data *action_decap_data;
4945 struct rte_flow_item_eth eth = { .type = 0, };
4946 struct rte_flow_item_vlan vlan = {.tci = 0};
4947 struct rte_flow_item_ipv4 ipv4 = {
4949 .next_proto_id = IPPROTO_UDP,
4952 struct rte_flow_item_ipv6 ipv6 = {
4954 .proto = IPPROTO_UDP,
4957 struct rte_flow_item_udp udp = {
4959 .dst_port = rte_cpu_to_be_16(6635),
4962 struct rte_flow_item_mpls mpls;
4966 ret = parse_vc(ctx, token, str, len, buf, size);
4969 /* Nothing else to do if there is no buffer. */
4972 if (!out->args.vc.actions_n)
4974 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4975 /* Point to selected object. */
4976 ctx->object = out->args.vc.data;
4977 ctx->objmask = NULL;
4978 /* Copy the headers to the buffer. */
4979 action_decap_data = ctx->object;
4980 *action_decap_data = (struct action_raw_decap_data) {
4981 .conf = (struct rte_flow_action_raw_decap){
4982 .data = action_decap_data->data,
4986 header = action_decap_data->data;
4987 if (mplsoudp_decap_conf.select_vlan)
4988 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4989 else if (mplsoudp_encap_conf.select_ipv4)
4990 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4992 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4993 memcpy(eth.dst.addr_bytes,
4994 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4995 memcpy(eth.src.addr_bytes,
4996 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4997 memcpy(header, ð, sizeof(eth));
4998 header += sizeof(eth);
4999 if (mplsoudp_encap_conf.select_vlan) {
5000 if (mplsoudp_encap_conf.select_ipv4)
5001 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
5003 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
5004 memcpy(header, &vlan, sizeof(vlan));
5005 header += sizeof(vlan);
5007 if (mplsoudp_encap_conf.select_ipv4) {
5008 memcpy(header, &ipv4, sizeof(ipv4));
5009 header += sizeof(ipv4);
5011 memcpy(header, &ipv6, sizeof(ipv6));
5012 header += sizeof(ipv6);
5014 memcpy(header, &udp, sizeof(udp));
5015 header += sizeof(udp);
5016 memset(&mpls, 0, sizeof(mpls));
5017 memcpy(header, &mpls, sizeof(mpls));
5018 header += sizeof(mpls);
5019 action_decap_data->conf.size = header -
5020 action_decap_data->data;
5021 action->conf = &action_decap_data->conf;
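/** Parse raw decap index; copies data/size from raw_decap_confs[idx] into the action configuration. */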
5026 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
5027 const char *str, unsigned int len, void *buf,
5030 struct action_raw_decap_data *action_raw_decap_data;
5031 struct rte_flow_action *action;
5032 const struct arg *arg;
5033 struct buffer *out = buf;
5037 RTE_SET_USED(token);
5040 arg = ARGS_ENTRY_ARB_BOUNDED
5041 (offsetof(struct action_raw_decap_data, idx),
5042 sizeof(((struct action_raw_decap_data *)0)->idx),
5043 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5044 if (push_args(ctx, arg))
5046 ret = parse_int(ctx, token, str, len, NULL, 0);
5053 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5054 action_raw_decap_data = ctx->object;
5055 idx = action_raw_decap_data->idx;
5056 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
5057 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
5058 action->conf = &action_raw_decap_data->conf;
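/** Parse raw encap index; copies data/size from raw_encap_confs[idx] into the action configuration. */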
5064 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
5065 const char *str, unsigned int len, void *buf,
5068 struct action_raw_encap_data *action_raw_encap_data;
5069 struct rte_flow_action *action;
5070 const struct arg *arg;
5071 struct buffer *out = buf;
5075 RTE_SET_USED(token);
5078 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
5080 arg = ARGS_ENTRY_ARB_BOUNDED
5081 (offsetof(struct action_raw_encap_data, idx),
5082 sizeof(((struct action_raw_encap_data *)0)->idx),
5083 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
5084 if (push_args(ctx, arg))
5086 ret = parse_int(ctx, token, str, len, NULL, 0);
5093 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5094 action_raw_encap_data = ctx->object;
5095 idx = action_raw_encap_data->idx;
5096 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
5097 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
5098 action_raw_encap_data->conf.preserve = NULL;
5099 action->conf = &action_raw_encap_data->conf;
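/**
 * Parse the raw_encap action. Global buffer 0 is used unless an explicit
 * "index" token follows.
 *
 * Illustrative testpmd usage (assumed syntax, may differ between versions):
 *   set raw_encap 2 eth / ipv4 / udp / vxlan vni is 6 / end_set
 *   flow create 0 egress pattern eth / end actions raw_encap index 2 / end
 */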
5104 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
5105 const char *str, unsigned int len, void *buf,
5108 struct buffer *out = buf;
5109 struct rte_flow_action *action;
5110 struct action_raw_encap_data *action_raw_encap_data = NULL;
5113 ret = parse_vc(ctx, token, str, len, buf, size);
5116 /* Nothing else to do if there is no buffer. */
5119 if (!out->args.vc.actions_n)
5121 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5122 /* Point to selected object. */
5123 ctx->object = out->args.vc.data;
5124 ctx->objmask = NULL;
5125 /* Use the global raw encap configuration (index 0 by default). */
5126 action_raw_encap_data = ctx->object;
5127 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
5128 action_raw_encap_data->conf.preserve = NULL;
5129 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
5130 action->conf = &action_raw_encap_data->conf;
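/**
 * Parse the raw_decap action; mirrors parse_vc_action_raw_encap() but pulls
 * data from raw_decap_confs[] (buffer 0 unless an "index" token follows).
 */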
5135 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
5136 const char *str, unsigned int len, void *buf,
5139 struct buffer *out = buf;
5140 struct rte_flow_action *action;
5141 struct action_raw_decap_data *action_raw_decap_data = NULL;
5144 ret = parse_vc(ctx, token, str, len, buf, size);
5147 /* Nothing else to do if there is no buffer. */
5150 if (!out->args.vc.actions_n)
5152 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
5153 /* Point to selected object. */
5154 ctx->object = out->args.vc.data;
5155 ctx->objmask = NULL;
5156 /* Use the global raw decap configuration (index 0 by default). */
5157 action_raw_decap_data = ctx->object;
5158 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
5159 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
5160 action->conf = &action_raw_decap_data->conf;
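/**
 * Parse the set_meta action. Besides regular action parsing, it registers
 * the dynamic metadata field in mbufs via rte_flow_dynf_metadata_register().
 *
 * Illustrative usage (assumed syntax):
 *   flow create 0 ingress pattern eth / end
 *        actions set_meta data 0x1 mask 0xffff / queue index 0 / end
 */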
5165 parse_vc_action_set_meta(struct context *ctx, const struct token *token,
5166 const char *str, unsigned int len, void *buf,
5171 ret = parse_vc(ctx, token, str, len, buf, size);
5174 ret = rte_flow_dynf_metadata_register();
5180 /** Parse tokens for destroy command. */
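/*
 * Illustrative usage (assumed syntax):
 *   flow destroy 0 rule 0 rule 1
 * Rule IDs are collected one by one into out->args.destroy.rule[].
 */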
5182 parse_destroy(struct context *ctx, const struct token *token,
5183 const char *str, unsigned int len,
5184 void *buf, unsigned int size)
5186 struct buffer *out = buf;
5188 /* Token name must match. */
5189 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5191 /* Nothing else to do if there is no buffer. */
5194 if (!out->command) {
5195 if (ctx->curr != DESTROY)
5197 if (sizeof(*out) > size)
5199 out->command = ctx->curr;
5202 ctx->objmask = NULL;
5203 out->args.destroy.rule =
5204 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5208 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
5209 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
5212 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
5213 ctx->objmask = NULL;
5217 /** Parse tokens for flush command. */
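/*
 * Illustrative usage (assumed syntax):
 *   flow flush 0
 * Only the port number is needed; no further arguments are collected.
 */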
5219 parse_flush(struct context *ctx, const struct token *token,
5220 const char *str, unsigned int len,
5221 void *buf, unsigned int size)
5223 struct buffer *out = buf;
5225 /* Token name must match. */
5226 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5228 /* Nothing else to do if there is no buffer. */
5231 if (!out->command) {
5232 if (ctx->curr != FLUSH)
5234 if (sizeof(*out) > size)
5236 out->command = ctx->curr;
5239 ctx->objmask = NULL;
5244 /** Parse tokens for dump command. */
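/*
 * Illustrative usage (assumed syntax; the file path is hypothetical):
 *   flow dump 0 /tmp/flow_rules.txt
 */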
5246 parse_dump(struct context *ctx, const struct token *token,
5247 const char *str, unsigned int len,
5248 void *buf, unsigned int size)
5250 struct buffer *out = buf;
5252 /* Token name must match. */
5253 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5255 /* Nothing else to do if there is no buffer. */
5258 if (!out->command) {
5259 if (ctx->curr != DUMP)
5261 if (sizeof(*out) > size)
5263 out->command = ctx->curr;
5266 ctx->objmask = NULL;
5271 /** Parse tokens for query command. */
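/*
 * Illustrative usage (assumed syntax):
 *   flow query 0 7 count
 * The trailing token names the action to query (see parse_action()).
 */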
5273 parse_query(struct context *ctx, const struct token *token,
5274 const char *str, unsigned int len,
5275 void *buf, unsigned int size)
5277 struct buffer *out = buf;
5279 /* Token name must match. */
5280 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5282 /* Nothing else to do if there is no buffer. */
5285 if (!out->command) {
5286 if (ctx->curr != QUERY)
5288 if (sizeof(*out) > size)
5290 out->command = ctx->curr;
5293 ctx->objmask = NULL;
5298 /** Parse action names. */
5300 parse_action(struct context *ctx, const struct token *token,
5301 const char *str, unsigned int len,
5302 void *buf, unsigned int size)
5304 struct buffer *out = buf;
5305 const struct arg *arg = pop_args(ctx);
5309 /* Argument is expected. */
5312 /* Parse action name. */
5313 for (i = 0; next_action[i]; ++i) {
5314 const struct parse_action_priv *priv;
5316 token = &token_list[next_action[i]];
5317 if (strcmp_partial(token->name, str, len))
5323 memcpy((uint8_t *)ctx->object + arg->offset,
5329 push_args(ctx, arg);
5333 /** Parse tokens for list command. */
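/*
 * Illustrative usage (assumed syntax):
 *   flow list 0 group 0 group 1
 * Group IDs are optional and accumulated into out->args.list.group[].
 */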
5335 parse_list(struct context *ctx, const struct token *token,
5336 const char *str, unsigned int len,
5337 void *buf, unsigned int size)
5339 struct buffer *out = buf;
5341 /* Token name must match. */
5342 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5344 /* Nothing else to do if there is no buffer. */
5347 if (!out->command) {
5348 if (ctx->curr != LIST)
5350 if (sizeof(*out) > size)
5352 out->command = ctx->curr;
5355 ctx->objmask = NULL;
5356 out->args.list.group =
5357 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5361 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
5362 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
5365 ctx->object = out->args.list.group + out->args.list.group_n++;
5366 ctx->objmask = NULL;
5370 /** Parse tokens for isolate command. */
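/*
 * Illustrative usage (assumed syntax):
 *   flow isolate 0 true
 * The boolean argument is handled by parse_boolean() further below.
 */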
5372 parse_isolate(struct context *ctx, const struct token *token,
5373 const char *str, unsigned int len,
5374 void *buf, unsigned int size)
5376 struct buffer *out = buf;
5378 /* Token name must match. */
5379 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5381 /* Nothing else to do if there is no buffer. */
5384 if (!out->command) {
5385 if (ctx->curr != ISOLATE)
5387 if (sizeof(*out) > size)
5389 out->command = ctx->curr;
5392 ctx->objmask = NULL;
5398 * Parse signed/unsigned integers 8 to 64 bits long.
5400 * Last argument (ctx->args) is retrieved to determine integer type and
5404 parse_int(struct context *ctx, const struct token *token,
5405 const char *str, unsigned int len,
5406 void *buf, unsigned int size)
5408 const struct arg *arg = pop_args(ctx);
5413 /* Argument is expected. */
5418 (uintmax_t)strtoimax(str, &end, 0) :
5419 strtoumax(str, &end, 0);
5420 if (errno || (size_t)(end - str) != len)
5423 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
5424 (intmax_t)u > (intmax_t)arg->max)) ||
5425 (!arg->sign && (u < arg->min || u > arg->max))))
5430 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
5431 !arg_entry_bf_fill(ctx->objmask, -1, arg))
5435 buf = (uint8_t *)ctx->object + arg->offset;
5437 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
5441 case sizeof(uint8_t):
5442 *(uint8_t *)buf = u;
5444 case sizeof(uint16_t):
5445 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
5447 case sizeof(uint8_t [3]):
5448 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5450 ((uint8_t *)buf)[0] = u;
5451 ((uint8_t *)buf)[1] = u >> 8;
5452 ((uint8_t *)buf)[2] = u >> 16;
5456 ((uint8_t *)buf)[0] = u >> 16;
5457 ((uint8_t *)buf)[1] = u >> 8;
5458 ((uint8_t *)buf)[2] = u;
5460 case sizeof(uint32_t):
5461 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
5463 case sizeof(uint64_t):
5464 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
5469 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
5471 buf = (uint8_t *)ctx->objmask + arg->offset;
5476 push_args(ctx, arg);
5483 * Three arguments (ctx->args) are retrieved from the stack to store data,
5484 * its actual length and address (in that order).
5487 parse_string(struct context *ctx, const struct token *token,
5488 const char *str, unsigned int len,
5489 void *buf, unsigned int size)
5491 const struct arg *arg_data = pop_args(ctx);
5492 const struct arg *arg_len = pop_args(ctx);
5493 const struct arg *arg_addr = pop_args(ctx);
5494 char tmp[16]; /* Ought to be enough. */
5497 /* Arguments are expected. */
5501 push_args(ctx, arg_data);
5505 push_args(ctx, arg_len);
5506 push_args(ctx, arg_data);
5509 size = arg_data->size;
5510 /* Bit-mask fill is not supported. */
5511 if (arg_data->mask || size < len)
5515 /* Let parse_int() fill length information first. */
5516 ret = snprintf(tmp, sizeof(tmp), "%u", len);
5519 push_args(ctx, arg_len);
5520 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5525 buf = (uint8_t *)ctx->object + arg_data->offset;
5526 /* Output buffer is not necessarily NUL-terminated. */
5527 memcpy(buf, str, len);
5528 memset((uint8_t *)buf + len, 0x00, size - len);
5530 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
5531 /* Save address if requested. */
5532 if (arg_addr->size) {
5533 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5535 (uint8_t *)ctx->object + arg_data->offset
5539 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5541 (uint8_t *)ctx->objmask + arg_data->offset
5547 push_args(ctx, arg_addr);
5548 push_args(ctx, arg_len);
5549 push_args(ctx, arg_data);
5554 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
5560 /* Check input parameters */
5561 if ((src == NULL) ||
5567 /* Convert chars to bytes */
5568 for (i = 0, len = 0; i < *size; i += 2) {
5569 snprintf(tmp, 3, "%s", src + i);
5570 dst[len++] = strtoul(tmp, &c, 16);
5585 parse_hex(struct context *ctx, const struct token *token,
5586 const char *str, unsigned int len,
5587 void *buf, unsigned int size)
5589 const struct arg *arg_data = pop_args(ctx);
5590 const struct arg *arg_len = pop_args(ctx);
5591 const struct arg *arg_addr = pop_args(ctx);
5592 char tmp[16]; /* Ought to be enough. */
5594 unsigned int hexlen = len;
5595 unsigned int length = 256;
5596 uint8_t hex_tmp[length];
5598 /* Arguments are expected. */
5602 push_args(ctx, arg_data);
5606 push_args(ctx, arg_len);
5607 push_args(ctx, arg_data);
5610 size = arg_data->size;
5611 /* Bit-mask fill is not supported. */
5617 /* Translate hex string to a byte array. */
5618 if (str[0] == '0' && ((str[1] == 'x') ||
5623 if (hexlen > length)
5625 ret = parse_hex_string(str, hex_tmp, &hexlen);
5628 /* Let parse_int() fill length information first. */
5629 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
5632 push_args(ctx, arg_len);
5633 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5638 buf = (uint8_t *)ctx->object + arg_data->offset;
5639 /* Output buffer is not necessarily NUL-terminated. */
5640 memcpy(buf, hex_tmp, hexlen);
5641 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
5643 memset((uint8_t *)ctx->objmask + arg_data->offset,
5645 /* Save address if requested. */
5646 if (arg_addr->size) {
5647 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5649 (uint8_t *)ctx->object + arg_data->offset
5653 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5655 (uint8_t *)ctx->objmask + arg_data->offset
5661 push_args(ctx, arg_addr);
5662 push_args(ctx, arg_len);
5663 push_args(ctx, arg_data);
5669 * Parse a zero-terminated string.
5672 parse_string0(struct context *ctx, const struct token *token __rte_unused,
5673 const char *str, unsigned int len,
5674 void *buf, unsigned int size)
5676 const struct arg *arg_data = pop_args(ctx);
5678 /* Arguments are expected. */
5681 size = arg_data->size;
5682 /* Bit-mask fill is not supported. */
5683 if (arg_data->mask || size < len + 1)
5687 buf = (uint8_t *)ctx->object + arg_data->offset;
5688 strncpy(buf, str, len);
5690 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
5693 push_args(ctx, arg_data);
5698 * Parse a MAC address.
5700 * Last argument (ctx->args) is retrieved to determine storage size and
5704 parse_mac_addr(struct context *ctx, const struct token *token,
5705 const char *str, unsigned int len,
5706 void *buf, unsigned int size)
5708 const struct arg *arg = pop_args(ctx);
5709 struct rte_ether_addr tmp;
5713 /* Argument is expected. */
5717 /* Bit-mask fill is not supported. */
5718 if (arg->mask || size != sizeof(tmp))
5720 /* Only network endian is supported. */
5723 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
5724 if (ret < 0 || (unsigned int)ret != len)
5728 buf = (uint8_t *)ctx->object + arg->offset;
5729 memcpy(buf, &tmp, size);
5731 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5734 push_args(ctx, arg);
5739 * Parse an IPv4 address.
5741 * Last argument (ctx->args) is retrieved to determine storage size and
5745 parse_ipv4_addr(struct context *ctx, const struct token *token,
5746 const char *str, unsigned int len,
5747 void *buf, unsigned int size)
5749 const struct arg *arg = pop_args(ctx);
5754 /* Argument is expected. */
5758 /* Bit-mask fill is not supported. */
5759 if (arg->mask || size != sizeof(tmp))
5761 /* Only network endian is supported. */
5764 memcpy(str2, str, len);
5766 ret = inet_pton(AF_INET, str2, &tmp);
5768 /* Attempt integer parsing. */
5769 push_args(ctx, arg);
5770 return parse_int(ctx, token, str, len, buf, size);
5774 buf = (uint8_t *)ctx->object + arg->offset;
5775 memcpy(buf, &tmp, size);
5777 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5780 push_args(ctx, arg);
5785 * Parse an IPv6 address.
5787 * Last argument (ctx->args) is retrieved to determine storage size and
5791 parse_ipv6_addr(struct context *ctx, const struct token *token,
5792 const char *str, unsigned int len,
5793 void *buf, unsigned int size)
5795 const struct arg *arg = pop_args(ctx);
5797 struct in6_addr tmp;
5801 /* Argument is expected. */
5805 /* Bit-mask fill is not supported. */
5806 if (arg->mask || size != sizeof(tmp))
5808 /* Only network endian is supported. */
5811 memcpy(str2, str, len);
5813 ret = inet_pton(AF_INET6, str2, &tmp);
5818 buf = (uint8_t *)ctx->object + arg->offset;
5819 memcpy(buf, &tmp, size);
5821 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5824 push_args(ctx, arg);
5828 /** Boolean values (even indices stand for false). */
5829 static const char *const boolean_name[] = {
5839 * Parse a boolean value.
5841 * Last argument (ctx->args) is retrieved to determine storage size and
5845 parse_boolean(struct context *ctx, const struct token *token,
5846 const char *str, unsigned int len,
5847 void *buf, unsigned int size)
5849 const struct arg *arg = pop_args(ctx);
5853 /* Argument is expected. */
5856 for (i = 0; boolean_name[i]; ++i)
5857 if (!strcmp_partial(boolean_name[i], str, len))
5859 /* Process token as integer. */
5860 if (boolean_name[i])
5861 str = i & 1 ? "1" : "0";
5862 push_args(ctx, arg);
5863 ret = parse_int(ctx, token, str, strlen(str), buf, size);
5864 return ret > 0 ? (int)len : ret;
5867 /** Parse port and update context. */
5869 parse_port(struct context *ctx, const struct token *token,
5870 const char *str, unsigned int len,
5871 void *buf, unsigned int size)
5873 struct buffer *out = &(struct buffer){ .port = 0 };
5881 ctx->objmask = NULL;
5882 size = sizeof(*out);
5884 ret = parse_int(ctx, token, str, len, out, size);
5886 ctx->port = out->port;
5892 /** Parse set command, initialize output buffer for subsequent tokens. */
5894 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5895 const char *str, unsigned int len,
5896 void *buf, unsigned int size)
5898 struct buffer *out = buf;
5900 /* Token name must match. */
5901 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5903 /* Nothing else to do if there is no buffer. */
5906 /* Make sure buffer is large enough. */
5907 if (size < sizeof(*out))
5910 ctx->objmask = NULL;
5914 out->command = ctx->curr;
5919 * Parse set raw_encap/raw_decap command,
5920 * initialize output buffer for subsequent tokens.
5923 parse_set_init(struct context *ctx, const struct token *token,
5924 const char *str, unsigned int len,
5925 void *buf, unsigned int size)
5927 struct buffer *out = buf;
5929 /* Token name must match. */
5930 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5932 /* Nothing else to do if there is no buffer. */
5935 /* Make sure buffer is large enough. */
5936 if (size < sizeof(*out))
5938 /* Initialize buffer. */
5939 memset(out, 0x00, sizeof(*out));
5940 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5943 ctx->objmask = NULL;
5944 if (!out->command) {
5945 if (ctx->curr != SET)
5947 if (sizeof(*out) > size)
5949 out->command = ctx->curr;
5950 out->args.vc.data = (uint8_t *)out + size;
5951 /* All we need is the pattern. */
5952 out->args.vc.pattern =
5953 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5955 ctx->object = out->args.vc.pattern;
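/*
 * Illustrative "set" command handled by parse_set_init() and
 * parse_set_raw_encap_decap() above (assumed syntax, may vary by version):
 *   set raw_encap 4 eth src is 10:11:22:33:44:55 / vlan tci is 1 /
 *       ipv4 / udp / vxlan vni is 2 / end_set
 */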
5960 /** No completion. */
5962 comp_none(struct context *ctx, const struct token *token,
5963 unsigned int ent, char *buf, unsigned int size)
5973 /** Complete boolean values. */
5975 comp_boolean(struct context *ctx, const struct token *token,
5976 unsigned int ent, char *buf, unsigned int size)
5982 for (i = 0; boolean_name[i]; ++i)
5983 if (buf && i == ent)
5984 return strlcpy(buf, boolean_name[i], size);
5990 /** Complete action names. */
5992 comp_action(struct context *ctx, const struct token *token,
5993 unsigned int ent, char *buf, unsigned int size)
5999 for (i = 0; next_action[i]; ++i)
6000 if (buf && i == ent)
6001 return strlcpy(buf, token_list[next_action[i]].name,
6008 /** Complete available ports. */
6010 comp_port(struct context *ctx, const struct token *token,
6011 unsigned int ent, char *buf, unsigned int size)
6018 RTE_ETH_FOREACH_DEV(p) {
6019 if (buf && i == ent)
6020 return snprintf(buf, size, "%u", p);
6028 /** Complete available rule IDs. */
6030 comp_rule_id(struct context *ctx, const struct token *token,
6031 unsigned int ent, char *buf, unsigned int size)
6034 struct rte_port *port;
6035 struct port_flow *pf;
6038 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
6039 ctx->port == (portid_t)RTE_PORT_ALL)
6041 port = &ports[ctx->port];
6042 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
6043 if (buf && i == ent)
6044 return snprintf(buf, size, "%u", pf->id);
6052 /** Complete type field for RSS action. */
6054 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
6055 unsigned int ent, char *buf, unsigned int size)
6061 for (i = 0; rss_type_table[i].str; ++i)
6066 return strlcpy(buf, rss_type_table[ent].str, size);
6068 return snprintf(buf, size, "end");
6072 /** Complete queue field for RSS action. */
6074 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
6075 unsigned int ent, char *buf, unsigned int size)
6082 return snprintf(buf, size, "%u", ent);
6084 return snprintf(buf, size, "end");
6088 /** Complete index number for set raw_encap/raw_decap commands. */
6090 comp_set_raw_index(struct context *ctx, const struct token *token,
6091 unsigned int ent, char *buf, unsigned int size)
6097 RTE_SET_USED(token);
6098 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
6099 if (buf && idx == ent)
6100 return snprintf(buf, size, "%u", idx);
6106 /** Internal context. */
6107 static struct context cmd_flow_context;
6109 /** Global parser instances (cmdline API). */
6110 cmdline_parse_inst_t cmd_flow;
6111 cmdline_parse_inst_t cmd_set_raw;
6113 /** Initialize context. */
6115 cmd_flow_context_init(struct context *ctx)
6117 /* A full memset() is not necessary. */
6127 ctx->objmask = NULL;
6130 /** Parse a token (cmdline API). */
6132 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
6135 struct context *ctx = &cmd_flow_context;
6136 const struct token *token;
6137 const enum index *list;
6142 token = &token_list[ctx->curr];
6143 /* Check argument length. */
6146 for (len = 0; src[len]; ++len)
6147 if (src[len] == '#' || isspace(src[len]))
6151 /* Last argument and EOL detection. */
6152 for (i = len; src[i]; ++i)
6153 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
6155 else if (!isspace(src[i])) {
6160 if (src[i] == '\r' || src[i] == '\n') {
6164 /* Initialize context if necessary. */
6165 if (!ctx->next_num) {
6168 ctx->next[ctx->next_num++] = token->next[0];
6170 /* Process argument through candidates. */
6171 ctx->prev = ctx->curr;
6172 list = ctx->next[ctx->next_num - 1];
6173 for (i = 0; list[i]; ++i) {
6174 const struct token *next = &token_list[list[i]];
6177 ctx->curr = list[i];
6179 tmp = next->call(ctx, next, src, len, result, size);
6181 tmp = parse_default(ctx, next, src, len, result, size);
6182 if (tmp == -1 || tmp != len)
6190 /* Push subsequent tokens if any. */
6192 for (i = 0; token->next[i]; ++i) {
6193 if (ctx->next_num == RTE_DIM(ctx->next))
6195 ctx->next[ctx->next_num++] = token->next[i];
6197 /* Push arguments if any. */
6199 for (i = 0; token->args[i]; ++i) {
6200 if (ctx->args_num == RTE_DIM(ctx->args))
6202 ctx->args[ctx->args_num++] = token->args[i];
6207 /** Return number of completion entries (cmdline API). */
6209 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
6211 struct context *ctx = &cmd_flow_context;
6212 const struct token *token = &token_list[ctx->curr];
6213 const enum index *list;
6217 /* Count number of tokens in current list. */
6219 list = ctx->next[ctx->next_num - 1];
6221 list = token->next[0];
6222 for (i = 0; list[i]; ++i)
6227 * If there is a single token, use its completion callback, otherwise
6228 * return the number of entries.
6230 token = &token_list[list[0]];
6231 if (i == 1 && token->comp) {
6232 /* Save index for cmd_flow_get_help(). */
6233 ctx->prev = list[0];
6234 return token->comp(ctx, token, 0, NULL, 0);
6239 /** Return a completion entry (cmdline API). */
6241 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
6242 char *dst, unsigned int size)
6244 struct context *ctx = &cmd_flow_context;
6245 const struct token *token = &token_list[ctx->curr];
6246 const enum index *list;
6250 /* Count number of tokens in current list. */
6252 list = ctx->next[ctx->next_num - 1];
6254 list = token->next[0];
6255 for (i = 0; list[i]; ++i)
6259 /* If there is a single token, use its completion callback. */
6260 token = &token_list[list[0]];
6261 if (i == 1 && token->comp) {
6262 /* Save index for cmd_flow_get_help(). */
6263 ctx->prev = list[0];
6264 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
6266 /* Otherwise make sure the index is valid and use defaults. */
6269 token = &token_list[list[index]];
6270 strlcpy(dst, token->name, size);
6271 /* Save index for cmd_flow_get_help(). */
6272 ctx->prev = list[index];
6276 /** Populate help strings for current token (cmdline API). */
6278 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
6280 struct context *ctx = &cmd_flow_context;
6281 const struct token *token = &token_list[ctx->prev];
6286 /* Set token type and update global help with details. */
6287 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
6289 cmd_flow.help_str = token->help;
6291 cmd_flow.help_str = token->name;
6295 /** Token definition template (cmdline API). */
6296 static struct cmdline_token_hdr cmd_flow_token_hdr = {
6297 .ops = &(struct cmdline_token_ops){
6298 .parse = cmd_flow_parse,
6299 .complete_get_nb = cmd_flow_complete_get_nb,
6300 .complete_get_elt = cmd_flow_complete_get_elt,
6301 .get_help = cmd_flow_get_help,
6306 /** Populate the next dynamic token. */
6308 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
6309 cmdline_parse_token_hdr_t **hdr_inst)
6311 struct context *ctx = &cmd_flow_context;
6313 /* Always reinitialize context before requesting the first token. */
6314 if (!(hdr_inst - cmd_flow.tokens))
6315 cmd_flow_context_init(ctx);
6316 /* Return NULL when no more tokens are expected. */
6317 if (!ctx->next_num && ctx->curr) {
6321 /* Determine if command should end here. */
6322 if (ctx->eol && ctx->last && ctx->next_num) {
6323 const enum index *list = ctx->next[ctx->next_num - 1];
6326 for (i = 0; list[i]; ++i) {
6333 *hdr = &cmd_flow_token_hdr;
6336 /** Dispatch parsed buffer to function calls. */
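/*
 * Illustrative commands that end up here (assumed syntax):
 *   flow validate 0 ingress pattern eth / ipv4 / end actions drop / end
 *   flow create 0 ingress pattern eth / end actions queue index 1 / end
 */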
6338 cmd_flow_parsed(const struct buffer *in)
6340 switch (in->command) {
6342 port_flow_validate(in->port, &in->args.vc.attr,
6343 in->args.vc.pattern, in->args.vc.actions);
6346 port_flow_create(in->port, &in->args.vc.attr,
6347 in->args.vc.pattern, in->args.vc.actions);
6350 port_flow_destroy(in->port, in->args.destroy.rule_n,
6351 in->args.destroy.rule);
6354 port_flow_flush(in->port);
6357 port_flow_dump(in->port, in->args.dump.file);
6360 port_flow_query(in->port, in->args.query.rule,
6361 &in->args.query.action);
6364 port_flow_list(in->port, in->args.list.group_n,
6365 in->args.list.group);
6368 port_flow_isolate(in->port, in->args.isolate.set);
6375 /** Token generator and output processing callback (cmdline API). */
6377 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
6380 cmd_flow_tok(arg0, arg2);
6382 cmd_flow_parsed(arg0);
6385 /** Global parser instance (cmdline API). */
6386 cmdline_parse_inst_t cmd_flow = {
6388 .data = NULL, /**< Unused. */
6389 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6392 }, /**< Tokens are returned by cmd_flow_tok(). */
6395 /** Set command facility. Reuse the flow command infrastructure as much as possible. */
6398 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
6400 struct rte_flow_item_ipv4 *ipv4;
6401 struct rte_flow_item_eth *eth;
6402 struct rte_flow_item_ipv6 *ipv6;
6403 struct rte_flow_item_vxlan *vxlan;
6404 struct rte_flow_item_vxlan_gpe *gpe;
6405 struct rte_flow_item_nvgre *nvgre;
6406 uint32_t ipv6_vtc_flow;
6408 switch (item->type) {
6409 case RTE_FLOW_ITEM_TYPE_ETH:
6410 eth = (struct rte_flow_item_eth *)buf;
6412 eth->type = rte_cpu_to_be_16(next_proto);
6414 case RTE_FLOW_ITEM_TYPE_IPV4:
6415 ipv4 = (struct rte_flow_item_ipv4 *)buf;
6416 ipv4->hdr.version_ihl = 0x45;
6417 if (next_proto && ipv4->hdr.next_proto_id == 0)
6418 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
6420 case RTE_FLOW_ITEM_TYPE_IPV6:
6421 ipv6 = (struct rte_flow_item_ipv6 *)buf;
6422 if (next_proto && ipv6->hdr.proto == 0)
6423 ipv6->hdr.proto = (uint8_t)next_proto;
6424 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
6425 ipv6_vtc_flow &= 0x0FFFFFFF; /* Reset version bits. */
6426 ipv6_vtc_flow |= 0x60000000; /* Set IPv6 version (6). */
6427 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
6429 case RTE_FLOW_ITEM_TYPE_VXLAN:
6430 vxlan = (struct rte_flow_item_vxlan *)buf;
6431 vxlan->flags = 0x08;
6433 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6434 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
6437 case RTE_FLOW_ITEM_TYPE_NVGRE:
6438 nvgre = (struct rte_flow_item_nvgre *)buf;
6439 nvgre->protocol = rte_cpu_to_be_16(0x6558);
6440 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
6447 /** Helper to get an item's default mask. */
6449 flow_item_default_mask(const struct rte_flow_item *item)
6451 const void *mask = NULL;
6452 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6454 switch (item->type) {
6455 case RTE_FLOW_ITEM_TYPE_ANY:
6456 mask = &rte_flow_item_any_mask;
6458 case RTE_FLOW_ITEM_TYPE_VF:
6459 mask = &rte_flow_item_vf_mask;
6461 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6462 mask = &rte_flow_item_port_id_mask;
6464 case RTE_FLOW_ITEM_TYPE_RAW:
6465 mask = &rte_flow_item_raw_mask;
6467 case RTE_FLOW_ITEM_TYPE_ETH:
6468 mask = &rte_flow_item_eth_mask;
6470 case RTE_FLOW_ITEM_TYPE_VLAN:
6471 mask = &rte_flow_item_vlan_mask;
6473 case RTE_FLOW_ITEM_TYPE_IPV4:
6474 mask = &rte_flow_item_ipv4_mask;
6476 case RTE_FLOW_ITEM_TYPE_IPV6:
6477 mask = &rte_flow_item_ipv6_mask;
6479 case RTE_FLOW_ITEM_TYPE_ICMP:
6480 mask = &rte_flow_item_icmp_mask;
6482 case RTE_FLOW_ITEM_TYPE_UDP:
6483 mask = &rte_flow_item_udp_mask;
6485 case RTE_FLOW_ITEM_TYPE_TCP:
6486 mask = &rte_flow_item_tcp_mask;
6488 case RTE_FLOW_ITEM_TYPE_SCTP:
6489 mask = &rte_flow_item_sctp_mask;
6491 case RTE_FLOW_ITEM_TYPE_VXLAN:
6492 mask = &rte_flow_item_vxlan_mask;
6494 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6495 mask = &rte_flow_item_vxlan_gpe_mask;
6497 case RTE_FLOW_ITEM_TYPE_E_TAG:
6498 mask = &rte_flow_item_e_tag_mask;
6500 case RTE_FLOW_ITEM_TYPE_NVGRE:
6501 mask = &rte_flow_item_nvgre_mask;
6503 case RTE_FLOW_ITEM_TYPE_MPLS:
6504 mask = &rte_flow_item_mpls_mask;
6506 case RTE_FLOW_ITEM_TYPE_GRE:
6507 mask = &rte_flow_item_gre_mask;
6509 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6510 mask = &gre_key_default_mask;
6512 case RTE_FLOW_ITEM_TYPE_META:
6513 mask = &rte_flow_item_meta_mask;
6515 case RTE_FLOW_ITEM_TYPE_FUZZY:
6516 mask = &rte_flow_item_fuzzy_mask;
6518 case RTE_FLOW_ITEM_TYPE_GTP:
6519 mask = &rte_flow_item_gtp_mask;
6521 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6522 mask = &rte_flow_item_gtp_psc_mask;
6524 case RTE_FLOW_ITEM_TYPE_GENEVE:
6525 mask = &rte_flow_item_geneve_mask;
6527 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
6528 mask = &rte_flow_item_pppoe_proto_id_mask;
6530 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
6531 mask = &rte_flow_item_l2tpv3oip_mask;
6533 case RTE_FLOW_ITEM_TYPE_ESP:
6534 mask = &rte_flow_item_esp_mask;
6536 case RTE_FLOW_ITEM_TYPE_AH:
6537 mask = &rte_flow_item_ah_mask;
6539 case RTE_FLOW_ITEM_TYPE_PFCP:
6540 mask = &rte_flow_item_pfcp_mask;
6550 /** Build raw encap/decap data from the parsed pattern. */
6552 cmd_set_raw_parsed(const struct buffer *in)
6554 uint32_t n = in->args.vc.pattern_n;
6556 struct rte_flow_item *item = NULL;
6558 uint8_t *data = NULL;
6559 uint8_t *data_tail = NULL;
6560 size_t *total_size = NULL;
6561 uint16_t upper_layer = 0;
6563 uint16_t idx = in->port; /* The port field doubles as the buffer index. */
6565 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
6566 in->command == SET_RAW_DECAP);
6567 if (in->command == SET_RAW_ENCAP) {
6568 total_size = &raw_encap_confs[idx].size;
6569 data = (uint8_t *)&raw_encap_confs[idx].data;
6571 total_size = &raw_decap_confs[idx].size;
6572 data = (uint8_t *)&raw_decap_confs[idx].data;
6575 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6576 /* Process headers from the upper layers down to L2 (L3/L4 -> L2). */
6577 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
6578 for (i = n - 1 ; i >= 0; --i) {
6579 item = in->args.vc.pattern + i;
6580 if (item->spec == NULL)
6581 item->spec = flow_item_default_mask(item);
6582 switch (item->type) {
6583 case RTE_FLOW_ITEM_TYPE_ETH:
6584 size = sizeof(struct rte_flow_item_eth);
6586 case RTE_FLOW_ITEM_TYPE_VLAN:
6587 size = sizeof(struct rte_flow_item_vlan);
6588 proto = RTE_ETHER_TYPE_VLAN;
6590 case RTE_FLOW_ITEM_TYPE_IPV4:
6591 size = sizeof(struct rte_flow_item_ipv4);
6592 proto = RTE_ETHER_TYPE_IPV4;
6594 case RTE_FLOW_ITEM_TYPE_IPV6:
6595 size = sizeof(struct rte_flow_item_ipv6);
6596 proto = RTE_ETHER_TYPE_IPV6;
6598 case RTE_FLOW_ITEM_TYPE_UDP:
6599 size = sizeof(struct rte_flow_item_udp);
6602 case RTE_FLOW_ITEM_TYPE_TCP:
6603 size = sizeof(struct rte_flow_item_tcp);
6606 case RTE_FLOW_ITEM_TYPE_VXLAN:
6607 size = sizeof(struct rte_flow_item_vxlan);
6609 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6610 size = sizeof(struct rte_flow_item_vxlan_gpe);
6612 case RTE_FLOW_ITEM_TYPE_GRE:
6613 size = sizeof(struct rte_flow_item_gre);
6616 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6617 size = sizeof(rte_be32_t);
6620 case RTE_FLOW_ITEM_TYPE_MPLS:
6621 size = sizeof(struct rte_flow_item_mpls);
6624 case RTE_FLOW_ITEM_TYPE_NVGRE:
6625 size = sizeof(struct rte_flow_item_nvgre);
6628 case RTE_FLOW_ITEM_TYPE_GENEVE:
6629 size = sizeof(struct rte_flow_item_geneve);
6631 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
6632 size = sizeof(struct rte_flow_item_l2tpv3oip);
6635 case RTE_FLOW_ITEM_TYPE_ESP:
6636 size = sizeof(struct rte_flow_item_esp);
6639 case RTE_FLOW_ITEM_TYPE_AH:
6640 size = sizeof(struct rte_flow_item_ah);
6643 case RTE_FLOW_ITEM_TYPE_GTP:
6644 size = sizeof(struct rte_flow_item_gtp);
6646 case RTE_FLOW_ITEM_TYPE_PFCP:
6647 size = sizeof(struct rte_flow_item_pfcp);
6650 printf("Error - Not supported item\n");
6652 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6655 *total_size += size;
6656 rte_memcpy(data_tail - (*total_size), item->spec, size);
6657 /* Update fields that cannot be set from the command line. */
6658 update_fields((data_tail - (*total_size)), item,
6660 upper_layer = proto;
6662 if (verbose_level & 0x1)
6663 printf("total data size is %zu\n", (*total_size));
6664 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
6665 memmove(data, (data_tail - (*total_size)), *total_size);
6668 /** Populate help strings for current token (cmdline API). */
6670 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
6673 struct context *ctx = &cmd_flow_context;
6674 const struct token *token = &token_list[ctx->prev];
6679 /* Set token type and update global help with details. */
6680 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
6682 cmd_set_raw.help_str = token->help;
6684 cmd_set_raw.help_str = token->name;
6688 /** Token definition template (cmdline API). */
6689 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
6690 .ops = &(struct cmdline_token_ops){
6691 .parse = cmd_flow_parse,
6692 .complete_get_nb = cmd_flow_complete_get_nb,
6693 .complete_get_elt = cmd_flow_complete_get_elt,
6694 .get_help = cmd_set_raw_get_help,
6699 /** Populate the next dynamic token. */
6701 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
6702 cmdline_parse_token_hdr_t **hdr_inst)
6704 struct context *ctx = &cmd_flow_context;
6706 /* Always reinitialize context before requesting the first token. */
6707 if (!(hdr_inst - cmd_set_raw.tokens)) {
6708 cmd_flow_context_init(ctx);
6709 ctx->curr = START_SET;
6711 /* Return NULL when no more tokens are expected. */
6712 if (!ctx->next_num && (ctx->curr != START_SET)) {
6716 /* Determine if command should end here. */
6717 if (ctx->eol && ctx->last && ctx->next_num) {
6718 const enum index *list = ctx->next[ctx->next_num - 1];
6721 for (i = 0; list[i]; ++i) {
6728 *hdr = &cmd_set_raw_token_hdr;
6731 /** Token generator and output processing callback (cmdline API). */
6733 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
6736 cmd_set_raw_tok(arg0, arg2);
6738 cmd_set_raw_parsed(arg0);
6741 /** Global parser instance (cmdline API). */
6742 cmdline_parse_inst_t cmd_set_raw = {
6743 .f = cmd_set_raw_cb,
6744 .data = NULL, /**< Unused. */
6745 .help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
6748 }, /**< Tokens are returned by cmd_set_raw_tok(). */
6751 /* *** Display raw_encap/raw_decap buffers *** */
6752 struct cmd_show_set_raw_result {
6753 cmdline_fixed_string_t cmd_show;
6754 cmdline_fixed_string_t cmd_what;
6755 cmdline_fixed_string_t cmd_all;
6760 cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
6762 struct cmd_show_set_raw_result *res = parsed_result;
6763 uint16_t index = res->cmd_index;
6765 uint8_t *raw_data = NULL;
6766 size_t raw_size = 0;
6767 char title[16] = {0};
6771 if (!strcmp(res->cmd_all, "all")) {
6774 } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
6775 printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
6779 if (!strcmp(res->cmd_what, "raw_encap")) {
6780 raw_data = (uint8_t *)&raw_encap_confs[index].data;
6781 raw_size = raw_encap_confs[index].size;
6782 snprintf(title, 16, "\nindex: %u", index);
6783 rte_hexdump(stdout, title, raw_data, raw_size);
6785 raw_data = (uint8_t *)&raw_decap_confs[index].data;
6786 raw_size = raw_decap_confs[index].size;
6787 snprintf(title, 16, "\nindex: %u", index);
6788 rte_hexdump(stdout, title, raw_data, raw_size);
6790 } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
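/*
 * Illustrative usage, matching the help strings below:
 *   show raw_encap 3
 *   show raw_decap all
 */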
6793 cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
6794 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6796 cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
6797 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6798 cmd_what, "raw_encap#raw_decap");
6799 cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
6800 TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
6802 cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
6803 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6805 cmdline_parse_inst_t cmd_show_set_raw = {
6806 .f = cmd_show_set_raw_parsed,
6808 .help_str = "show <raw_encap|raw_decap> <index>",
6810 (void *)&cmd_show_set_raw_cmd_show,
6811 (void *)&cmd_show_set_raw_cmd_what,
6812 (void *)&cmd_show_set_raw_cmd_index,
6816 cmdline_parse_inst_t cmd_show_set_raw_all = {
6817 .f = cmd_show_set_raw_parsed,
6819 .help_str = "show <raw_encap|raw_decap> all",
6821 (void *)&cmd_show_set_raw_cmd_show,
6822 (void *)&cmd_show_set_raw_cmd_what,
6823 (void *)&cmd_show_set_raw_cmd_all,