1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
22 #include <cmdline_parse_string.h>
23 #include <cmdline_parse_num.h>
25 #include <rte_hexdump.h>
29 /** Parser token indices. */
52 /* Top-level command. */
54 /* Sub-level commands. */
59 /* Top-level command. */
61 /* Sub-level commands. */
70 /* Destroy arguments. */
73 /* Query arguments. */
79 /* Validate/create arguments. */
86 /* Validate/create pattern. */
123 ITEM_VLAN_INNER_TYPE,
155 ITEM_E_TAG_GRP_ECID_B,
164 ITEM_GRE_C_RSVD0_VER,
180 ITEM_ARP_ETH_IPV4_SHA,
181 ITEM_ARP_ETH_IPV4_SPA,
182 ITEM_ARP_ETH_IPV4_THA,
183 ITEM_ARP_ETH_IPV4_TPA,
185 ITEM_IPV6_EXT_NEXT_HDR,
190 ITEM_ICMP6_ND_NS_TARGET_ADDR,
192 ITEM_ICMP6_ND_NA_TARGET_ADDR,
194 ITEM_ICMP6_ND_OPT_TYPE,
195 ITEM_ICMP6_ND_OPT_SLA_ETH,
196 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
197 ITEM_ICMP6_ND_OPT_TLA_ETH,
198 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
211 ITEM_HIGIG2_CLASSIFICATION,
217 /* Validate/create actions. */
237 ACTION_RSS_FUNC_DEFAULT,
238 ACTION_RSS_FUNC_TOEPLITZ,
239 ACTION_RSS_FUNC_SIMPLE_XOR,
240 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
252 ACTION_PHY_PORT_ORIGINAL,
253 ACTION_PHY_PORT_INDEX,
255 ACTION_PORT_ID_ORIGINAL,
259 ACTION_OF_SET_MPLS_TTL,
260 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
261 ACTION_OF_DEC_MPLS_TTL,
262 ACTION_OF_SET_NW_TTL,
263 ACTION_OF_SET_NW_TTL_NW_TTL,
264 ACTION_OF_DEC_NW_TTL,
265 ACTION_OF_COPY_TTL_OUT,
266 ACTION_OF_COPY_TTL_IN,
269 ACTION_OF_PUSH_VLAN_ETHERTYPE,
270 ACTION_OF_SET_VLAN_VID,
271 ACTION_OF_SET_VLAN_VID_VLAN_VID,
272 ACTION_OF_SET_VLAN_PCP,
273 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
275 ACTION_OF_POP_MPLS_ETHERTYPE,
277 ACTION_OF_PUSH_MPLS_ETHERTYPE,
284 ACTION_MPLSOGRE_ENCAP,
285 ACTION_MPLSOGRE_DECAP,
286 ACTION_MPLSOUDP_ENCAP,
287 ACTION_MPLSOUDP_DECAP,
289 ACTION_SET_IPV4_SRC_IPV4_SRC,
291 ACTION_SET_IPV4_DST_IPV4_DST,
293 ACTION_SET_IPV6_SRC_IPV6_SRC,
295 ACTION_SET_IPV6_DST_IPV6_DST,
297 ACTION_SET_TP_SRC_TP_SRC,
299 ACTION_SET_TP_DST_TP_DST,
305 ACTION_SET_MAC_SRC_MAC_SRC,
307 ACTION_SET_MAC_DST_MAC_DST,
309 ACTION_INC_TCP_SEQ_VALUE,
311 ACTION_DEC_TCP_SEQ_VALUE,
313 ACTION_INC_TCP_ACK_VALUE,
315 ACTION_DEC_TCP_ACK_VALUE,
318 ACTION_RAW_ENCAP_INDEX,
319 ACTION_RAW_ENCAP_INDEX_VALUE,
320 ACTION_RAW_DECAP_INDEX,
321 ACTION_RAW_DECAP_INDEX_VALUE,
324 ACTION_SET_TAG_INDEX,
328 /** Maximum size for pattern in struct rte_flow_item_raw. */
329 #define ITEM_RAW_PATTERN_SIZE 40
331 /** Storage size for struct rte_flow_item_raw including pattern. */
332 #define ITEM_RAW_SIZE \
333 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
335 /** Maximum number of queue indices in struct rte_flow_action_rss. */
336 #define ACTION_RSS_QUEUE_NUM 128
338 /** Storage for struct rte_flow_action_rss including external data. */
339 struct action_rss_data {
340 struct rte_flow_action_rss conf;
341 uint8_t key[RSS_HASH_KEY_LENGTH];
342 uint16_t queue[ACTION_RSS_QUEUE_NUM];
345 /** Maximum data size in struct rte_flow_action_raw_encap. */
346 #define ACTION_RAW_ENCAP_MAX_DATA 128
/** Maximum number of stored raw encap/decap configurations (sizes raw_encap_confs[] and raw_decap_confs[]). */
347 #define RAW_ENCAP_CONFS_MAX_NUM 8
349 /** Storage for struct rte_flow_action_raw_encap. */
350 struct raw_encap_conf {
351 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
352 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
356 struct raw_encap_conf raw_encap_confs[RAW_ENCAP_CONFS_MAX_NUM];
358 /** Storage for struct rte_flow_action_raw_encap including external data. */
359 struct action_raw_encap_data {
360 struct rte_flow_action_raw_encap conf;
361 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
362 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
366 /** Storage for struct rte_flow_action_raw_decap. */
367 struct raw_decap_conf {
368 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
372 struct raw_decap_conf raw_decap_confs[RAW_ENCAP_CONFS_MAX_NUM];
374 /** Storage for struct rte_flow_action_raw_decap including external data. */
375 struct action_raw_decap_data {
376 struct rte_flow_action_raw_decap conf;
377 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
381 struct vxlan_encap_conf vxlan_encap_conf = {
385 .vni = "\x00\x00\x00",
387 .udp_dst = RTE_BE16(4789),
388 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
389 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
390 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
391 "\x00\x00\x00\x00\x00\x00\x00\x01",
392 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
393 "\x00\x00\x00\x00\x00\x00\x11\x11",
397 .eth_src = "\x00\x00\x00\x00\x00\x00",
398 .eth_dst = "\xff\xff\xff\xff\xff\xff",
401 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
402 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
404 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
405 struct action_vxlan_encap_data {
406 struct rte_flow_action_vxlan_encap conf;
407 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
408 struct rte_flow_item_eth item_eth;
409 struct rte_flow_item_vlan item_vlan;
411 struct rte_flow_item_ipv4 item_ipv4;
412 struct rte_flow_item_ipv6 item_ipv6;
414 struct rte_flow_item_udp item_udp;
415 struct rte_flow_item_vxlan item_vxlan;
418 struct nvgre_encap_conf nvgre_encap_conf = {
421 .tni = "\x00\x00\x00",
422 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
423 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
424 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
425 "\x00\x00\x00\x00\x00\x00\x00\x01",
426 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
427 "\x00\x00\x00\x00\x00\x00\x11\x11",
429 .eth_src = "\x00\x00\x00\x00\x00\x00",
430 .eth_dst = "\xff\xff\xff\xff\xff\xff",
433 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
434 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
436 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
437 struct action_nvgre_encap_data {
438 struct rte_flow_action_nvgre_encap conf;
439 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
440 struct rte_flow_item_eth item_eth;
441 struct rte_flow_item_vlan item_vlan;
443 struct rte_flow_item_ipv4 item_ipv4;
444 struct rte_flow_item_ipv6 item_ipv6;
446 struct rte_flow_item_nvgre item_nvgre;
449 struct l2_encap_conf l2_encap_conf;
451 struct l2_decap_conf l2_decap_conf;
453 struct mplsogre_encap_conf mplsogre_encap_conf;
455 struct mplsogre_decap_conf mplsogre_decap_conf;
457 struct mplsoudp_encap_conf mplsoudp_encap_conf;
459 struct mplsoudp_decap_conf mplsoudp_decap_conf;
461 /** Maximum number of subsequent tokens and arguments on the stack. */
462 #define CTX_STACK_SIZE 16
464 /** Parser context. */
466 /** Stack of subsequent token lists to process. */
467 const enum index *next[CTX_STACK_SIZE];
468 /** Arguments for stacked tokens. */
469 const void *args[CTX_STACK_SIZE];
470 enum index curr; /**< Current token index. */
471 enum index prev; /**< Index of the last token seen. */
472 int next_num; /**< Number of entries in next[]. */
473 int args_num; /**< Number of entries in args[]. */
474 uint32_t eol:1; /**< EOL has been detected. */
475 uint32_t last:1; /**< No more arguments. */
476 portid_t port; /**< Current port ID (for completions). */
477 uint32_t objdata; /**< Object-specific data. */
478 void *object; /**< Address of current object for relative offsets. */
479 void *objmask; /**< Object a full mask must be written to. */
482 /** Token argument. */
484 uint32_t hton:1; /**< Use network byte ordering. */
485 uint32_t sign:1; /**< Value is signed. */
486 uint32_t bounded:1; /**< Value is bounded. */
487 uintmax_t min; /**< Minimum value if bounded. */
488 uintmax_t max; /**< Maximum value if bounded. */
489 uint32_t offset; /**< Relative offset from ctx->object. */
490 uint32_t size; /**< Field size. */
491 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
494 /** Parser token definition. */
496 /** Type displayed during completion (defaults to "TOKEN"). */
498 /** Help displayed during completion (defaults to token name). */
500 /** Private data used by parser functions. */
503 * Lists of subsequent tokens to push on the stack. Each call to the
504 * parser consumes the last entry of that stack.
506 const enum index *const *next;
507 /** Arguments stack for subsequent tokens that need them. */
508 const struct arg *const *args;
510 * Token-processing callback, returns -1 in case of error, the
511 * length of the matched string otherwise. If NULL, attempts to
512 * match the token name.
514 * If buf is not NULL, the result should be stored in it according
515 * to context. An error is returned if not large enough.
517 int (*call)(struct context *ctx, const struct token *token,
518 const char *str, unsigned int len,
519 void *buf, unsigned int size);
521 * Callback that provides possible values for this token, used for
522 * completion. Returns -1 in case of error, the number of possible
523 * values otherwise. If NULL, the token name is used.
525 * If buf is not NULL, entry index ent is written to buf and the
526 * full length of the entry is returned (same behavior as
529 int (*comp)(struct context *ctx, const struct token *token,
530 unsigned int ent, char *buf, unsigned int size);
531 /** Mandatory token name, no default value. */
535 /** Static initializer for the next field; list is NULL-terminated. */
536 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
538 /** Static initializer for a NEXT() entry; token list is ZERO-terminated. */
539 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
541 /** Static initializer for the args field; list is NULL-terminated. */
542 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
544 /** Static initializer for ARGS() to target a field. */
545 #define ARGS_ENTRY(s, f) \
546 (&(const struct arg){ \
547 .offset = offsetof(s, f), \
548 .size = sizeof(((s *)0)->f), \
551 /** Static initializer for ARGS() to target a bit-field. */
552 #define ARGS_ENTRY_BF(s, f, b) \
553 (&(const struct arg){ \
555 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
558 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
559 #define ARGS_ENTRY_MASK(s, f, m) \
560 (&(const struct arg){ \
561 .offset = offsetof(s, f), \
562 .size = sizeof(((s *)0)->f), \
563 .mask = (const void *)(m), \
566 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
567 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
568 (&(const struct arg){ \
570 .offset = offsetof(s, f), \
571 .size = sizeof(((s *)0)->f), \
572 .mask = (const void *)(m), \
575 /** Static initializer for ARGS() to target a pointer. */
576 #define ARGS_ENTRY_PTR(s, f) \
577 (&(const struct arg){ \
578 .size = sizeof(*((s *)0)->f), \
581 /** Static initializer for ARGS() with arbitrary offset and size. */
582 #define ARGS_ENTRY_ARB(o, s) \
583 (&(const struct arg){ \
588 /** Same as ARGS_ENTRY_ARB() with bounded values. */
589 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
590 (&(const struct arg){ \
598 /** Same as ARGS_ENTRY() using network byte ordering. */
599 #define ARGS_ENTRY_HTON(s, f) \
600 (&(const struct arg){ \
602 .offset = offsetof(s, f), \
603 .size = sizeof(((s *)0)->f), \
606 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
607 #define ARG_ENTRY_HTON(s) \
608 (&(const struct arg){ \
614 /** Parser output buffer layout expected by cmd_flow_parsed(). */
616 enum index command; /**< Flow command. */
617 portid_t port; /**< Affected port ID. */
620 struct rte_flow_attr attr;
621 struct rte_flow_item *pattern;
622 struct rte_flow_action *actions;
626 } vc; /**< Validate/create arguments. */
630 } destroy; /**< Destroy arguments. */
633 struct rte_flow_action action;
634 } query; /**< Query arguments. */
638 } list; /**< List arguments. */
641 } isolate; /**< Isolated mode arguments. */
642 } args; /**< Command arguments. */
645 /** Private data for pattern items. */
646 struct parse_item_priv {
647 enum rte_flow_item_type type; /**< Item type. */
648 uint32_t size; /**< Size of item specification structure. */
651 #define PRIV_ITEM(t, s) \
652 (&(const struct parse_item_priv){ \
653 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
657 /** Private data for actions. */
658 struct parse_action_priv {
659 enum rte_flow_action_type type; /**< Action type. */
660 uint32_t size; /**< Size of action configuration structure. */
663 #define PRIV_ACTION(t, s) \
664 (&(const struct parse_action_priv){ \
665 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
669 static const enum index next_vc_attr[] = {
679 static const enum index next_destroy_attr[] = {
685 static const enum index next_list_attr[] = {
691 static const enum index item_param[] = {
700 static const enum index next_item[] = {
736 ITEM_ICMP6_ND_OPT_SLA_ETH,
737 ITEM_ICMP6_ND_OPT_TLA_ETH,
750 static const enum index item_fuzzy[] = {
756 static const enum index item_any[] = {
762 static const enum index item_vf[] = {
768 static const enum index item_phy_port[] = {
774 static const enum index item_port_id[] = {
780 static const enum index item_mark[] = {
786 static const enum index item_raw[] = {
796 static const enum index item_eth[] = {
804 static const enum index item_vlan[] = {
809 ITEM_VLAN_INNER_TYPE,
814 static const enum index item_ipv4[] = {
824 static const enum index item_ipv6[] = {
835 static const enum index item_icmp[] = {
842 static const enum index item_udp[] = {
849 static const enum index item_tcp[] = {
857 static const enum index item_sctp[] = {
866 static const enum index item_vxlan[] = {
872 static const enum index item_e_tag[] = {
873 ITEM_E_TAG_GRP_ECID_B,
878 static const enum index item_nvgre[] = {
884 static const enum index item_mpls[] = {
892 static const enum index item_gre[] = {
894 ITEM_GRE_C_RSVD0_VER,
902 static const enum index item_gre_key[] = {
908 static const enum index item_gtp[] = {
914 static const enum index item_geneve[] = {
921 static const enum index item_vxlan_gpe[] = {
927 static const enum index item_arp_eth_ipv4[] = {
928 ITEM_ARP_ETH_IPV4_SHA,
929 ITEM_ARP_ETH_IPV4_SPA,
930 ITEM_ARP_ETH_IPV4_THA,
931 ITEM_ARP_ETH_IPV4_TPA,
936 static const enum index item_ipv6_ext[] = {
937 ITEM_IPV6_EXT_NEXT_HDR,
942 static const enum index item_icmp6[] = {
949 static const enum index item_icmp6_nd_ns[] = {
950 ITEM_ICMP6_ND_NS_TARGET_ADDR,
955 static const enum index item_icmp6_nd_na[] = {
956 ITEM_ICMP6_ND_NA_TARGET_ADDR,
961 static const enum index item_icmp6_nd_opt[] = {
962 ITEM_ICMP6_ND_OPT_TYPE,
967 static const enum index item_icmp6_nd_opt_sla_eth[] = {
968 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
973 static const enum index item_icmp6_nd_opt_tla_eth[] = {
974 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
979 static const enum index item_meta[] = {
985 static const enum index item_gtp_psc[] = {
992 static const enum index item_pppoed[] = {
998 static const enum index item_pppoes[] = {
1004 static const enum index item_pppoe_proto_id[] = {
1005 ITEM_PPPOE_PROTO_ID,
1010 static const enum index item_higig2[] = {
1011 ITEM_HIGIG2_CLASSIFICATION,
1017 static const enum index next_set_raw[] = {
1023 static const enum index item_tag[] = {
1030 static const enum index next_action[] = {
1046 ACTION_OF_SET_MPLS_TTL,
1047 ACTION_OF_DEC_MPLS_TTL,
1048 ACTION_OF_SET_NW_TTL,
1049 ACTION_OF_DEC_NW_TTL,
1050 ACTION_OF_COPY_TTL_OUT,
1051 ACTION_OF_COPY_TTL_IN,
1053 ACTION_OF_PUSH_VLAN,
1054 ACTION_OF_SET_VLAN_VID,
1055 ACTION_OF_SET_VLAN_PCP,
1057 ACTION_OF_PUSH_MPLS,
1064 ACTION_MPLSOGRE_ENCAP,
1065 ACTION_MPLSOGRE_DECAP,
1066 ACTION_MPLSOUDP_ENCAP,
1067 ACTION_MPLSOUDP_DECAP,
1068 ACTION_SET_IPV4_SRC,
1069 ACTION_SET_IPV4_DST,
1070 ACTION_SET_IPV6_SRC,
1071 ACTION_SET_IPV6_DST,
1089 static const enum index action_mark[] = {
1095 static const enum index action_queue[] = {
1101 static const enum index action_count[] = {
1103 ACTION_COUNT_SHARED,
1108 static const enum index action_rss[] = {
1119 static const enum index action_vf[] = {
1126 static const enum index action_phy_port[] = {
1127 ACTION_PHY_PORT_ORIGINAL,
1128 ACTION_PHY_PORT_INDEX,
1133 static const enum index action_port_id[] = {
1134 ACTION_PORT_ID_ORIGINAL,
1140 static const enum index action_meter[] = {
1146 static const enum index action_of_set_mpls_ttl[] = {
1147 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1152 static const enum index action_of_set_nw_ttl[] = {
1153 ACTION_OF_SET_NW_TTL_NW_TTL,
1158 static const enum index action_of_push_vlan[] = {
1159 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1164 static const enum index action_of_set_vlan_vid[] = {
1165 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1170 static const enum index action_of_set_vlan_pcp[] = {
1171 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1176 static const enum index action_of_pop_mpls[] = {
1177 ACTION_OF_POP_MPLS_ETHERTYPE,
1182 static const enum index action_of_push_mpls[] = {
1183 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1188 static const enum index action_set_ipv4_src[] = {
1189 ACTION_SET_IPV4_SRC_IPV4_SRC,
1194 static const enum index action_set_mac_src[] = {
1195 ACTION_SET_MAC_SRC_MAC_SRC,
1200 static const enum index action_set_ipv4_dst[] = {
1201 ACTION_SET_IPV4_DST_IPV4_DST,
1206 static const enum index action_set_ipv6_src[] = {
1207 ACTION_SET_IPV6_SRC_IPV6_SRC,
1212 static const enum index action_set_ipv6_dst[] = {
1213 ACTION_SET_IPV6_DST_IPV6_DST,
1218 static const enum index action_set_tp_src[] = {
1219 ACTION_SET_TP_SRC_TP_SRC,
1224 static const enum index action_set_tp_dst[] = {
1225 ACTION_SET_TP_DST_TP_DST,
1230 static const enum index action_set_ttl[] = {
1236 static const enum index action_jump[] = {
1242 static const enum index action_set_mac_dst[] = {
1243 ACTION_SET_MAC_DST_MAC_DST,
1248 static const enum index action_inc_tcp_seq[] = {
1249 ACTION_INC_TCP_SEQ_VALUE,
1254 static const enum index action_dec_tcp_seq[] = {
1255 ACTION_DEC_TCP_SEQ_VALUE,
1260 static const enum index action_inc_tcp_ack[] = {
1261 ACTION_INC_TCP_ACK_VALUE,
1266 static const enum index action_dec_tcp_ack[] = {
1267 ACTION_DEC_TCP_ACK_VALUE,
1272 static const enum index action_raw_encap[] = {
1273 ACTION_RAW_ENCAP_INDEX,
1278 static const enum index action_raw_decap[] = {
1279 ACTION_RAW_DECAP_INDEX,
1284 static const enum index action_set_tag[] = {
1285 ACTION_SET_TAG_DATA,
1286 ACTION_SET_TAG_INDEX,
1287 ACTION_SET_TAG_MASK,
1292 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1293 const char *, unsigned int,
1294 void *, unsigned int);
1295 static int parse_set_init(struct context *, const struct token *,
1296 const char *, unsigned int,
1297 void *, unsigned int);
1298 static int parse_init(struct context *, const struct token *,
1299 const char *, unsigned int,
1300 void *, unsigned int);
1301 static int parse_vc(struct context *, const struct token *,
1302 const char *, unsigned int,
1303 void *, unsigned int);
1304 static int parse_vc_spec(struct context *, const struct token *,
1305 const char *, unsigned int, void *, unsigned int);
1306 static int parse_vc_conf(struct context *, const struct token *,
1307 const char *, unsigned int, void *, unsigned int);
1308 static int parse_vc_action_rss(struct context *, const struct token *,
1309 const char *, unsigned int, void *,
1311 static int parse_vc_action_rss_func(struct context *, const struct token *,
1312 const char *, unsigned int, void *,
1314 static int parse_vc_action_rss_type(struct context *, const struct token *,
1315 const char *, unsigned int, void *,
1317 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1318 const char *, unsigned int, void *,
1320 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1321 const char *, unsigned int, void *,
1323 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1324 const char *, unsigned int, void *,
1326 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1327 const char *, unsigned int, void *,
1329 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1330 const char *, unsigned int, void *,
1332 static int parse_vc_action_mplsogre_encap(struct context *,
1333 const struct token *, const char *,
1334 unsigned int, void *, unsigned int);
1335 static int parse_vc_action_mplsogre_decap(struct context *,
1336 const struct token *, const char *,
1337 unsigned int, void *, unsigned int);
1338 static int parse_vc_action_mplsoudp_encap(struct context *,
1339 const struct token *, const char *,
1340 unsigned int, void *, unsigned int);
1341 static int parse_vc_action_mplsoudp_decap(struct context *,
1342 const struct token *, const char *,
1343 unsigned int, void *, unsigned int);
1344 static int parse_vc_action_raw_encap(struct context *,
1345 const struct token *, const char *,
1346 unsigned int, void *, unsigned int);
1347 static int parse_vc_action_raw_decap(struct context *,
1348 const struct token *, const char *,
1349 unsigned int, void *, unsigned int);
1350 static int parse_vc_action_raw_encap_index(struct context *,
1351 const struct token *, const char *,
1352 unsigned int, void *, unsigned int);
1353 static int parse_vc_action_raw_decap_index(struct context *,
1354 const struct token *, const char *,
1355 unsigned int, void *, unsigned int);
1356 static int parse_destroy(struct context *, const struct token *,
1357 const char *, unsigned int,
1358 void *, unsigned int);
1359 static int parse_flush(struct context *, const struct token *,
1360 const char *, unsigned int,
1361 void *, unsigned int);
1362 static int parse_query(struct context *, const struct token *,
1363 const char *, unsigned int,
1364 void *, unsigned int);
1365 static int parse_action(struct context *, const struct token *,
1366 const char *, unsigned int,
1367 void *, unsigned int);
1368 static int parse_list(struct context *, const struct token *,
1369 const char *, unsigned int,
1370 void *, unsigned int);
1371 static int parse_isolate(struct context *, const struct token *,
1372 const char *, unsigned int,
1373 void *, unsigned int);
1374 static int parse_int(struct context *, const struct token *,
1375 const char *, unsigned int,
1376 void *, unsigned int);
1377 static int parse_prefix(struct context *, const struct token *,
1378 const char *, unsigned int,
1379 void *, unsigned int);
1380 static int parse_boolean(struct context *, const struct token *,
1381 const char *, unsigned int,
1382 void *, unsigned int);
1383 static int parse_string(struct context *, const struct token *,
1384 const char *, unsigned int,
1385 void *, unsigned int);
1386 static int parse_hex(struct context *ctx, const struct token *token,
1387 const char *str, unsigned int len,
1388 void *buf, unsigned int size);
1389 static int parse_mac_addr(struct context *, const struct token *,
1390 const char *, unsigned int,
1391 void *, unsigned int);
1392 static int parse_ipv4_addr(struct context *, const struct token *,
1393 const char *, unsigned int,
1394 void *, unsigned int);
1395 static int parse_ipv6_addr(struct context *, const struct token *,
1396 const char *, unsigned int,
1397 void *, unsigned int);
1398 static int parse_port(struct context *, const struct token *,
1399 const char *, unsigned int,
1400 void *, unsigned int);
1401 static int comp_none(struct context *, const struct token *,
1402 unsigned int, char *, unsigned int);
1403 static int comp_boolean(struct context *, const struct token *,
1404 unsigned int, char *, unsigned int);
1405 static int comp_action(struct context *, const struct token *,
1406 unsigned int, char *, unsigned int);
1407 static int comp_port(struct context *, const struct token *,
1408 unsigned int, char *, unsigned int);
1409 static int comp_rule_id(struct context *, const struct token *,
1410 unsigned int, char *, unsigned int);
1411 static int comp_vc_action_rss_type(struct context *, const struct token *,
1412 unsigned int, char *, unsigned int);
1413 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1414 unsigned int, char *, unsigned int);
1415 static int comp_set_raw_index(struct context *, const struct token *,
1416 unsigned int, char *, unsigned int);
1418 /** Token definitions. */
1419 static const struct token token_list[] = {
1420 /* Special tokens. */
1423 .help = "null entry, abused as the entry point",
1424 .next = NEXT(NEXT_ENTRY(FLOW)),
1429 .help = "command may end here",
1432 .name = "START_SET",
1433 .help = "null entry, abused as the entry point for set",
1434 .next = NEXT(NEXT_ENTRY(SET)),
1439 .help = "set command may end here",
1441 /* Common tokens. */
1445 .help = "integer value",
1450 .name = "{unsigned}",
1452 .help = "unsigned integer value",
1459 .help = "prefix length for bit-mask",
1460 .call = parse_prefix,
1464 .name = "{boolean}",
1466 .help = "any boolean value",
1467 .call = parse_boolean,
1468 .comp = comp_boolean,
1473 .help = "fixed string",
1474 .call = parse_string,
1480 .help = "fixed string",
1485 .name = "{MAC address}",
1487 .help = "standard MAC address notation",
1488 .call = parse_mac_addr,
1492 .name = "{IPv4 address}",
1493 .type = "IPV4 ADDRESS",
1494 .help = "standard IPv4 address notation",
1495 .call = parse_ipv4_addr,
1499 .name = "{IPv6 address}",
1500 .type = "IPV6 ADDRESS",
1501 .help = "standard IPv6 address notation",
1502 .call = parse_ipv6_addr,
1506 .name = "{rule id}",
1508 .help = "rule identifier",
1510 .comp = comp_rule_id,
1513 .name = "{port_id}",
1515 .help = "port identifier",
1520 .name = "{group_id}",
1522 .help = "group identifier",
1526 [PRIORITY_LEVEL] = {
1529 .help = "priority level",
1533 /* Top-level command. */
1536 .type = "{command} {port_id} [{arg} [...]]",
1537 .help = "manage ingress/egress flow rules",
1538 .next = NEXT(NEXT_ENTRY
1548 /* Sub-level commands. */
1551 .help = "check whether a flow rule can be created",
1552 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1553 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1558 .help = "create a flow rule",
1559 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1560 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1565 .help = "destroy specific flow rules",
1566 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1567 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1568 .call = parse_destroy,
1572 .help = "destroy all flow rules",
1573 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1574 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1575 .call = parse_flush,
1579 .help = "query an existing flow rule",
1580 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1581 NEXT_ENTRY(RULE_ID),
1582 NEXT_ENTRY(PORT_ID)),
1583 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1584 ARGS_ENTRY(struct buffer, args.query.rule),
1585 ARGS_ENTRY(struct buffer, port)),
1586 .call = parse_query,
1590 .help = "list existing flow rules",
1591 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1592 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1597 .help = "restrict ingress traffic to the defined flow rules",
1598 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1599 NEXT_ENTRY(PORT_ID)),
1600 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1601 ARGS_ENTRY(struct buffer, port)),
1602 .call = parse_isolate,
1604 /* Destroy arguments. */
1607 .help = "specify a rule identifier",
1608 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1609 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1610 .call = parse_destroy,
1612 /* Query arguments. */
1616 .help = "action to query, must be part of the rule",
1617 .call = parse_action,
1618 .comp = comp_action,
1620 /* List arguments. */
1623 .help = "specify a group",
1624 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1625 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1628 /* Validate/create attributes. */
1631 .help = "specify a group",
1632 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1633 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1638 .help = "specify a priority level",
1639 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1640 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1645 .help = "affect rule to ingress",
1646 .next = NEXT(next_vc_attr),
1651 .help = "affect rule to egress",
1652 .next = NEXT(next_vc_attr),
1657 .help = "apply rule directly to endpoints found in pattern",
1658 .next = NEXT(next_vc_attr),
1661 /* Validate/create pattern. */
1664 .help = "submit a list of pattern items",
1665 .next = NEXT(next_item),
1670 .help = "match value perfectly (with full bit-mask)",
1671 .call = parse_vc_spec,
1673 [ITEM_PARAM_SPEC] = {
1675 .help = "match value according to configured bit-mask",
1676 .call = parse_vc_spec,
1678 [ITEM_PARAM_LAST] = {
1680 .help = "specify upper bound to establish a range",
1681 .call = parse_vc_spec,
1683 [ITEM_PARAM_MASK] = {
1685 .help = "specify bit-mask with relevant bits set to one",
1686 .call = parse_vc_spec,
1688 [ITEM_PARAM_PREFIX] = {
1690 .help = "generate bit-mask from a prefix length",
1691 .call = parse_vc_spec,
1695 .help = "specify next pattern item",
1696 .next = NEXT(next_item),
1700 .help = "end list of pattern items",
1701 .priv = PRIV_ITEM(END, 0),
1702 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1707 .help = "no-op pattern item",
1708 .priv = PRIV_ITEM(VOID, 0),
1709 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1714 .help = "perform actions when pattern does not match",
1715 .priv = PRIV_ITEM(INVERT, 0),
1716 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1721 .help = "match any protocol for the current layer",
1722 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1723 .next = NEXT(item_any),
1728 .help = "number of layers covered",
1729 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1730 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1734 .help = "match traffic from/to the physical function",
1735 .priv = PRIV_ITEM(PF, 0),
1736 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1741 .help = "match traffic from/to a virtual function ID",
1742 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1743 .next = NEXT(item_vf),
1749 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1750 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1754 .help = "match traffic from/to a specific physical port",
1755 .priv = PRIV_ITEM(PHY_PORT,
1756 sizeof(struct rte_flow_item_phy_port)),
1757 .next = NEXT(item_phy_port),
1760 [ITEM_PHY_PORT_INDEX] = {
1762 .help = "physical port index",
1763 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1764 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1768 .help = "match traffic from/to a given DPDK port ID",
1769 .priv = PRIV_ITEM(PORT_ID,
1770 sizeof(struct rte_flow_item_port_id)),
1771 .next = NEXT(item_port_id),
1774 [ITEM_PORT_ID_ID] = {
1776 .help = "DPDK port ID",
1777 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1778 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1782 .help = "match traffic against value set in previously matched rule",
1783 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1784 .next = NEXT(item_mark),
1789 .help = "Integer value to match against",
1790 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1791 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1795 .help = "match an arbitrary byte string",
1796 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1797 .next = NEXT(item_raw),
1800 [ITEM_RAW_RELATIVE] = {
1802 .help = "look for pattern after the previous item",
1803 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1804 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1807 [ITEM_RAW_SEARCH] = {
1809 .help = "search pattern from offset (see also limit)",
1810 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1811 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1814 [ITEM_RAW_OFFSET] = {
1816 .help = "absolute or relative offset for pattern",
1817 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1818 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1820 [ITEM_RAW_LIMIT] = {
1822 .help = "search area limit for start of pattern",
1823 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1824 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1826 [ITEM_RAW_PATTERN] = {
1828 .help = "byte string to look for",
1829 .next = NEXT(item_raw,
1831 NEXT_ENTRY(ITEM_PARAM_IS,
1834 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1835 ARGS_ENTRY(struct rte_flow_item_raw, length),
1836 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1837 ITEM_RAW_PATTERN_SIZE)),
1841 .help = "match Ethernet header",
1842 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1843 .next = NEXT(item_eth),
1848 .help = "destination MAC",
1849 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1850 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1854 .help = "source MAC",
1855 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1856 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1860 .help = "EtherType",
1861 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1862 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1866 .help = "match 802.1Q/ad VLAN tag",
1867 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1868 .next = NEXT(item_vlan),
1873 .help = "tag control information",
1874 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1875 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1879 .help = "priority code point",
1880 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1881 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1886 .help = "drop eligible indicator",
1887 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1888 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1893 .help = "VLAN identifier",
1894 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1895 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1898 [ITEM_VLAN_INNER_TYPE] = {
1899 .name = "inner_type",
1900 .help = "inner EtherType",
1901 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1902 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1907 .help = "match IPv4 header",
1908 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1909 .next = NEXT(item_ipv4),
1914 .help = "type of service",
1915 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1916 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1917 hdr.type_of_service)),
1921 .help = "time to live",
1922 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1923 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1926 [ITEM_IPV4_PROTO] = {
1928 .help = "next protocol ID",
1929 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1930 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1931 hdr.next_proto_id)),
1935 .help = "source address",
1936 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1937 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1942 .help = "destination address",
1943 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1944 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1949 .help = "match IPv6 header",
1950 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1951 .next = NEXT(item_ipv6),
1956 .help = "traffic class",
1957 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1958 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1960 "\x0f\xf0\x00\x00")),
1962 [ITEM_IPV6_FLOW] = {
1964 .help = "flow label",
1965 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1966 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1968 "\x00\x0f\xff\xff")),
1970 [ITEM_IPV6_PROTO] = {
1972 .help = "protocol (next header)",
1973 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1974 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1979 .help = "hop limit",
1980 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1981 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1986 .help = "source address",
1987 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1988 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1993 .help = "destination address",
1994 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1995 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
2000 .help = "match ICMP header",
2001 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
2002 .next = NEXT(item_icmp),
2005 [ITEM_ICMP_TYPE] = {
2007 .help = "ICMP packet type",
2008 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2009 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2012 [ITEM_ICMP_CODE] = {
2014 .help = "ICMP packet code",
2015 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
2016 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
2021 .help = "match UDP header",
2022 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
2023 .next = NEXT(item_udp),
2028 .help = "UDP source port",
2029 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2030 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2035 .help = "UDP destination port",
2036 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
2037 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
2042 .help = "match TCP header",
2043 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
2044 .next = NEXT(item_tcp),
2049 .help = "TCP source port",
2050 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2051 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2056 .help = "TCP destination port",
2057 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2058 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2061 [ITEM_TCP_FLAGS] = {
2063 .help = "TCP flags",
2064 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2065 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2070 .help = "match SCTP header",
2071 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2072 .next = NEXT(item_sctp),
2077 .help = "SCTP source port",
2078 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2079 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2084 .help = "SCTP destination port",
2085 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2086 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2091 .help = "validation tag",
2092 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2093 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2096 [ITEM_SCTP_CKSUM] = {
2099 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2100 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2105 .help = "match VXLAN header",
2106 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2107 .next = NEXT(item_vxlan),
2110 [ITEM_VXLAN_VNI] = {
2112 .help = "VXLAN identifier",
2113 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2114 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2118 .help = "match E-Tag header",
2119 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2120 .next = NEXT(item_e_tag),
2123 [ITEM_E_TAG_GRP_ECID_B] = {
2124 .name = "grp_ecid_b",
2125 .help = "GRP and E-CID base",
2126 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2127 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2133 .help = "match NVGRE header",
2134 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2135 .next = NEXT(item_nvgre),
2138 [ITEM_NVGRE_TNI] = {
2140 .help = "virtual subnet ID",
2141 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2142 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2146 .help = "match MPLS header",
2147 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2148 .next = NEXT(item_mpls),
2151 [ITEM_MPLS_LABEL] = {
2153 .help = "MPLS label",
2154 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2155 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2161 .help = "MPLS Traffic Class",
2162 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2163 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2169 .help = "MPLS Bottom-of-Stack",
2170 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2171 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2177 .help = "match GRE header",
2178 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2179 .next = NEXT(item_gre),
2182 [ITEM_GRE_PROTO] = {
2184 .help = "GRE protocol type",
2185 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2186 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2189 [ITEM_GRE_C_RSVD0_VER] = {
2190 .name = "c_rsvd0_ver",
2192 "checksum (1b), undefined (1b), key bit (1b),"
2193 " sequence number (1b), reserved 0 (9b),"
2195 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2196 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2199 [ITEM_GRE_C_BIT] = {
2201 .help = "checksum bit (C)",
2202 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2203 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2205 "\x80\x00\x00\x00")),
2207 [ITEM_GRE_S_BIT] = {
2209 .help = "sequence number bit (S)",
2210 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2211 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2213 "\x10\x00\x00\x00")),
2215 [ITEM_GRE_K_BIT] = {
2217 .help = "key bit (K)",
2218 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2219 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2221 "\x20\x00\x00\x00")),
2225 .help = "fuzzy pattern match, expect faster than default",
2226 .priv = PRIV_ITEM(FUZZY,
2227 sizeof(struct rte_flow_item_fuzzy)),
2228 .next = NEXT(item_fuzzy),
2231 [ITEM_FUZZY_THRESH] = {
2233 .help = "match accuracy threshold",
2234 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2235 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2240 .help = "match GTP header",
2241 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2242 .next = NEXT(item_gtp),
2247 .help = "tunnel endpoint identifier",
2248 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2249 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2253 .help = "match GTP header",
2254 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2255 .next = NEXT(item_gtp),
2260 .help = "match GTP header",
2261 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2262 .next = NEXT(item_gtp),
2267 .help = "match GENEVE header",
2268 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2269 .next = NEXT(item_geneve),
2272 [ITEM_GENEVE_VNI] = {
2274 .help = "virtual network identifier",
2275 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2276 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2278 [ITEM_GENEVE_PROTO] = {
2280 .help = "GENEVE protocol type",
2281 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2282 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2285 [ITEM_VXLAN_GPE] = {
2286 .name = "vxlan-gpe",
2287 .help = "match VXLAN-GPE header",
2288 .priv = PRIV_ITEM(VXLAN_GPE,
2289 sizeof(struct rte_flow_item_vxlan_gpe)),
2290 .next = NEXT(item_vxlan_gpe),
2293 [ITEM_VXLAN_GPE_VNI] = {
2295 .help = "VXLAN-GPE identifier",
2296 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2297 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2300 [ITEM_ARP_ETH_IPV4] = {
2301 .name = "arp_eth_ipv4",
2302 .help = "match ARP header for Ethernet/IPv4",
2303 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2304 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2305 .next = NEXT(item_arp_eth_ipv4),
2308 [ITEM_ARP_ETH_IPV4_SHA] = {
2310 .help = "sender hardware address",
2311 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2313 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2316 [ITEM_ARP_ETH_IPV4_SPA] = {
2318 .help = "sender IPv4 address",
2319 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2321 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2324 [ITEM_ARP_ETH_IPV4_THA] = {
2326 .help = "target hardware address",
2327 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2329 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2332 [ITEM_ARP_ETH_IPV4_TPA] = {
2334 .help = "target IPv4 address",
2335 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2337 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2342 .help = "match presence of any IPv6 extension header",
2343 .priv = PRIV_ITEM(IPV6_EXT,
2344 sizeof(struct rte_flow_item_ipv6_ext)),
2345 .next = NEXT(item_ipv6_ext),
2348 [ITEM_IPV6_EXT_NEXT_HDR] = {
2350 .help = "next header",
2351 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2352 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2357 .help = "match any ICMPv6 header",
2358 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2359 .next = NEXT(item_icmp6),
2362 [ITEM_ICMP6_TYPE] = {
2364 .help = "ICMPv6 type",
2365 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2366 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2369 [ITEM_ICMP6_CODE] = {
2371 .help = "ICMPv6 code",
2372 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2373 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2376 [ITEM_ICMP6_ND_NS] = {
2377 .name = "icmp6_nd_ns",
2378 .help = "match ICMPv6 neighbor discovery solicitation",
2379 .priv = PRIV_ITEM(ICMP6_ND_NS,
2380 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2381 .next = NEXT(item_icmp6_nd_ns),
2384 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2385 .name = "target_addr",
2386 .help = "target address",
2387 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2389 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2392 [ITEM_ICMP6_ND_NA] = {
2393 .name = "icmp6_nd_na",
2394 .help = "match ICMPv6 neighbor discovery advertisement",
2395 .priv = PRIV_ITEM(ICMP6_ND_NA,
2396 sizeof(struct rte_flow_item_icmp6_nd_na)),
2397 .next = NEXT(item_icmp6_nd_na),
2400 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2401 .name = "target_addr",
2402 .help = "target address",
2403 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2405 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2408 [ITEM_ICMP6_ND_OPT] = {
2409 .name = "icmp6_nd_opt",
2410 .help = "match presence of any ICMPv6 neighbor discovery"
2412 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2413 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2414 .next = NEXT(item_icmp6_nd_opt),
2417 [ITEM_ICMP6_ND_OPT_TYPE] = {
2419 .help = "ND option type",
2420 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2422 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2425 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2426 .name = "icmp6_nd_opt_sla_eth",
2427 .help = "match ICMPv6 neighbor discovery source Ethernet"
2428 " link-layer address option",
2430 (ICMP6_ND_OPT_SLA_ETH,
2431 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2432 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2435 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2437 .help = "source Ethernet LLA",
2438 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2440 .args = ARGS(ARGS_ENTRY_HTON
2441 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2443 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2444 .name = "icmp6_nd_opt_tla_eth",
2445 .help = "match ICMPv6 neighbor discovery target Ethernet"
2446 " link-layer address option",
2448 (ICMP6_ND_OPT_TLA_ETH,
2449 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2450 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2453 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2455 .help = "target Ethernet LLA",
2456 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2458 .args = ARGS(ARGS_ENTRY_HTON
2459 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2463 .help = "match metadata header",
2464 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2465 .next = NEXT(item_meta),
2468 [ITEM_META_DATA] = {
2470 .help = "metadata value",
2471 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2472 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2473 data, "\xff\xff\xff\xff")),
2477 .help = "match GRE key",
2478 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2479 .next = NEXT(item_gre_key),
2482 [ITEM_GRE_KEY_VALUE] = {
2484 .help = "key value",
2485 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2486 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2490 .help = "match GTP extension header with type 0x85",
2491 .priv = PRIV_ITEM(GTP_PSC,
2492 sizeof(struct rte_flow_item_gtp_psc)),
2493 .next = NEXT(item_gtp_psc),
2496 [ITEM_GTP_PSC_QFI] = {
2498 .help = "QoS flow identifier",
2499 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2500 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2503 [ITEM_GTP_PSC_PDU_T] = {
2506 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2507 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2512 .help = "match PPPoE session header",
2513 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2514 .next = NEXT(item_pppoes),
2519 .help = "match PPPoE discovery header",
2520 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2521 .next = NEXT(item_pppoed),
2524 [ITEM_PPPOE_SEID] = {
2526 .help = "session identifier",
2527 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2528 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2531 [ITEM_PPPOE_PROTO_ID] = {
2533 .help = "match PPPoE session protocol identifier",
2534 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2535 sizeof(struct rte_flow_item_pppoe_proto_id)),
2536 .next = NEXT(item_pppoe_proto_id),
2541 .help = "matches higig2 header",
2542 .priv = PRIV_ITEM(HIGIG2,
2543 sizeof(struct rte_flow_item_higig2_hdr)),
2544 .next = NEXT(item_higig2),
2547 [ITEM_HIGIG2_CLASSIFICATION] = {
2548 .name = "classification",
2549 .help = "matches classification of higig2 header",
2550 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2551 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2552 hdr.ppt1.classification)),
2554 [ITEM_HIGIG2_VID] = {
2556 .help = "matches vid of higig2 header",
2557 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2558 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2563 .help = "match tag value",
2564 .priv = PRIV_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
2565 .next = NEXT(item_tag),
2570 .help = "tag value to match",
2571 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED), item_param),
2572 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tag, data)),
2574 [ITEM_TAG_INDEX] = {
2576 .help = "index of tag array to match",
2577 .next = NEXT(item_tag, NEXT_ENTRY(UNSIGNED),
2578 NEXT_ENTRY(ITEM_PARAM_IS)),
2579 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_tag, index)),
2581 /* Validate/create actions. */
2584 .help = "submit a list of associated actions",
2585 .next = NEXT(next_action),
2590 .help = "specify next action",
2591 .next = NEXT(next_action),
2595 .help = "end list of actions",
2596 .priv = PRIV_ACTION(END, 0),
2601 .help = "no-op action",
2602 .priv = PRIV_ACTION(VOID, 0),
2603 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2606 [ACTION_PASSTHRU] = {
2608 .help = "let subsequent rule process matched packets",
2609 .priv = PRIV_ACTION(PASSTHRU, 0),
2610 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2615 .help = "redirect traffic to a given group",
2616 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2617 .next = NEXT(action_jump),
2620 [ACTION_JUMP_GROUP] = {
2622 .help = "group to redirect traffic to",
2623 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2624 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2625 .call = parse_vc_conf,
2629 .help = "attach 32 bit value to packets",
2630 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2631 .next = NEXT(action_mark),
2634 [ACTION_MARK_ID] = {
2636 .help = "32 bit value to return with packets",
2637 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2638 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2639 .call = parse_vc_conf,
2643 .help = "flag packets",
2644 .priv = PRIV_ACTION(FLAG, 0),
2645 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2650 .help = "assign packets to a given queue index",
2651 .priv = PRIV_ACTION(QUEUE,
2652 sizeof(struct rte_flow_action_queue)),
2653 .next = NEXT(action_queue),
2656 [ACTION_QUEUE_INDEX] = {
2658 .help = "queue index to use",
2659 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2660 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2661 .call = parse_vc_conf,
2665 .help = "drop packets (note: passthru has priority)",
2666 .priv = PRIV_ACTION(DROP, 0),
2667 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2672 .help = "enable counters for this rule",
2673 .priv = PRIV_ACTION(COUNT,
2674 sizeof(struct rte_flow_action_count)),
2675 .next = NEXT(action_count),
2678 [ACTION_COUNT_ID] = {
2679 .name = "identifier",
2680 .help = "counter identifier to use",
2681 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2682 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2683 .call = parse_vc_conf,
2685 [ACTION_COUNT_SHARED] = {
2687 .help = "shared counter",
2688 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2689 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2691 .call = parse_vc_conf,
2695 .help = "spread packets among several queues",
2696 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2697 .next = NEXT(action_rss),
2698 .call = parse_vc_action_rss,
2700 [ACTION_RSS_FUNC] = {
2702 .help = "RSS hash function to apply",
2703 .next = NEXT(action_rss,
2704 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2705 ACTION_RSS_FUNC_TOEPLITZ,
2706 ACTION_RSS_FUNC_SIMPLE_XOR,
2707 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2709 [ACTION_RSS_FUNC_DEFAULT] = {
2711 .help = "default hash function",
2712 .call = parse_vc_action_rss_func,
2714 [ACTION_RSS_FUNC_TOEPLITZ] = {
2716 .help = "Toeplitz hash function",
2717 .call = parse_vc_action_rss_func,
2719 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2720 .name = "simple_xor",
2721 .help = "simple XOR hash function",
2722 .call = parse_vc_action_rss_func,
2724 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2725 .name = "symmetric_toeplitz",
2726 .help = "Symmetric Toeplitz hash function",
2727 .call = parse_vc_action_rss_func,
2729 [ACTION_RSS_LEVEL] = {
2731 .help = "encapsulation level for \"types\"",
2732 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2733 .args = ARGS(ARGS_ENTRY_ARB
2734 (offsetof(struct action_rss_data, conf) +
2735 offsetof(struct rte_flow_action_rss, level),
2736 sizeof(((struct rte_flow_action_rss *)0)->
2739 [ACTION_RSS_TYPES] = {
2741 .help = "specific RSS hash types",
2742 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2744 [ACTION_RSS_TYPE] = {
2746 .help = "RSS hash type",
2747 .call = parse_vc_action_rss_type,
2748 .comp = comp_vc_action_rss_type,
2750 [ACTION_RSS_KEY] = {
2752 .help = "RSS hash key",
2753 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2754 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2756 (offsetof(struct action_rss_data, conf) +
2757 offsetof(struct rte_flow_action_rss, key_len),
2758 sizeof(((struct rte_flow_action_rss *)0)->
2760 ARGS_ENTRY(struct action_rss_data, key)),
2762 [ACTION_RSS_KEY_LEN] = {
2764 .help = "RSS hash key length in bytes",
2765 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2766 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2767 (offsetof(struct action_rss_data, conf) +
2768 offsetof(struct rte_flow_action_rss, key_len),
2769 sizeof(((struct rte_flow_action_rss *)0)->
2772 RSS_HASH_KEY_LENGTH)),
2774 [ACTION_RSS_QUEUES] = {
2776 .help = "queue indices to use",
2777 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2778 .call = parse_vc_conf,
2780 [ACTION_RSS_QUEUE] = {
2782 .help = "queue index",
2783 .call = parse_vc_action_rss_queue,
2784 .comp = comp_vc_action_rss_queue,
2788 .help = "direct traffic to physical function",
2789 .priv = PRIV_ACTION(PF, 0),
2790 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2795 .help = "direct traffic to a virtual function ID",
2796 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2797 .next = NEXT(action_vf),
2800 [ACTION_VF_ORIGINAL] = {
2802 .help = "use original VF ID if possible",
2803 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2804 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2806 .call = parse_vc_conf,
2811 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2812 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2813 .call = parse_vc_conf,
2815 [ACTION_PHY_PORT] = {
2817 .help = "direct packets to physical port index",
2818 .priv = PRIV_ACTION(PHY_PORT,
2819 sizeof(struct rte_flow_action_phy_port)),
2820 .next = NEXT(action_phy_port),
2823 [ACTION_PHY_PORT_ORIGINAL] = {
2825 .help = "use original port index if possible",
2826 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2827 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2829 .call = parse_vc_conf,
2831 [ACTION_PHY_PORT_INDEX] = {
2833 .help = "physical port index",
2834 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2835 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2837 .call = parse_vc_conf,
2839 [ACTION_PORT_ID] = {
2841 .help = "direct matching traffic to a given DPDK port ID",
2842 .priv = PRIV_ACTION(PORT_ID,
2843 sizeof(struct rte_flow_action_port_id)),
2844 .next = NEXT(action_port_id),
2847 [ACTION_PORT_ID_ORIGINAL] = {
2849 .help = "use original DPDK port ID if possible",
2850 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2851 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2853 .call = parse_vc_conf,
2855 [ACTION_PORT_ID_ID] = {
2857 .help = "DPDK port ID",
2858 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2859 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2860 .call = parse_vc_conf,
2864 .help = "meter the directed packets at given id",
2865 .priv = PRIV_ACTION(METER,
2866 sizeof(struct rte_flow_action_meter)),
2867 .next = NEXT(action_meter),
2870 [ACTION_METER_ID] = {
2872 .help = "meter id to use",
2873 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2874 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2875 .call = parse_vc_conf,
2877 [ACTION_OF_SET_MPLS_TTL] = {
2878 .name = "of_set_mpls_ttl",
2879 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2882 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2883 .next = NEXT(action_of_set_mpls_ttl),
2886 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2889 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2890 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2892 .call = parse_vc_conf,
2894 [ACTION_OF_DEC_MPLS_TTL] = {
2895 .name = "of_dec_mpls_ttl",
2896 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2897 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2898 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2901 [ACTION_OF_SET_NW_TTL] = {
2902 .name = "of_set_nw_ttl",
2903 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2906 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2907 .next = NEXT(action_of_set_nw_ttl),
2910 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2913 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2914 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2916 .call = parse_vc_conf,
2918 [ACTION_OF_DEC_NW_TTL] = {
2919 .name = "of_dec_nw_ttl",
2920 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2921 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2922 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2925 [ACTION_OF_COPY_TTL_OUT] = {
2926 .name = "of_copy_ttl_out",
2927 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2928 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2929 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2932 [ACTION_OF_COPY_TTL_IN] = {
2933 .name = "of_copy_ttl_in",
2934 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2935 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2936 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2939 [ACTION_OF_POP_VLAN] = {
2940 .name = "of_pop_vlan",
2941 .help = "OpenFlow's OFPAT_POP_VLAN",
2942 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2943 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2946 [ACTION_OF_PUSH_VLAN] = {
2947 .name = "of_push_vlan",
2948 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2951 sizeof(struct rte_flow_action_of_push_vlan)),
2952 .next = NEXT(action_of_push_vlan),
2955 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2956 .name = "ethertype",
2957 .help = "EtherType",
2958 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2959 .args = ARGS(ARGS_ENTRY_HTON
2960 (struct rte_flow_action_of_push_vlan,
2962 .call = parse_vc_conf,
2964 [ACTION_OF_SET_VLAN_VID] = {
2965 .name = "of_set_vlan_vid",
2966 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2969 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2970 .next = NEXT(action_of_set_vlan_vid),
2973 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2976 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2977 .args = ARGS(ARGS_ENTRY_HTON
2978 (struct rte_flow_action_of_set_vlan_vid,
2980 .call = parse_vc_conf,
2982 [ACTION_OF_SET_VLAN_PCP] = {
2983 .name = "of_set_vlan_pcp",
2984 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2987 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2988 .next = NEXT(action_of_set_vlan_pcp),
2991 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2993 .help = "VLAN priority",
2994 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2995 .args = ARGS(ARGS_ENTRY_HTON
2996 (struct rte_flow_action_of_set_vlan_pcp,
2998 .call = parse_vc_conf,
3000 [ACTION_OF_POP_MPLS] = {
3001 .name = "of_pop_mpls",
3002 .help = "OpenFlow's OFPAT_POP_MPLS",
3003 .priv = PRIV_ACTION(OF_POP_MPLS,
3004 sizeof(struct rte_flow_action_of_pop_mpls)),
3005 .next = NEXT(action_of_pop_mpls),
3008 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
3009 .name = "ethertype",
3010 .help = "EtherType",
3011 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
3012 .args = ARGS(ARGS_ENTRY_HTON
3013 (struct rte_flow_action_of_pop_mpls,
3015 .call = parse_vc_conf,
3017 [ACTION_OF_PUSH_MPLS] = {
3018 .name = "of_push_mpls",
3019 .help = "OpenFlow's OFPAT_PUSH_MPLS",
3022 sizeof(struct rte_flow_action_of_push_mpls)),
3023 .next = NEXT(action_of_push_mpls),
3026 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
3027 .name = "ethertype",
3028 .help = "EtherType",
3029 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
3030 .args = ARGS(ARGS_ENTRY_HTON
3031 (struct rte_flow_action_of_push_mpls,
3033 .call = parse_vc_conf,
3035 [ACTION_VXLAN_ENCAP] = {
3036 .name = "vxlan_encap",
3037 .help = "VXLAN encapsulation, uses configuration set by \"set"
3039 .priv = PRIV_ACTION(VXLAN_ENCAP,
3040 sizeof(struct action_vxlan_encap_data)),
3041 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3042 .call = parse_vc_action_vxlan_encap,
3044 [ACTION_VXLAN_DECAP] = {
3045 .name = "vxlan_decap",
3046 .help = "Performs a decapsulation action by stripping all"
3047 " headers of the VXLAN tunnel network overlay from the"
3049 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
3050 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3053 [ACTION_NVGRE_ENCAP] = {
3054 .name = "nvgre_encap",
3055 .help = "NVGRE encapsulation, uses configuration set by \"set"
3057 .priv = PRIV_ACTION(NVGRE_ENCAP,
3058 sizeof(struct action_nvgre_encap_data)),
3059 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3060 .call = parse_vc_action_nvgre_encap,
3062 [ACTION_NVGRE_DECAP] = {
3063 .name = "nvgre_decap",
3064 .help = "Performs a decapsulation action by stripping all"
3065 " headers of the NVGRE tunnel network overlay from the"
3067 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
3068 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3071 [ACTION_L2_ENCAP] = {
3073 .help = "l2 encap, uses configuration set by"
3074 " \"set l2_encap\"",
3075 .priv = PRIV_ACTION(RAW_ENCAP,
3076 sizeof(struct action_raw_encap_data)),
3077 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3078 .call = parse_vc_action_l2_encap,
3080 [ACTION_L2_DECAP] = {
3082 .help = "l2 decap, uses configuration set by"
3083 " \"set l2_decap\"",
3084 .priv = PRIV_ACTION(RAW_DECAP,
3085 sizeof(struct action_raw_decap_data)),
3086 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3087 .call = parse_vc_action_l2_decap,
3089 [ACTION_MPLSOGRE_ENCAP] = {
3090 .name = "mplsogre_encap",
3091 .help = "mplsogre encapsulation, uses configuration set by"
3092 " \"set mplsogre_encap\"",
3093 .priv = PRIV_ACTION(RAW_ENCAP,
3094 sizeof(struct action_raw_encap_data)),
3095 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3096 .call = parse_vc_action_mplsogre_encap,
3098 [ACTION_MPLSOGRE_DECAP] = {
3099 .name = "mplsogre_decap",
3100 .help = "mplsogre decapsulation, uses configuration set by"
3101 " \"set mplsogre_decap\"",
3102 .priv = PRIV_ACTION(RAW_DECAP,
3103 sizeof(struct action_raw_decap_data)),
3104 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3105 .call = parse_vc_action_mplsogre_decap,
3107 [ACTION_MPLSOUDP_ENCAP] = {
3108 .name = "mplsoudp_encap",
3109 .help = "mplsoudp encapsulation, uses configuration set by"
3110 " \"set mplsoudp_encap\"",
3111 .priv = PRIV_ACTION(RAW_ENCAP,
3112 sizeof(struct action_raw_encap_data)),
3113 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3114 .call = parse_vc_action_mplsoudp_encap,
3116 [ACTION_MPLSOUDP_DECAP] = {
3117 .name = "mplsoudp_decap",
3118 .help = "mplsoudp decapsulation, uses configuration set by"
3119 " \"set mplsoudp_decap\"",
3120 .priv = PRIV_ACTION(RAW_DECAP,
3121 sizeof(struct action_raw_decap_data)),
3122 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3123 .call = parse_vc_action_mplsoudp_decap,
3125 [ACTION_SET_IPV4_SRC] = {
3126 .name = "set_ipv4_src",
3127 .help = "Set a new IPv4 source address in the outermost"
3129 .priv = PRIV_ACTION(SET_IPV4_SRC,
3130 sizeof(struct rte_flow_action_set_ipv4)),
3131 .next = NEXT(action_set_ipv4_src),
3134 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3135 .name = "ipv4_addr",
3136 .help = "new IPv4 source address to set",
3137 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3138 .args = ARGS(ARGS_ENTRY_HTON
3139 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3140 .call = parse_vc_conf,
3142 [ACTION_SET_IPV4_DST] = {
3143 .name = "set_ipv4_dst",
3144 .help = "Set a new IPv4 destination address in the outermost"
3146 .priv = PRIV_ACTION(SET_IPV4_DST,
3147 sizeof(struct rte_flow_action_set_ipv4)),
3148 .next = NEXT(action_set_ipv4_dst),
3151 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3152 .name = "ipv4_addr",
3153 .help = "new IPv4 destination address to set",
3154 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3155 .args = ARGS(ARGS_ENTRY_HTON
3156 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3157 .call = parse_vc_conf,
3159 [ACTION_SET_IPV6_SRC] = {
3160 .name = "set_ipv6_src",
3161 .help = "Set a new IPv6 source address in the outermost"
3163 .priv = PRIV_ACTION(SET_IPV6_SRC,
3164 sizeof(struct rte_flow_action_set_ipv6)),
3165 .next = NEXT(action_set_ipv6_src),
3168 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3169 .name = "ipv6_addr",
3170 .help = "new IPv6 source address to set",
3171 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3172 .args = ARGS(ARGS_ENTRY_HTON
3173 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3174 .call = parse_vc_conf,
3176 [ACTION_SET_IPV6_DST] = {
3177 .name = "set_ipv6_dst",
3178 .help = "Set a new IPv6 destination address in the outermost"
3180 .priv = PRIV_ACTION(SET_IPV6_DST,
3181 sizeof(struct rte_flow_action_set_ipv6)),
3182 .next = NEXT(action_set_ipv6_dst),
3185 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3186 .name = "ipv6_addr",
3187 .help = "new IPv6 destination address to set",
3188 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3189 .args = ARGS(ARGS_ENTRY_HTON
3190 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3191 .call = parse_vc_conf,
3193 [ACTION_SET_TP_SRC] = {
3194 .name = "set_tp_src",
3195 .help = "set a new source port number in the outermost"
3197 .priv = PRIV_ACTION(SET_TP_SRC,
3198 sizeof(struct rte_flow_action_set_tp)),
3199 .next = NEXT(action_set_tp_src),
3202 [ACTION_SET_TP_SRC_TP_SRC] = {
3204 .help = "new source port number to set",
3205 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3206 .args = ARGS(ARGS_ENTRY_HTON
3207 (struct rte_flow_action_set_tp, port)),
3208 .call = parse_vc_conf,
3210 [ACTION_SET_TP_DST] = {
3211 .name = "set_tp_dst",
3212 .help = "set a new destination port number in the outermost"
3214 .priv = PRIV_ACTION(SET_TP_DST,
3215 sizeof(struct rte_flow_action_set_tp)),
3216 .next = NEXT(action_set_tp_dst),
3219 [ACTION_SET_TP_DST_TP_DST] = {
3221 .help = "new destination port number to set",
3222 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3223 .args = ARGS(ARGS_ENTRY_HTON
3224 (struct rte_flow_action_set_tp, port)),
3225 .call = parse_vc_conf,
3227 [ACTION_MAC_SWAP] = {
3229 .help = "Swap the source and destination MAC addresses"
3230 " in the outermost Ethernet header",
3231 .priv = PRIV_ACTION(MAC_SWAP, 0),
3232 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3235 [ACTION_DEC_TTL] = {
3237 .help = "decrease network TTL if available",
3238 .priv = PRIV_ACTION(DEC_TTL, 0),
3239 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3242 [ACTION_SET_TTL] = {
3244 .help = "set ttl value",
3245 .priv = PRIV_ACTION(SET_TTL,
3246 sizeof(struct rte_flow_action_set_ttl)),
3247 .next = NEXT(action_set_ttl),
3250 [ACTION_SET_TTL_TTL] = {
3251 .name = "ttl_value",
3252 .help = "new ttl value to set",
3253 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3254 .args = ARGS(ARGS_ENTRY_HTON
3255 (struct rte_flow_action_set_ttl, ttl_value)),
3256 .call = parse_vc_conf,
3258 [ACTION_SET_MAC_SRC] = {
3259 .name = "set_mac_src",
3260 .help = "set source mac address",
3261 .priv = PRIV_ACTION(SET_MAC_SRC,
3262 sizeof(struct rte_flow_action_set_mac)),
3263 .next = NEXT(action_set_mac_src),
3266 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3268 .help = "new source mac address",
3269 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3270 .args = ARGS(ARGS_ENTRY_HTON
3271 (struct rte_flow_action_set_mac, mac_addr)),
3272 .call = parse_vc_conf,
3274 [ACTION_SET_MAC_DST] = {
3275 .name = "set_mac_dst",
3276 .help = "set destination mac address",
3277 .priv = PRIV_ACTION(SET_MAC_DST,
3278 sizeof(struct rte_flow_action_set_mac)),
3279 .next = NEXT(action_set_mac_dst),
3282 [ACTION_SET_MAC_DST_MAC_DST] = {
3284 .help = "new destination mac address to set",
3285 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3286 .args = ARGS(ARGS_ENTRY_HTON
3287 (struct rte_flow_action_set_mac, mac_addr)),
3288 .call = parse_vc_conf,
3290 [ACTION_INC_TCP_SEQ] = {
3291 .name = "inc_tcp_seq",
3292 .help = "increase TCP sequence number",
3293 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3294 .next = NEXT(action_inc_tcp_seq),
3297 [ACTION_INC_TCP_SEQ_VALUE] = {
3299 .help = "the value to increase TCP sequence number by",
3300 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3301 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3302 .call = parse_vc_conf,
3304 [ACTION_DEC_TCP_SEQ] = {
3305 .name = "dec_tcp_seq",
3306 .help = "decrease TCP sequence number",
3307 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3308 .next = NEXT(action_dec_tcp_seq),
3311 [ACTION_DEC_TCP_SEQ_VALUE] = {
3313 .help = "the value to decrease TCP sequence number by",
3314 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3315 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3316 .call = parse_vc_conf,
3318 [ACTION_INC_TCP_ACK] = {
3319 .name = "inc_tcp_ack",
3320 .help = "increase TCP acknowledgment number",
3321 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3322 .next = NEXT(action_inc_tcp_ack),
3325 [ACTION_INC_TCP_ACK_VALUE] = {
3327 .help = "the value to increase TCP acknowledgment number by",
3328 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3329 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3330 .call = parse_vc_conf,
3332 [ACTION_DEC_TCP_ACK] = {
3333 .name = "dec_tcp_ack",
3334 .help = "decrease TCP acknowledgment number",
3335 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3336 .next = NEXT(action_dec_tcp_ack),
3339 [ACTION_DEC_TCP_ACK_VALUE] = {
3341 .help = "the value to decrease TCP acknowledgment number by",
3342 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3343 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3344 .call = parse_vc_conf,
3346 [ACTION_RAW_ENCAP] = {
3347 .name = "raw_encap",
3348 .help = "encapsulation data, defined by set raw_encap",
3349 .priv = PRIV_ACTION(RAW_ENCAP,
3350 sizeof(struct action_raw_encap_data)),
3351 .next = NEXT(action_raw_encap),
3352 .call = parse_vc_action_raw_encap,
3354 [ACTION_RAW_ENCAP_INDEX] = {
3356 .help = "the index of raw_encap_confs",
3357 .next = NEXT(NEXT_ENTRY(ACTION_RAW_ENCAP_INDEX_VALUE)),
3359 [ACTION_RAW_ENCAP_INDEX_VALUE] = {
3362 .help = "unsigned integer value",
3363 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3364 .call = parse_vc_action_raw_encap_index,
3365 .comp = comp_set_raw_index,
3367 [ACTION_RAW_DECAP] = {
3368 .name = "raw_decap",
3369 .help = "decapsulation data, defined by set raw_encap",
3370 .priv = PRIV_ACTION(RAW_DECAP,
3371 sizeof(struct action_raw_decap_data)),
3372 .next = NEXT(action_raw_decap),
3373 .call = parse_vc_action_raw_decap,
3375 [ACTION_RAW_DECAP_INDEX] = {
3377 .help = "the index of raw_encap_confs",
3378 .next = NEXT(NEXT_ENTRY(ACTION_RAW_DECAP_INDEX_VALUE)),
3380 [ACTION_RAW_DECAP_INDEX_VALUE] = {
3383 .help = "unsigned integer value",
3384 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3385 .call = parse_vc_action_raw_decap_index,
3386 .comp = comp_set_raw_index,
3388 /* Top level command. */
3391 .help = "set raw encap/decap data",
3392 .type = "set raw_encap|raw_decap <index> <pattern>",
3393 .next = NEXT(NEXT_ENTRY
3396 .call = parse_set_init,
3398 /* Sub-level commands. */
3400 .name = "raw_encap",
3401 .help = "set raw encap data",
3402 .next = NEXT(next_set_raw),
3403 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3404 (offsetof(struct buffer, port),
3405 sizeof(((struct buffer *)0)->port),
3406 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3407 .call = parse_set_raw_encap_decap,
3410 .name = "raw_decap",
3411 .help = "set raw decap data",
3412 .next = NEXT(next_set_raw),
3413 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
3414 (offsetof(struct buffer, port),
3415 sizeof(((struct buffer *)0)->port),
3416 0, RAW_ENCAP_CONFS_MAX_NUM - 1)),
3417 .call = parse_set_raw_encap_decap,
3422 .help = "index of raw_encap/raw_decap data",
3423 .next = NEXT(next_item),
3426 [ACTION_SET_TAG] = {
3429 .priv = PRIV_ACTION(SET_TAG,
3430 sizeof(struct rte_flow_action_set_tag)),
3431 .next = NEXT(action_set_tag),
3434 [ACTION_SET_TAG_INDEX] = {
3436 .help = "index of tag array",
3437 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3438 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_set_tag, index)),
3439 .call = parse_vc_conf,
3441 [ACTION_SET_TAG_DATA] = {
3443 .help = "tag value",
3444 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3445 .args = ARGS(ARGS_ENTRY_HTON
3446 (struct rte_flow_action_set_tag, data)),
3447 .call = parse_vc_conf,
3449 [ACTION_SET_TAG_MASK] = {
3451 .help = "mask for tag value",
3452 .next = NEXT(action_set_tag, NEXT_ENTRY(UNSIGNED)),
3453 .args = ARGS(ARGS_ENTRY_HTON
3454 (struct rte_flow_action_set_tag, mask)),
3455 .call = parse_vc_conf,
3459 /** Remove and return last entry from argument stack. */
3460 static const struct arg *
/* Pops the top of ctx->args and returns it; returns NULL when the stack
 * is empty. Callers use the popped arg to locate storage for a parsed
 * value (see parse_prefix below). */
3461 pop_args(struct context *ctx)
3463 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3466 /** Add entry on top of the argument stack. */
/* NOTE(review): the return statements are elided in this view; from the
 * visible guard, the push is refused when the stack already holds
 * CTX_STACK_SIZE entries. */
3468 push_args(struct context *ctx, const struct arg *arg)
3470 	if (ctx->args_num == CTX_STACK_SIZE)
3472 	ctx->args[ctx->args_num++] = arg;
3476 /** Spread value into buffer according to bit-mask. */
/* Writes the low bits of @val into the destination field, one bit at a
 * time, at the positions selected by arg->mask. On little-endian hosts
 * the mask bytes are walked from the end of the field backwards (the
 * (i -= sub) index below). parse_prefix() also calls this with
 * dst == NULL, apparently to count mask bits without writing —
 * NOTE(review): that NULL path is elided here; confirm. */
3478 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3480 	uint32_t i = arg->size;
3488 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3497 		unsigned int shift = 0;
3498 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scan each set bit of this mask byte; clear then set the target bit
 * from the next bit of val. */
3500 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
3501 			if (!(arg->mask[i] & (1 << shift)))
3506 			*buf &= ~(1 << shift);
3507 			*buf |= (val & 1) << shift;
3515 /** Compare a string with a partial one of a given length. */
/* Returns 0 only when @partial matches the first @partial_len bytes of
 * @full AND @full is no longer than @partial_len (i.e. a complete
 * match). A strict prefix yields the next character of @full, which is
 * nonzero. NOTE(review): the early "return r" / "return 0" lines are
 * elided in this view. */
3517 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3519 	int r = strncmp(full, partial, partial_len);
3523 	if (strlen(full) <= partial_len)
3525 	return full[partial_len];
3529  * Parse a prefix length and generate a bit-mask.
3531  * Last argument (ctx->args) is retrieved to determine mask size, storage
3532  * location and whether the result must use network byte ordering.
3535 parse_prefix(struct context *ctx, const struct token *token,
3536 	     const char *str, unsigned int len,
3537 	     void *buf, unsigned int size)
/* The arg describing the destination field was pushed by the previous
 * token; pop it now (and push it back on success, see bottom). */
3539 	const struct arg *arg = pop_args(ctx);
/* conv[k] is a byte with its k most-significant bits set; used for the
 * partial byte at the prefix boundary. */
3540 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3547 	/* Argument is expected. */
/* Parse the numeric prefix length; reject trailing garbage. */
3551 	u = strtoumax(str, &end, 0);
3552 	if (errno || (size_t)(end - str) != len)
/* Bit-mask fields: count/fill via arg_entry_bf_fill().
 * NOTE(review): the surrounding branch structure is elided in this
 * view; the NULL call appears to size the mask without writing. */
3557 	extra = arg_entry_bf_fill(NULL, 0, arg);
3566 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3567 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain byte fields: prefix must fit in the field. */
3574 	if (bytes > size || bytes + !!extra > size)
3578 	buf = (uint8_t *)ctx->object + arg->offset;
3579 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host: set bytes are written at the high end of the
 * field, plus a partial byte from conv[] when the prefix is not a
 * multiple of 8. */
3581 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3582 		memset(buf, 0x00, size - bytes);
3584 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian (network order) layout: set bytes lead. */
3588 		memset(buf, 0xff, bytes);
3589 		memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3591 			((uint8_t *)buf)[bytes] = conv[extra];
/* An objmask, when present, is fully set for this field. */
3594 	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the arg so the caller's state is unchanged. */
3597 	push_args(ctx, arg);
3601 /** Default parsing function for token name matching. */
/* Accepts @str iff it is a complete match for token->name (via
 * strcmp_partial); used by most other parsers as a first step. */
3603 parse_default(struct context *ctx, const struct token *token,
3604 	      const char *str, unsigned int len,
3605 	      void *buf, unsigned int size)
3610 	if (strcmp_partial(token->name, str, len))
3615 /** Parse flow command, initialize output buffer for subsequent tokens. */
3617 parse_init(struct context *ctx, const struct token *token,
3618 	   const char *str, unsigned int len,
3619 	   void *buf, unsigned int size)
3621 	struct buffer *out = buf;
3623 	/* Token name must match. */
3624 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3626 	/* Nothing else to do if there is no buffer. */
3629 	/* Make sure buffer is large enough. */
3630 	if (size < sizeof(*out))
3632 	/* Initialize buffer. */
/* Header zeroed; the tail is filled with a 0x22 poison pattern so
 * uninitialized reads are easier to spot. */
3633 	memset(out, 0x00, sizeof(*out));
3634 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3637 	ctx->objmask = NULL;
3641 /** Parse tokens for validate/create commands. */
/* Shared entry point for every validate/create sub-token. Buffer layout:
 * the pattern[] and actions[] arrays grow upward from just past *out,
 * while per-item/per-action configuration data grows DOWNWARD from the
 * end of the buffer (out->args.vc.data); the two meet in the middle and
 * the overlap checks below reject commands that exhaust the space. */
3643 parse_vc(struct context *ctx, const struct token *token,
3644 	 const char *str, unsigned int len,
3645 	 void *buf, unsigned int size)
3647 	struct buffer *out = buf;
3651 	/* Token name must match. */
3652 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3654 	/* Nothing else to do if there is no buffer. */
/* First call for this command: record it and anchor the data cursor at
 * the top of the buffer. */
3657 	if (!out->command) {
3658 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3660 		if (sizeof(*out) > size)
3662 		out->command = ctx->curr;
3665 		ctx->objmask = NULL;
3666 		out->args.vc.data = (uint8_t *)out + size;
3670 	ctx->object = &out->args.vc.attr;
3671 	ctx->objmask = NULL;
3672 	switch (ctx->curr) {
/* Attribute tokens simply set the corresponding rte_flow_attr bit. */
3677 		out->args.vc.attr.ingress = 1;
3680 		out->args.vc.attr.egress = 1;
3683 		out->args.vc.attr.transfer = 1;
/* "pattern" keyword: start the item array right after *out (aligned). */
3686 		out->args.vc.pattern =
3687 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3689 		ctx->object = out->args.vc.pattern;
3690 		ctx->objmask = NULL;
/* "actions" keyword: actions array starts right after the items. */
3693 		out->args.vc.actions =
3694 			(void *)RTE_ALIGN_CEIL((uintptr_t)
3695 					       (out->args.vc.pattern +
3696 						out->args.vc.pattern_n),
3698 		ctx->object = out->args.vc.actions;
3699 		ctx->objmask = NULL;
/* Remaining tokens are pattern items (before "actions") or actions. */
3706 	if (!out->args.vc.actions) {
3707 		const struct parse_item_priv *priv = token->priv;
3708 		struct rte_flow_item *item =
3709 			out->args.vc.pattern + out->args.vc.pattern_n;
/* Items reserve three copies of the priv size at the bottom of the
 * buffer: spec, last and mask. */
3711 		data_size = priv->size * 3; /* spec, last, mask */
3712 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3713 					       (out->args.vc.data - data_size),
/* Fail when the item array would collide with the data region. */
3715 		if ((uint8_t *)item + sizeof(*item) > data)
3717 		*item = (struct rte_flow_item){
3720 		++out->args.vc.pattern_n;
3722 		ctx->objmask = NULL;
3724 		const struct parse_action_priv *priv = token->priv;
3725 		struct rte_flow_action *action =
3726 			out->args.vc.actions + out->args.vc.actions_n;
/* Actions reserve a single configuration object. */
3728 		data_size = priv->size; /* configuration */
3729 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3730 					       (out->args.vc.data - data_size),
3732 		if ((uint8_t *)action + sizeof(*action) > data)
3734 		*action = (struct rte_flow_action){
3736 			.conf = data_size ? data : NULL,
3738 		++out->args.vc.actions_n;
3739 		ctx->object = action;
3740 		ctx->objmask = NULL;
/* Common tail: zero the freshly reserved data and move the cursor. */
3742 	memset(data, 0, data_size);
3743 	out->args.vc.data = data;
3744 	ctx->objdata = data_size;
3748 /** Parse pattern item parameter type. */
/* Handles the "is"/"spec"/"last"/"prefix"/"mask" qualifiers of a
 * pattern item and re-points ctx->object at the matching third of the
 * spec/last/mask data block reserved by parse_vc().
 * NOTE(review): assignments of "index" per case are elided here. */
3750 parse_vc_spec(struct context *ctx, const struct token *token,
3751 	      const char *str, unsigned int len,
3752 	      void *buf, unsigned int size)
3754 	struct buffer *out = buf;
3755 	struct rte_flow_item *item;
3761 	/* Token name must match. */
3762 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3764 	/* Parse parameter types. */
3765 	switch (ctx->curr) {
3766 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3772 	case ITEM_PARAM_SPEC:
3775 	case ITEM_PARAM_LAST:
3778 	case ITEM_PARAM_PREFIX:
3779 		/* Modify next token to expect a prefix. */
3780 		if (ctx->next_num < 2)
3782 		ctx->next[ctx->next_num - 2] = prefix;
3784 	case ITEM_PARAM_MASK:
3790 	/* Nothing else to do if there is no buffer. */
3793 	if (!out->args.vc.pattern_n)
/* Work on the most recently parsed item. */
3795 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3796 	data_size = ctx->objdata / 3; /* spec, last, mask */
3797 	/* Point to selected object. */
3798 	ctx->object = out->args.vc.data + (data_size * index);
3800 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3801 		item->mask = ctx->objmask;
3803 		ctx->objmask = NULL;
3804 	/* Update relevant item pointer. */
3805 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3810 /** Parse action configuration field. */
/* Generic .call handler for action sub-tokens: points ctx->object at
 * the configuration block of the current action so the following
 * argument parser writes into it. */
3812 parse_vc_conf(struct context *ctx, const struct token *token,
3813 	      const char *str, unsigned int len,
3814 	      void *buf, unsigned int size)
3816 	struct buffer *out = buf;
3819 	/* Token name must match. */
3820 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3822 	/* Nothing else to do if there is no buffer. */
3825 	/* Point to selected object. */
3826 	ctx->object = out->args.vc.data;
3827 	ctx->objmask = NULL;
3831 /** Parse RSS action. */
/* Registers the action through parse_vc() then fills its configuration
 * block with defaults: default hash function, testpmd's default key,
 * and an identity queue list sized to min(nb_rxq, ACTION_RSS_QUEUE_NUM).
 * When the port is known, key_len is clamped to the device's reported
 * hash_key_size. */
3833 parse_vc_action_rss(struct context *ctx, const struct token *token,
3834 		    const char *str, unsigned int len,
3835 		    void *buf, unsigned int size)
3837 	struct buffer *out = buf;
3838 	struct rte_flow_action *action;
3839 	struct action_rss_data *action_rss_data;
3843 	ret = parse_vc(ctx, token, str, len, buf, size);
3846 	/* Nothing else to do if there is no buffer. */
3849 	if (!out->args.vc.actions_n)
3851 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3852 	/* Point to selected object. */
3853 	ctx->object = out->args.vc.data;
3854 	ctx->objmask = NULL;
3855 	/* Set up default configuration. */
3856 	action_rss_data = ctx->object;
3857 	*action_rss_data = (struct action_rss_data){
3858 		.conf = (struct rte_flow_action_rss){
3859 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3862 			.key_len = sizeof(action_rss_data->key),
3863 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3864 			.key = action_rss_data->key,
3865 			.queue = action_rss_data->queue,
3867 		.key = "testpmd's default RSS hash key, "
3868 			"override it for better balancing",
/* Default queue list is the identity mapping 0..queue_num-1. */
3871 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3872 		action_rss_data->queue[i] = i;
3873 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3874 	    ctx->port != (portid_t)RTE_PORT_ALL) {
3875 		struct rte_eth_dev_info info;
3878 		ret2 = rte_eth_dev_info_get(ctx->port, &info);
/* Clamp the key length to what the device supports. */
3882 		action_rss_data->conf.key_len =
3883 			RTE_MIN(sizeof(action_rss_data->key),
3884 				info.hash_key_size);
3886 	action->conf = &action_rss_data->conf;
3891  * Parse func field for RSS action.
3893  * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3894  * ACTION_RSS_FUNC_* index that called this function.
3897 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3898 			 const char *str, unsigned int len,
3899 			 void *buf, unsigned int size)
3901 	struct action_rss_data *action_rss_data;
3902 	enum rte_eth_hash_function func;
3906 	/* Token name must match. */
3907 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the token index onto the corresponding hash-function enum. */
3909 	switch (ctx->curr) {
3910 	case ACTION_RSS_FUNC_DEFAULT:
3911 		func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3913 	case ACTION_RSS_FUNC_TOEPLITZ:
3914 		func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3916 	case ACTION_RSS_FUNC_SIMPLE_XOR:
3917 		func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3919 	case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
3920 		func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
/* Store the selection into the current RSS configuration. */
3927 	action_rss_data = ctx->object;
3928 	action_rss_data->conf.func = func;
3933  * Parse type field for RSS action.
3935  * Valid tokens are type field names and the "end" token.
/* The high 16 bits of ctx->objdata flag "list already started"; the
 * types accumulator is reset on the first entry only. */
3938 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3939 			 const char *str, unsigned int len,
3940 			 void *buf, unsigned int size)
3942 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3943 	struct action_rss_data *action_rss_data;
3949 	if (ctx->curr != ACTION_RSS_TYPE)
/* First type token of the list: clear the accumulated types. */
3951 	if (!(ctx->objdata >> 16) && ctx->object) {
3952 		action_rss_data = ctx->object;
3953 		action_rss_data->conf.types = 0;
/* "end" terminates the list and clears the started flag. */
3955 	if (!strcmp_partial("end", str, len)) {
3956 		ctx->objdata &= 0xffff;
/* Look the token up in the global rss_type_table. */
3959 	for (i = 0; rss_type_table[i].str; ++i)
3960 		if (!strcmp_partial(rss_type_table[i].str, str, len))
3962 	if (!rss_type_table[i].str)
/* Mark the list as started and repost this token so more types can
 * follow. */
3964 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3966 	if (ctx->next_num == RTE_DIM(ctx->next))
3968 	ctx->next[ctx->next_num++] = next;
3971 	action_rss_data = ctx->object;
3972 	action_rss_data->conf.types |= rss_type_table[i].rss_type;
3977  * Parse queue field for RSS action.
3979  * Valid tokens are queue indices and the "end" token.
/* The running element count lives in the high 16 bits of ctx->objdata;
 * each queue index is parsed by parse_int() through a synthetic arg
 * pointing at queue[i] inside struct action_rss_data. */
3982 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3983 			  const char *str, unsigned int len,
3984 			  void *buf, unsigned int size)
3986 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3987 	struct action_rss_data *action_rss_data;
3988 	const struct arg *arg;
3995 	if (ctx->curr != ACTION_RSS_QUEUE)
3997 	i = ctx->objdata >> 16;
3998 	if (!strcmp_partial("end", str, len)) {
3999 		ctx->objdata &= 0xffff;
/* Refuse more entries than the fixed queue[] array can hold. */
4002 	if (i >= ACTION_RSS_QUEUE_NUM)
4004 	arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
4005 			     i * sizeof(action_rss_data->queue[i]),
4006 			     sizeof(action_rss_data->queue[i]));
4007 	if (push_args(ctx, arg))
4009 	ret = parse_int(ctx, token, str, len, NULL, 0);
/* Bump the count and repost this token for the next index. */
4015 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
4017 	if (ctx->next_num == RTE_DIM(ctx->next))
4019 	ctx->next[ctx->next_num++] = next;
/* On "end": finalize queue_num; an empty list yields a NULL queue
 * pointer. */
4023 	action_rss_data = ctx->object;
4024 	action_rss_data->conf.queue_num = i;
4025 	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
4029 /** Parse VXLAN encap action. */
/* Builds the action's item list (ETH / [VLAN] / IPv4-or-IPv6 / UDP /
 * VXLAN / END) from the global vxlan_encap_conf previously filled by
 * the "set vxlan" command. Slot [1] is VOIDed when no VLAN is selected
 * and slot [2] is swapped to IPv6 when IPv4 is not selected. */
4031 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
4032 			    const char *str, unsigned int len,
4033 			    void *buf, unsigned int size)
4035 	struct buffer *out = buf;
4036 	struct rte_flow_action *action;
4037 	struct action_vxlan_encap_data *action_vxlan_encap_data;
4040 	ret = parse_vc(ctx, token, str, len, buf, size);
4043 	/* Nothing else to do if there is no buffer. */
4046 	if (!out->args.vc.actions_n)
4048 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4049 	/* Point to selected object. */
4050 	ctx->object = out->args.vc.data;
4051 	ctx->objmask = NULL;
4052 	/* Set up default configuration. */
4053 	action_vxlan_encap_data = ctx->object;
4054 	*action_vxlan_encap_data = (struct action_vxlan_encap_data){
4055 		.conf = (struct rte_flow_action_vxlan_encap){
4056 			.definition = action_vxlan_encap_data->items,
4060 				.type = RTE_FLOW_ITEM_TYPE_ETH,
4061 				.spec = &action_vxlan_encap_data->item_eth,
4062 				.mask = &rte_flow_item_eth_mask,
4065 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
4066 				.spec = &action_vxlan_encap_data->item_vlan,
4067 				.mask = &rte_flow_item_vlan_mask,
4070 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
4071 				.spec = &action_vxlan_encap_data->item_ipv4,
4072 				.mask = &rte_flow_item_ipv4_mask,
4075 				.type = RTE_FLOW_ITEM_TYPE_UDP,
4076 				.spec = &action_vxlan_encap_data->item_udp,
4077 				.mask = &rte_flow_item_udp_mask,
4080 				.type = RTE_FLOW_ITEM_TYPE_VXLAN,
4081 				.spec = &action_vxlan_encap_data->item_vxlan,
4082 				.mask = &rte_flow_item_vxlan_mask,
4085 				.type = RTE_FLOW_ITEM_TYPE_END,
4090 			.tci = vxlan_encap_conf.vlan_tci,
4094 			.src_addr = vxlan_encap_conf.ipv4_src,
4095 			.dst_addr = vxlan_encap_conf.ipv4_dst,
4098 			.src_port = vxlan_encap_conf.udp_src,
4099 			.dst_port = vxlan_encap_conf.udp_dst,
4101 		.item_vxlan.flags = 0,
4103 	memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
4104 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4105 	memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
4106 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Replace the IPv4 item with an IPv6 one when configured so. */
4107 	if (!vxlan_encap_conf.select_ipv4) {
4108 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
4109 		       &vxlan_encap_conf.ipv6_src,
4110 		       sizeof(vxlan_encap_conf.ipv6_src));
4111 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
4112 		       &vxlan_encap_conf.ipv6_dst,
4113 		       sizeof(vxlan_encap_conf.ipv6_dst));
4114 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
4115 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4116 			.spec = &action_vxlan_encap_data->item_ipv6,
4117 			.mask = &rte_flow_item_ipv6_mask,
4120 	if (!vxlan_encap_conf.select_vlan)
4121 		action_vxlan_encap_data->items[1].type =
4122 			RTE_FLOW_ITEM_TYPE_VOID;
/* Optional explicit TOS/TTL: widen the IP mask to cover those fields.
 * The masks are function-local statics so the pointers stored in
 * items[2].mask stay valid after return. */
4123 	if (vxlan_encap_conf.select_tos_ttl) {
4124 		if (vxlan_encap_conf.select_ipv4) {
4125 			static struct rte_flow_item_ipv4 ipv4_mask_tos;
4127 			memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
4128 			       sizeof(ipv4_mask_tos));
4129 			ipv4_mask_tos.hdr.type_of_service = 0xff;
4130 			ipv4_mask_tos.hdr.time_to_live = 0xff;
4131 			action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
4132 					vxlan_encap_conf.ip_tos;
4133 			action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
4134 					vxlan_encap_conf.ip_ttl;
4135 			action_vxlan_encap_data->items[2].mask =
4138 			static struct rte_flow_item_ipv6 ipv6_mask_tos;
4140 			memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
4141 			       sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside vtc_flow; shift into place. */
4142 			ipv6_mask_tos.hdr.vtc_flow |=
4143 				RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
4144 			ipv6_mask_tos.hdr.hop_limits = 0xff;
4145 			action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
4147 					((uint32_t)vxlan_encap_conf.ip_tos <<
4148 					 RTE_IPV6_HDR_TC_SHIFT);
4149 			action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
4150 					vxlan_encap_conf.ip_ttl;
4151 			action_vxlan_encap_data->items[2].mask =
4155 	memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4156 	       RTE_DIM(vxlan_encap_conf.vni));
4157 	action->conf = &action_vxlan_encap_data->conf;
4161 /** Parse NVGRE encap action. */
/* Same structure as the VXLAN variant above, driven by the global
 * nvgre_encap_conf: items ETH / [VLAN] / IPv4-or-IPv6 / NVGRE / END,
 * with slot [1] VOIDed without VLAN and slot [2] swapped to IPv6. */
4163 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4164 			    const char *str, unsigned int len,
4165 			    void *buf, unsigned int size)
4167 	struct buffer *out = buf;
4168 	struct rte_flow_action *action;
4169 	struct action_nvgre_encap_data *action_nvgre_encap_data;
4172 	ret = parse_vc(ctx, token, str, len, buf, size);
4175 	/* Nothing else to do if there is no buffer. */
4178 	if (!out->args.vc.actions_n)
4180 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4181 	/* Point to selected object. */
4182 	ctx->object = out->args.vc.data;
4183 	ctx->objmask = NULL;
4184 	/* Set up default configuration. */
4185 	action_nvgre_encap_data = ctx->object;
4186 	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
4187 		.conf = (struct rte_flow_action_nvgre_encap){
4188 			.definition = action_nvgre_encap_data->items,
4192 				.type = RTE_FLOW_ITEM_TYPE_ETH,
4193 				.spec = &action_nvgre_encap_data->item_eth,
4194 				.mask = &rte_flow_item_eth_mask,
4197 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
4198 				.spec = &action_nvgre_encap_data->item_vlan,
4199 				.mask = &rte_flow_item_vlan_mask,
4202 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
4203 				.spec = &action_nvgre_encap_data->item_ipv4,
4204 				.mask = &rte_flow_item_ipv4_mask,
4207 				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
4208 				.spec = &action_nvgre_encap_data->item_nvgre,
4209 				.mask = &rte_flow_item_nvgre_mask,
4212 				.type = RTE_FLOW_ITEM_TYPE_END,
4217 			.tci = nvgre_encap_conf.vlan_tci,
4221 			.src_addr = nvgre_encap_conf.ipv4_src,
4222 			.dst_addr = nvgre_encap_conf.ipv4_dst,
4224 		.item_nvgre.flow_id = 0,
4226 	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4227 	       nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4228 	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4229 	       nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Swap in an IPv6 item when IPv4 is not selected. */
4230 	if (!nvgre_encap_conf.select_ipv4) {
4231 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4232 		       &nvgre_encap_conf.ipv6_src,
4233 		       sizeof(nvgre_encap_conf.ipv6_src));
4234 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4235 		       &nvgre_encap_conf.ipv6_dst,
4236 		       sizeof(nvgre_encap_conf.ipv6_dst));
4237 		action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4238 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
4239 			.spec = &action_nvgre_encap_data->item_ipv6,
4240 			.mask = &rte_flow_item_ipv6_mask,
4243 	if (!nvgre_encap_conf.select_vlan)
4244 		action_nvgre_encap_data->items[1].type =
4245 			RTE_FLOW_ITEM_TYPE_VOID;
4246 	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4247 	       RTE_DIM(nvgre_encap_conf.tni));
4248 	action->conf = &action_nvgre_encap_data->conf;
4252 /** Parse l2 encap action. */
/* Serializes an Ethernet (+ optional VLAN) header into the raw-encap
 * data buffer according to the global l2_encap_conf ("set l2_encap").
 * NOTE(review): vlan.tci below is seeded from mplsoudp_encap_conf
 * rather than l2_encap_conf — looks like a copy/paste slip; confirm
 * against upstream before changing.
 * NOTE(review): "ð," on the memcpy line appears to be mojibake for
 * "&eth," — reproduced verbatim; fix the encoding at the file level. */
4254 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4255 			 const char *str, unsigned int len,
4256 			 void *buf, unsigned int size)
4258 	struct buffer *out = buf;
4259 	struct rte_flow_action *action;
4260 	struct action_raw_encap_data *action_encap_data;
4261 	struct rte_flow_item_eth eth = { .type = 0, };
4262 	struct rte_flow_item_vlan vlan = {
4263 		.tci = mplsoudp_encap_conf.vlan_tci,
4269 	ret = parse_vc(ctx, token, str, len, buf, size);
4272 	/* Nothing else to do if there is no buffer. */
4275 	if (!out->args.vc.actions_n)
4277 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4278 	/* Point to selected object. */
4279 	ctx->object = out->args.vc.data;
4280 	ctx->objmask = NULL;
4281 	/* Copy the headers to the buffer. */
4282 	action_encap_data = ctx->object;
4283 	*action_encap_data = (struct action_raw_encap_data) {
4284 		.conf = (struct rte_flow_action_raw_encap){
4285 			.data = action_encap_data->data,
4289 	header = action_encap_data->data;
/* Ether type reflects what follows: VLAN tag or the inner IP family. */
4290 	if (l2_encap_conf.select_vlan)
4291 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4292 	else if (l2_encap_conf.select_ipv4)
4293 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4295 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4296 	memcpy(eth.dst.addr_bytes,
4297 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4298 	memcpy(eth.src.addr_bytes,
4299 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4300 	memcpy(header, ð, sizeof(eth));
4301 	header += sizeof(eth);
4302 	if (l2_encap_conf.select_vlan) {
4303 		if (l2_encap_conf.select_ipv4)
4304 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4306 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4307 		memcpy(header, &vlan, sizeof(vlan));
4308 		header += sizeof(vlan);
/* Final size is however many bytes were appended. */
4310 	action_encap_data->conf.size = header -
4311 		action_encap_data->data;
4312 	action->conf = &action_encap_data->conf;
4316 /** Parse l2 decap action. */
/* Serializes the Ethernet (+ optional VLAN) header pattern to strip
 * into the raw-decap data buffer, driven by l2_decap_conf.
 * NOTE(review): as in l2_encap above, vlan.tci is seeded from
 * mplsoudp_encap_conf — likely a copy/paste slip; confirm upstream.
 * NOTE(review): "ð," is mojibake for "&eth," — reproduced verbatim. */
4318 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4319 			 const char *str, unsigned int len,
4320 			 void *buf, unsigned int size)
4322 	struct buffer *out = buf;
4323 	struct rte_flow_action *action;
4324 	struct action_raw_decap_data *action_decap_data;
4325 	struct rte_flow_item_eth eth = { .type = 0, };
4326 	struct rte_flow_item_vlan vlan = {
4327 		.tci = mplsoudp_encap_conf.vlan_tci,
4333 	ret = parse_vc(ctx, token, str, len, buf, size);
4336 	/* Nothing else to do if there is no buffer. */
4339 	if (!out->args.vc.actions_n)
4341 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4342 	/* Point to selected object. */
4343 	ctx->object = out->args.vc.data;
4344 	ctx->objmask = NULL;
4345 	/* Copy the headers to the buffer. */
4346 	action_decap_data = ctx->object;
4347 	*action_decap_data = (struct action_raw_decap_data) {
4348 		.conf = (struct rte_flow_action_raw_decap){
4349 			.data = action_decap_data->data,
4353 	header = action_decap_data->data;
4354 	if (l2_decap_conf.select_vlan)
4355 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4356 	memcpy(header, ð, sizeof(eth));
4357 	header += sizeof(eth);
4358 	if (l2_decap_conf.select_vlan) {
4359 		memcpy(header, &vlan, sizeof(vlan));
4360 		header += sizeof(vlan);
/* Final size is however many bytes were appended. */
4362 	action_decap_data->conf.size = header -
4363 		action_decap_data->data;
4364 	action->conf = &action_decap_data->conf;
/* MPLS unicast ethertype, used as the GRE protocol field below. */
4368 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4370 /** Parse MPLSOGRE encap action. */
/*
 * Builds a RAW_ENCAP action from mplsogre_encap_conf with the header stack
 * Ethernet [/ VLAN] / IPv4|IPv6 / GRE / MPLS; conf.size is the total number
 * of header bytes written.
 * NOTE(review): this extract appears elided (some braces/else/returns missing).
 */
4372 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4373 const char *str, unsigned int len,
4374 void *buf, unsigned int size)
4376 struct buffer *out = buf;
4377 struct rte_flow_action *action;
4378 struct action_raw_encap_data *action_encap_data;
4379 struct rte_flow_item_eth eth = { .type = 0, };
4380 struct rte_flow_item_vlan vlan = {
4381 .tci = mplsogre_encap_conf.vlan_tci,
4384 struct rte_flow_item_ipv4 ipv4 = {
4386 .src_addr = mplsogre_encap_conf.ipv4_src,
4387 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4388 .next_proto_id = IPPROTO_GRE,
4389 .version_ihl = RTE_IPV4_VHL_DEF,
4390 .time_to_live = IPDEFTTL,
4393 struct rte_flow_item_ipv6 ipv6 = {
4395 .proto = IPPROTO_GRE,
4396 .hop_limits = IPDEFTTL,
4399 struct rte_flow_item_gre gre = {
4400 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4402 struct rte_flow_item_mpls mpls;
4406 ret = parse_vc(ctx, token, str, len, buf, size);
4409 /* Nothing else to do if there is no buffer. */
4412 if (!out->args.vc.actions_n)
4414 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4415 /* Point to selected object. */
4416 ctx->object = out->args.vc.data;
4417 ctx->objmask = NULL;
4418 /* Copy the headers to the buffer. */
4419 action_encap_data = ctx->object;
4420 *action_encap_data = (struct action_raw_encap_data) {
4421 .conf = (struct rte_flow_action_raw_encap){
4422 .data = action_encap_data->data,
4427 header = action_encap_data->data;
/* Outer ethertype reflects the next header: VLAN, else IPv4/IPv6. */
4428 if (mplsogre_encap_conf.select_vlan)
4429 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4430 else if (mplsogre_encap_conf.select_ipv4)
4431 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4433 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4434 memcpy(eth.dst.addr_bytes,
4435 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4436 memcpy(eth.src.addr_bytes,
4437 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" is a mis-decoded "&eth" (HTML entity). */
4438 memcpy(header, ð, sizeof(eth));
4439 header += sizeof(eth);
4440 if (mplsogre_encap_conf.select_vlan) {
4441 if (mplsogre_encap_conf.select_ipv4)
4442 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4444 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4445 memcpy(header, &vlan, sizeof(vlan));
4446 header += sizeof(vlan);
4448 if (mplsogre_encap_conf.select_ipv4) {
4449 memcpy(header, &ipv4, sizeof(ipv4));
4450 header += sizeof(ipv4);
4452 memcpy(&ipv6.hdr.src_addr,
4453 &mplsogre_encap_conf.ipv6_src,
4454 sizeof(mplsogre_encap_conf.ipv6_src));
4455 memcpy(&ipv6.hdr.dst_addr,
4456 &mplsogre_encap_conf.ipv6_dst,
4457 sizeof(mplsogre_encap_conf.ipv6_dst));
4458 memcpy(header, &ipv6, sizeof(ipv6));
4459 header += sizeof(ipv6);
4461 memcpy(header, &gre, sizeof(gre));
4462 header += sizeof(gre);
4463 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4464 RTE_DIM(mplsogre_encap_conf.label));
/* Set the MPLS bottom-of-stack (S) bit on the single label entry. */
4465 mpls.label_tc_s[2] |= 0x1;
4466 memcpy(header, &mpls, sizeof(mpls));
4467 header += sizeof(mpls);
4468 action_encap_data->conf.size = header -
4469 action_encap_data->data;
4470 action->conf = &action_encap_data->conf;
4474 /** Parse MPLSOGRE decap action. */
/*
 * Builds a RAW_DECAP action describing the header stack to strip:
 * Ethernet [/ VLAN] / IPv4|IPv6 / GRE / MPLS (zeroed MPLS template).
 * NOTE(review): most selectors and MACs below read mplsogre_encap_conf
 * (not mplsogre_decap_conf) — the decap template mirrors the encap
 * configuration; confirm this reuse is intentional.
 * NOTE(review): this extract appears elided (some braces/returns missing).
 */
4476 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4477 const char *str, unsigned int len,
4478 void *buf, unsigned int size)
4480 struct buffer *out = buf;
4481 struct rte_flow_action *action;
4482 struct action_raw_decap_data *action_decap_data;
4483 struct rte_flow_item_eth eth = { .type = 0, };
4484 struct rte_flow_item_vlan vlan = {.tci = 0};
4485 struct rte_flow_item_ipv4 ipv4 = {
4487 .next_proto_id = IPPROTO_GRE,
4490 struct rte_flow_item_ipv6 ipv6 = {
4492 .proto = IPPROTO_GRE,
4495 struct rte_flow_item_gre gre = {
4496 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4498 struct rte_flow_item_mpls mpls;
4502 ret = parse_vc(ctx, token, str, len, buf, size);
4505 /* Nothing else to do if there is no buffer. */
4508 if (!out->args.vc.actions_n)
4510 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4511 /* Point to selected object. */
4512 ctx->object = out->args.vc.data;
4513 ctx->objmask = NULL;
4514 /* Copy the headers to the buffer. */
4515 action_decap_data = ctx->object;
4516 *action_decap_data = (struct action_raw_decap_data) {
4517 .conf = (struct rte_flow_action_raw_decap){
4518 .data = action_decap_data->data,
4522 header = action_decap_data->data;
4523 if (mplsogre_decap_conf.select_vlan)
4524 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4525 else if (mplsogre_encap_conf.select_ipv4)
4526 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4528 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4529 memcpy(eth.dst.addr_bytes,
4530 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4531 memcpy(eth.src.addr_bytes,
4532 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" is a mis-decoded "&eth" (HTML entity). */
4533 memcpy(header, ð, sizeof(eth));
4534 header += sizeof(eth);
4535 if (mplsogre_encap_conf.select_vlan) {
4536 if (mplsogre_encap_conf.select_ipv4)
4537 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4539 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4540 memcpy(header, &vlan, sizeof(vlan));
4541 header += sizeof(vlan);
4543 if (mplsogre_encap_conf.select_ipv4) {
4544 memcpy(header, &ipv4, sizeof(ipv4));
4545 header += sizeof(ipv4);
4547 memcpy(header, &ipv6, sizeof(ipv6));
4548 header += sizeof(ipv6);
4550 memcpy(header, &gre, sizeof(gre));
4551 header += sizeof(gre);
/* MPLS template is zeroed: label contents are irrelevant for decap. */
4552 memset(&mpls, 0, sizeof(mpls));
4553 memcpy(header, &mpls, sizeof(mpls));
4554 header += sizeof(mpls);
4555 action_decap_data->conf.size = header -
4556 action_decap_data->data;
4557 action->conf = &action_decap_data->conf;
4561 /** Parse MPLSOUDP encap action. */
/*
 * Builds a RAW_ENCAP action from mplsoudp_encap_conf with the header stack
 * Ethernet [/ VLAN] / IPv4|IPv6 / UDP / MPLS; conf.size is the total number
 * of header bytes written.  Same shape as the MPLSoGRE variant above but
 * with a UDP header instead of GRE.
 * NOTE(review): this extract appears elided (some braces/else/returns missing).
 */
4563 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4564 const char *str, unsigned int len,
4565 void *buf, unsigned int size)
4567 struct buffer *out = buf;
4568 struct rte_flow_action *action;
4569 struct action_raw_encap_data *action_encap_data;
4570 struct rte_flow_item_eth eth = { .type = 0, };
4571 struct rte_flow_item_vlan vlan = {
4572 .tci = mplsoudp_encap_conf.vlan_tci,
4575 struct rte_flow_item_ipv4 ipv4 = {
4577 .src_addr = mplsoudp_encap_conf.ipv4_src,
4578 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4579 .next_proto_id = IPPROTO_UDP,
4580 .version_ihl = RTE_IPV4_VHL_DEF,
4581 .time_to_live = IPDEFTTL,
4584 struct rte_flow_item_ipv6 ipv6 = {
4586 .proto = IPPROTO_UDP,
4587 .hop_limits = IPDEFTTL,
4590 struct rte_flow_item_udp udp = {
4592 .src_port = mplsoudp_encap_conf.udp_src,
4593 .dst_port = mplsoudp_encap_conf.udp_dst,
4596 struct rte_flow_item_mpls mpls;
4600 ret = parse_vc(ctx, token, str, len, buf, size);
4603 /* Nothing else to do if there is no buffer. */
4606 if (!out->args.vc.actions_n)
4608 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4609 /* Point to selected object. */
4610 ctx->object = out->args.vc.data;
4611 ctx->objmask = NULL;
4612 /* Copy the headers to the buffer. */
4613 action_encap_data = ctx->object;
4614 *action_encap_data = (struct action_raw_encap_data) {
4615 .conf = (struct rte_flow_action_raw_encap){
4616 .data = action_encap_data->data,
4621 header = action_encap_data->data;
/* Outer ethertype reflects the next header: VLAN, else IPv4/IPv6. */
4622 if (mplsoudp_encap_conf.select_vlan)
4623 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4624 else if (mplsoudp_encap_conf.select_ipv4)
4625 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4627 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4628 memcpy(eth.dst.addr_bytes,
4629 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4630 memcpy(eth.src.addr_bytes,
4631 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" is a mis-decoded "&eth" (HTML entity). */
4632 memcpy(header, ð, sizeof(eth));
4633 header += sizeof(eth);
4634 if (mplsoudp_encap_conf.select_vlan) {
4635 if (mplsoudp_encap_conf.select_ipv4)
4636 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4638 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4639 memcpy(header, &vlan, sizeof(vlan));
4640 header += sizeof(vlan);
4642 if (mplsoudp_encap_conf.select_ipv4) {
4643 memcpy(header, &ipv4, sizeof(ipv4));
4644 header += sizeof(ipv4);
4646 memcpy(&ipv6.hdr.src_addr,
4647 &mplsoudp_encap_conf.ipv6_src,
4648 sizeof(mplsoudp_encap_conf.ipv6_src));
4649 memcpy(&ipv6.hdr.dst_addr,
4650 &mplsoudp_encap_conf.ipv6_dst,
4651 sizeof(mplsoudp_encap_conf.ipv6_dst));
4652 memcpy(header, &ipv6, sizeof(ipv6));
4653 header += sizeof(ipv6);
4655 memcpy(header, &udp, sizeof(udp));
4656 header += sizeof(udp);
4657 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4658 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the MPLS bottom-of-stack (S) bit on the single label entry. */
4659 mpls.label_tc_s[2] |= 0x1;
4660 memcpy(header, &mpls, sizeof(mpls));
4661 header += sizeof(mpls);
4662 action_encap_data->conf.size = header -
4663 action_encap_data->data;
4664 action->conf = &action_encap_data->conf;
4668 /** Parse MPLSOUDP decap action. */
/*
 * Builds a RAW_DECAP action describing the header stack to strip:
 * Ethernet [/ VLAN] / IPv4|IPv6 / UDP(dst 6635 = MPLS-in-UDP) / MPLS.
 * NOTE(review): most selectors and MACs below read mplsoudp_encap_conf
 * (not mplsoudp_decap_conf) — the decap template mirrors the encap
 * configuration; confirm this reuse is intentional.
 * NOTE(review): this extract appears elided (some braces/returns missing).
 */
4670 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4671 const char *str, unsigned int len,
4672 void *buf, unsigned int size)
4674 struct buffer *out = buf;
4675 struct rte_flow_action *action;
4676 struct action_raw_decap_data *action_decap_data;
4677 struct rte_flow_item_eth eth = { .type = 0, };
4678 struct rte_flow_item_vlan vlan = {.tci = 0};
4679 struct rte_flow_item_ipv4 ipv4 = {
4681 .next_proto_id = IPPROTO_UDP,
4684 struct rte_flow_item_ipv6 ipv6 = {
4686 .proto = IPPROTO_UDP,
4689 struct rte_flow_item_udp udp = {
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4691 .dst_port = rte_cpu_to_be_16(6635),
4694 struct rte_flow_item_mpls mpls;
4698 ret = parse_vc(ctx, token, str, len, buf, size);
4701 /* Nothing else to do if there is no buffer. */
4704 if (!out->args.vc.actions_n)
4706 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4707 /* Point to selected object. */
4708 ctx->object = out->args.vc.data;
4709 ctx->objmask = NULL;
4710 /* Copy the headers to the buffer. */
4711 action_decap_data = ctx->object;
4712 *action_decap_data = (struct action_raw_decap_data) {
4713 .conf = (struct rte_flow_action_raw_decap){
4714 .data = action_decap_data->data,
4718 header = action_decap_data->data;
4719 if (mplsoudp_decap_conf.select_vlan)
4720 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4721 else if (mplsoudp_encap_conf.select_ipv4)
4722 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4724 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4725 memcpy(eth.dst.addr_bytes,
4726 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4727 memcpy(eth.src.addr_bytes,
4728 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" is a mis-decoded "&eth" (HTML entity). */
4729 memcpy(header, ð, sizeof(eth));
4730 header += sizeof(eth);
4731 if (mplsoudp_encap_conf.select_vlan) {
4732 if (mplsoudp_encap_conf.select_ipv4)
4733 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4735 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4736 memcpy(header, &vlan, sizeof(vlan));
4737 header += sizeof(vlan);
4739 if (mplsoudp_encap_conf.select_ipv4) {
4740 memcpy(header, &ipv4, sizeof(ipv4));
4741 header += sizeof(ipv4);
4743 memcpy(header, &ipv6, sizeof(ipv6));
4744 header += sizeof(ipv6);
4746 memcpy(header, &udp, sizeof(udp));
4747 header += sizeof(udp);
/* MPLS template is zeroed: label contents are irrelevant for decap. */
4748 memset(&mpls, 0, sizeof(mpls));
4749 memcpy(header, &mpls, sizeof(mpls));
4750 header += sizeof(mpls);
4751 action_decap_data->conf.size = header -
4752 action_decap_data->data;
4753 action->conf = &action_decap_data->conf;
/*
 * Parse the "index" argument of a raw_decap action: read a bounded integer
 * into action_raw_decap_data->idx via parse_int(), then point the action's
 * conf at the globally pre-configured raw_decap_confs[idx] buffer.
 * NOTE(review): this extract appears elided (error returns missing).
 */
4758 parse_vc_action_raw_decap_index(struct context *ctx, const struct token *token,
4759 const char *str, unsigned int len, void *buf,
4762 struct action_raw_decap_data *action_raw_decap_data;
4763 struct rte_flow_action *action;
4764 const struct arg *arg;
4765 struct buffer *out = buf;
4769 RTE_SET_USED(token);
/* Index must be a valid slot in raw_decap_confs[]. */
4772 arg = ARGS_ENTRY_ARB_BOUNDED
4773 (offsetof(struct action_raw_decap_data, idx),
4774 sizeof(((struct action_raw_decap_data *)0)->idx),
4775 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4776 if (push_args(ctx, arg))
4778 ret = parse_int(ctx, token, str, len, NULL, 0);
4785 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4786 action_raw_decap_data = ctx->object;
4787 idx = action_raw_decap_data->idx;
4788 action_raw_decap_data->conf.data = raw_decap_confs[idx].data;
4789 action_raw_decap_data->conf.size = raw_decap_confs[idx].size;
4790 action->conf = &action_raw_decap_data->conf;
/*
 * Parse the "index" argument of a raw_encap action: only valid in the
 * ACTION_RAW_ENCAP_INDEX_VALUE state; reads a bounded integer into
 * action_raw_encap_data->idx via parse_int(), then points the action's
 * conf at the globally pre-configured raw_encap_confs[idx] buffer.
 * NOTE(review): this extract appears elided (error returns missing).
 */
4796 parse_vc_action_raw_encap_index(struct context *ctx, const struct token *token,
4797 const char *str, unsigned int len, void *buf,
4800 struct action_raw_encap_data *action_raw_encap_data;
4801 struct rte_flow_action *action;
4802 const struct arg *arg;
4803 struct buffer *out = buf;
4807 RTE_SET_USED(token);
4810 if (ctx->curr != ACTION_RAW_ENCAP_INDEX_VALUE)
/* Index must be a valid slot in raw_encap_confs[]. */
4812 arg = ARGS_ENTRY_ARB_BOUNDED
4813 (offsetof(struct action_raw_encap_data, idx),
4814 sizeof(((struct action_raw_encap_data *)0)->idx),
4815 0, RAW_ENCAP_CONFS_MAX_NUM - 1);
4816 if (push_args(ctx, arg))
4818 ret = parse_int(ctx, token, str, len, NULL, 0);
4825 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4826 action_raw_encap_data = ctx->object;
4827 idx = action_raw_encap_data->idx;
4828 action_raw_encap_data->conf.data = raw_encap_confs[idx].data;
4829 action_raw_encap_data->conf.size = raw_encap_confs[idx].size;
4830 action_raw_encap_data->conf.preserve = NULL;
4831 action->conf = &action_raw_encap_data->conf;
/*
 * Parse a raw_encap action with no explicit index: defaults to the
 * pre-configured raw_encap_confs[0] buffer ("set raw_encap 0 ...").
 * NOTE(review): this extract appears elided (guards/returns missing).
 */
4836 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4837 const char *str, unsigned int len, void *buf,
4840 struct buffer *out = buf;
4841 struct rte_flow_action *action;
4842 struct action_raw_encap_data *action_raw_encap_data = NULL;
4845 ret = parse_vc(ctx, token, str, len, buf, size);
4848 /* Nothing else to do if there is no buffer. */
4851 if (!out->args.vc.actions_n)
4853 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4854 /* Point to selected object. */
4855 ctx->object = out->args.vc.data;
4856 ctx->objmask = NULL;
4857 /* Copy the headers to the buffer. */
4858 action_raw_encap_data = ctx->object;
4859 action_raw_encap_data->conf.data = raw_encap_confs[0].data;
4860 action_raw_encap_data->conf.preserve = NULL;
4861 action_raw_encap_data->conf.size = raw_encap_confs[0].size;
4862 action->conf = &action_raw_encap_data->conf;
/*
 * Parse a raw_decap action with no explicit index: defaults to the
 * pre-configured raw_decap_confs[0] buffer ("set raw_decap 0 ...").
 * NOTE(review): this extract appears elided (guards/returns missing).
 */
4867 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4868 const char *str, unsigned int len, void *buf,
4871 struct buffer *out = buf;
4872 struct rte_flow_action *action;
4873 struct action_raw_decap_data *action_raw_decap_data = NULL;
4876 ret = parse_vc(ctx, token, str, len, buf, size);
4879 /* Nothing else to do if there is no buffer. */
4882 if (!out->args.vc.actions_n)
4884 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4885 /* Point to selected object. */
4886 ctx->object = out->args.vc.data;
4887 ctx->objmask = NULL;
4888 /* Copy the headers to the buffer. */
4889 action_raw_decap_data = ctx->object;
4890 action_raw_decap_data->conf.data = raw_decap_confs[0].data;
4891 action_raw_decap_data->conf.size = raw_decap_confs[0].size;
4892 action->conf = &action_raw_decap_data->conf;
4896 /** Parse tokens for destroy command. */
/*
 * First call (out->command unset) initializes the buffer and places the
 * rule-ID array right after *out; each subsequent call appends one rule ID
 * slot after bounds-checking against the caller-provided buffer size.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
4898 parse_destroy(struct context *ctx, const struct token *token,
4899 const char *str, unsigned int len,
4900 void *buf, unsigned int size)
4902 struct buffer *out = buf;
4904 /* Token name must match. */
4905 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4907 /* Nothing else to do if there is no buffer. */
4910 if (!out->command) {
4911 if (ctx->curr != DESTROY)
4913 if (sizeof(*out) > size)
4915 out->command = ctx->curr;
4918 ctx->objmask = NULL;
/* Rule-ID storage lives immediately after the buffer header, aligned. */
4919 out->args.destroy.rule =
4920 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4924 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4925 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4928 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4929 ctx->objmask = NULL;
4933 /** Parse tokens for flush command. */
/*
 * Flush takes no extra arguments: just validate the token, record the
 * command on first use, and reset the object mask.
 */
4935 parse_flush(struct context *ctx, const struct token *token,
4936 const char *str, unsigned int len,
4937 void *buf, unsigned int size)
4939 struct buffer *out = buf;
4941 /* Token name must match. */
4942 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4944 /* Nothing else to do if there is no buffer. */
4947 if (!out->command) {
4948 if (ctx->curr != FLUSH)
4950 if (sizeof(*out) > size)
4952 out->command = ctx->curr;
4955 ctx->objmask = NULL;
4960 /** Parse tokens for query command. */
/*
 * Records the QUERY command on first use; the rule ID and action to query
 * are filled in by subsequent tokens (argument entries pushed elsewhere).
 */
4962 parse_query(struct context *ctx, const struct token *token,
4963 const char *str, unsigned int len,
4964 void *buf, unsigned int size)
4966 struct buffer *out = buf;
4968 /* Token name must match. */
4969 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4971 /* Nothing else to do if there is no buffer. */
4974 if (!out->command) {
4975 if (ctx->curr != QUERY)
4977 if (sizeof(*out) > size)
4979 out->command = ctx->curr;
4982 ctx->objmask = NULL;
4987 /** Parse action names. */
/*
 * Matches str against every token reachable from next_action[] and, on a
 * match, stores the action's private data at the offset described by the
 * popped argument entry.  On failure the argument is pushed back so the
 * caller's stack is left unchanged.
 * NOTE(review): this extract appears elided (match body partly missing).
 */
4989 parse_action(struct context *ctx, const struct token *token,
4990 const char *str, unsigned int len,
4991 void *buf, unsigned int size)
4993 struct buffer *out = buf;
4994 const struct arg *arg = pop_args(ctx);
4998 /* Argument is expected. */
5001 /* Parse action name. */
5002 for (i = 0; next_action[i]; ++i) {
5003 const struct parse_action_priv *priv;
5005 token = &token_list[next_action[i]];
5006 if (strcmp_partial(token->name, str, len))
5012 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so state is unchanged. */
5018 push_args(ctx, arg);
5022 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): first call records the LIST command and places
 * the group-ID array after *out; each later call appends one group ID slot
 * with a bounds check against the caller-provided buffer size.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
5024 parse_list(struct context *ctx, const struct token *token,
5025 const char *str, unsigned int len,
5026 void *buf, unsigned int size)
5028 struct buffer *out = buf;
5030 /* Token name must match. */
5031 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5033 /* Nothing else to do if there is no buffer. */
5036 if (!out->command) {
5037 if (ctx->curr != LIST)
5039 if (sizeof(*out) > size)
5041 out->command = ctx->curr;
5044 ctx->objmask = NULL;
/* Group-ID storage lives immediately after the buffer header, aligned. */
5045 out->args.list.group =
5046 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5050 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
5051 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
5054 ctx->object = out->args.list.group + out->args.list.group_n++;
5055 ctx->objmask = NULL;
5059 /** Parse tokens for isolate command. */
/*
 * Records the ISOLATE command on first use; the boolean on/off argument is
 * handled by subsequent tokens.
 */
5061 parse_isolate(struct context *ctx, const struct token *token,
5062 const char *str, unsigned int len,
5063 void *buf, unsigned int size)
5065 struct buffer *out = buf;
5067 /* Token name must match. */
5068 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5070 /* Nothing else to do if there is no buffer. */
5073 if (!out->command) {
5074 if (ctx->curr != ISOLATE)
5076 if (sizeof(*out) > size)
5078 out->command = ctx->curr;
5081 ctx->objmask = NULL;
5087 * Parse signed/unsigned integers 8 to 64-bit long.
5089 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts str with strtoimax()/strtoumax() (base auto-detected), range
 * checks against arg->min/max, then stores the value at arg->offset inside
 * ctx->object, byte-swapping to network order when arg->hton is set.
 * The 3-byte case handles 24-bit fields (e.g. MPLS label+TC+S).
 * The object mask, when present, is filled with all-ones for the same span.
 * NOTE(review): this extract appears elided (switch/breaks/returns missing).
 */
5093 parse_int(struct context *ctx, const struct token *token,
5094 const char *str, unsigned int len,
5095 void *buf, unsigned int size)
5097 const struct arg *arg = pop_args(ctx);
5102 /* Argument is expected. */
5107 (uintmax_t)strtoimax(str, &end, 0) :
5108 strtoumax(str, &end, 0);
/* Reject trailing garbage: conversion must consume exactly len chars. */
5109 if (errno || (size_t)(end - str) != len)
5112 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
5113 (intmax_t)u > (intmax_t)arg->max)) ||
5114 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments are stored via arg_entry_bf_fill() instead. */
5119 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
5120 !arg_entry_bf_fill(ctx->objmask, -1, arg))
5124 buf = (uint8_t *)ctx->object + arg->offset;
5126 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
5130 case sizeof(uint8_t):
5131 *(uint8_t *)buf = u;
5133 case sizeof(uint16_t):
5134 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit destination: store byte-by-byte in the requested endianness. */
5136 case sizeof(uint8_t [3]):
5137 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
5139 ((uint8_t *)buf)[0] = u;
5140 ((uint8_t *)buf)[1] = u >> 8;
5141 ((uint8_t *)buf)[2] = u >> 16;
5145 ((uint8_t *)buf)[0] = u >> 16;
5146 ((uint8_t *)buf)[1] = u >> 8;
5147 ((uint8_t *)buf)[2] = u;
5149 case sizeof(uint32_t):
5150 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
5152 case sizeof(uint64_t):
5153 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the object mask when one is selected. */
5158 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
5160 buf = (uint8_t *)ctx->objmask + arg->offset;
5165 push_args(ctx, arg);
5172 * Three arguments (ctx->args) are retrieved from the stack to store data,
5173 * its actual length and address (in that order).
/*
 * Copies the raw token text into the object at arg_data->offset,
 * zero-padding up to arg_data->size.  The actual length is written first
 * through parse_int() using arg_len, and an optional pointer to the stored
 * data is saved via arg_addr.  On failure all three arguments are pushed
 * back so the stack is unchanged.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
5176 parse_string(struct context *ctx, const struct token *token,
5177 const char *str, unsigned int len,
5178 void *buf, unsigned int size)
5180 const struct arg *arg_data = pop_args(ctx);
5181 const struct arg *arg_len = pop_args(ctx);
5182 const struct arg *arg_addr = pop_args(ctx);
5183 char tmp[16]; /* Ought to be enough. */
5186 /* Arguments are expected. */
5190 push_args(ctx, arg_data);
5194 push_args(ctx, arg_len);
5195 push_args(ctx, arg_data);
5198 size = arg_data->size;
5199 /* Bit-mask fill is not supported. */
5200 if (arg_data->mask || size < len)
5204 /* Let parse_int() fill length information first. */
5205 ret = snprintf(tmp, sizeof(tmp), "%u", len);
5208 push_args(ctx, arg_len);
5209 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5214 buf = (uint8_t *)ctx->object + arg_data->offset;
5215 /* Output buffer is not necessarily NUL-terminated. */
5216 memcpy(buf, str, len);
5217 memset((uint8_t *)buf + len, 0x00, size - len);
5219 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
5220 /* Save address if requested. */
5221 if (arg_addr->size) {
5222 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5224 (uint8_t *)ctx->object + arg_data->offset
5228 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5230 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
5236 push_args(ctx, arg_addr);
5237 push_args(ctx, arg_len);
5238 push_args(ctx, arg_data);
/*
 * Convert a hex character string into bytes: each pair of hex digits in
 * src becomes one byte in dst via strtoul(base 16); *size is the input
 * length in characters on entry (output byte count is derived from it).
 * NOTE(review): this extract appears elided (validation and the final
 * length/error handling are not visible here).
 */
5243 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
5249 /* Check input parameters */
5250 if ((src == NULL) ||
5256 /* Convert chars to bytes */
5257 for (i = 0, len = 0; i < *size; i += 2) {
/* tmp holds one two-digit hex pair plus NUL. */
5258 snprintf(tmp, 3, "%s", src + i);
5259 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Like parse_string() but for hex input ("0x..."): decodes the token into
 * bytes with parse_hex_string(), stores them at arg_data->offset (zero
 * padded to arg_data->size), records the byte length via arg_len through
 * parse_int(), and optionally saves a pointer to the data via arg_addr.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
5274 parse_hex(struct context *ctx, const struct token *token,
5275 const char *str, unsigned int len,
5276 void *buf, unsigned int size)
5278 const struct arg *arg_data = pop_args(ctx);
5279 const struct arg *arg_len = pop_args(ctx);
5280 const struct arg *arg_addr = pop_args(ctx);
5281 char tmp[16]; /* Ought to be enough. */
5283 unsigned int hexlen = len;
5284 unsigned int length = 256;
/* VLA of fixed 256 bytes: scratch space for the decoded payload. */
5285 uint8_t hex_tmp[length];
5287 /* Arguments are expected. */
5291 push_args(ctx, arg_data);
5295 push_args(ctx, arg_len);
5296 push_args(ctx, arg_data);
5299 size = arg_data->size;
5300 /* Bit-mask fill is not supported. */
5306 /* translate bytes string to array. */
5307 if (str[0] == '0' && ((str[1] == 'x') ||
5312 if (hexlen > length)
5314 ret = parse_hex_string(str, hex_tmp, &hexlen);
5317 /* Let parse_int() fill length information first. */
5318 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
5321 push_args(ctx, arg_len);
5322 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5327 buf = (uint8_t *)ctx->object + arg_data->offset;
5328 /* Output buffer is not necessarily NUL-terminated. */
5329 memcpy(buf, hex_tmp, hexlen);
5330 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
5332 memset((uint8_t *)ctx->objmask + arg_data->offset,
5334 /* Save address if requested. */
5335 if (arg_addr->size) {
5336 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5338 (uint8_t *)ctx->object + arg_data->offset
5342 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5344 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments in reverse pop order. */
5350 push_args(ctx, arg_addr);
5351 push_args(ctx, arg_len);
5352 push_args(ctx, arg_data);
5358 * Parse a MAC address.
5360 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Delegates to cmdline_parse_etheraddr() and copies the 6-byte result to
 * arg->offset inside ctx->object; the object mask span is set to all-ones.
 * Destination size must be exactly sizeof(struct rte_ether_addr).
 */
5364 parse_mac_addr(struct context *ctx, const struct token *token,
5365 const char *str, unsigned int len,
5366 void *buf, unsigned int size)
5368 const struct arg *arg = pop_args(ctx);
5369 struct rte_ether_addr tmp;
5373 /* Argument is expected. */
5377 /* Bit-mask fill is not supported. */
5378 if (arg->mask || size != sizeof(tmp))
5380 /* Only network endian is supported. */
5383 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The whole token must have been consumed. */
5384 if (ret < 0 || (unsigned int)ret != len)
5388 buf = (uint8_t *)ctx->object + arg->offset;
5389 memcpy(buf, &tmp, size);
5391 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5394 push_args(ctx, arg);
5399 * Parse an IPv4 address.
5401 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses inet_pton(AF_INET) on a NUL-terminated copy of the token; if that
 * fails, falls back to parse_int() so plain integers are also accepted.
 * Result is stored in network byte order at arg->offset.
 */
5405 parse_ipv4_addr(struct context *ctx, const struct token *token,
5406 const char *str, unsigned int len,
5407 void *buf, unsigned int size)
5409 const struct arg *arg = pop_args(ctx);
5414 /* Argument is expected. */
5418 /* Bit-mask fill is not supported. */
5419 if (arg->mask || size != sizeof(tmp))
5421 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; token text is not. */
5424 memcpy(str2, str, len);
5426 ret = inet_pton(AF_INET, str2, &tmp);
5428 /* Attempt integer parsing. */
5429 push_args(ctx, arg);
5430 return parse_int(ctx, token, str, len, buf, size);
5434 buf = (uint8_t *)ctx->object + arg->offset;
5435 memcpy(buf, &tmp, size);
5437 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5440 push_args(ctx, arg);
5445 * Parse an IPv6 address.
5447 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses inet_pton(AF_INET6) on a NUL-terminated copy of the token and
 * stores the 16-byte result at arg->offset.  Unlike parse_ipv4_addr(),
 * there is no integer fallback visible here.
 */
5451 parse_ipv6_addr(struct context *ctx, const struct token *token,
5452 const char *str, unsigned int len,
5453 void *buf, unsigned int size)
5455 const struct arg *arg = pop_args(ctx);
5457 struct in6_addr tmp;
5461 /* Argument is expected. */
5465 /* Bit-mask fill is not supported. */
5466 if (arg->mask || size != sizeof(tmp))
5468 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; token text is not. */
5471 memcpy(str2, str, len);
5473 ret = inet_pton(AF_INET6, str2, &tmp);
5478 buf = (uint8_t *)ctx->object + arg->offset;
5479 memcpy(buf, &tmp, size);
5481 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5484 push_args(ctx, arg);
5488 /** Boolean values (even indices stand for false). */
5489 static const char *const boolean_name[] = {
5499 * Parse a boolean value.
5501 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Matches the token against boolean_name[]; the index parity encodes the
 * value (even = false, odd = true).  The match is rewritten as "0"/"1"
 * and handed to parse_int() for storage; unmatched tokens go to
 * parse_int() unchanged so raw integers also work.
 */
5505 parse_boolean(struct context *ctx, const struct token *token,
5506 const char *str, unsigned int len,
5507 void *buf, unsigned int size)
5509 const struct arg *arg = pop_args(ctx);
5513 /* Argument is expected. */
5516 for (i = 0; boolean_name[i]; ++i)
5517 if (!strcmp_partial(boolean_name[i], str, len))
5519 /* Process token as integer. */
5520 if (boolean_name[i])
5521 str = i & 1 ? "1" : "0";
5522 push_args(ctx, arg);
5523 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen("0"/"1"). */
5524 return ret > 0 ? (int)len : ret;
5527 /** Parse port and update context. */
/*
 * Parses the port number into a throwaway stack buffer via parse_int()
 * and records the result in ctx->port for later tokens (e.g. rule-ID and
 * queue completion callbacks).
 */
5529 parse_port(struct context *ctx, const struct token *token,
5530 const char *str, unsigned int len,
5531 void *buf, unsigned int size)
/* Compound literal: a local scratch buffer independent of the caller's. */
5533 struct buffer *out = &(struct buffer){ .port = 0 };
5541 ctx->objmask = NULL;
5542 size = sizeof(*out);
5544 ret = parse_int(ctx, token, str, len, out, size);
5546 ctx->port = out->port;
5552 /** Parse set command, initialize output buffer for subsequent tokens. */
/*
 * Validates the token and records the current state (SET_RAW_ENCAP /
 * SET_RAW_DECAP) as the command; the actual pattern is parsed by later
 * tokens into the buffer prepared by parse_set_init().
 */
5554 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5555 const char *str, unsigned int len,
5556 void *buf, unsigned int size)
5558 struct buffer *out = buf;
5560 /* Token name must match. */
5561 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5563 /* Nothing else to do if there is no buffer. */
5566 /* Make sure buffer is large enough. */
5567 if (size < sizeof(*out))
5570 ctx->objmask = NULL;
5574 out->command = ctx->curr;
5579 * Parse set raw_encap/raw_decap command,
5580 * initialize output buffer for subsequent tokens.
/*
 * Zeroes the buffer header, poisons the remainder with 0x22 (so stale
 * reads are recognizable), records the SET command, and points
 * args.vc.pattern just after *out where pattern items will be appended.
 * args.vc.data is set to the end of the buffer; item data grows downward
 * toward the pattern array.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
5583 parse_set_init(struct context *ctx, const struct token *token,
5584 const char *str, unsigned int len,
5585 void *buf, unsigned int size)
5587 struct buffer *out = buf;
5589 /* Token name must match. */
5590 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5592 /* Nothing else to do if there is no buffer. */
5595 /* Make sure buffer is large enough. */
5596 if (size < sizeof(*out))
5598 /* Initialize buffer. */
5599 memset(out, 0x00, sizeof(*out));
5600 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5603 ctx->objmask = NULL;
5604 if (!out->command) {
5605 if (ctx->curr != SET)
5607 if (sizeof(*out) > size)
5609 out->command = ctx->curr;
5610 out->args.vc.data = (uint8_t *)out + size;
5611 /* All we need is pattern */
5612 out->args.vc.pattern =
5613 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5615 ctx->object = out->args.vc.pattern;
5620 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
5622 comp_none(struct context *ctx, const struct token *token,
5623 unsigned int ent, char *buf, unsigned int size)
5633 /** Complete boolean values. */
/* With buf: copy entry #ent of boolean_name[]; without: count entries. */
5635 comp_boolean(struct context *ctx, const struct token *token,
5636 unsigned int ent, char *buf, unsigned int size)
5642 for (i = 0; boolean_name[i]; ++i)
5643 if (buf && i == ent)
5644 return strlcpy(buf, boolean_name[i], size);
5650 /** Complete action names. */
/* With buf: copy name of action token #ent; without: count entries. */
5652 comp_action(struct context *ctx, const struct token *token,
5653 unsigned int ent, char *buf, unsigned int size)
5659 for (i = 0; next_action[i]; ++i)
5660 if (buf && i == ent)
5661 return strlcpy(buf, token_list[next_action[i]].name,
5668 /** Complete available ports. */
/* Iterates attached ethdev ports; prints port #ent or counts them. */
5670 comp_port(struct context *ctx, const struct token *token,
5671 unsigned int ent, char *buf, unsigned int size)
5678 RTE_ETH_FOREACH_DEV(p) {
5679 if (buf && i == ent)
5680 return snprintf(buf, size, "%u", p)
5688 /** Complete available rule IDs. */
/*
 * Walks the flow list of the port previously recorded by parse_port()
 * (ctx->port); prints rule #ent or counts rules.  Bails out when the port
 * is invalid or RTE_PORT_ALL was given.
 */
5690 comp_rule_id(struct context *ctx, const struct token *token,
5691 unsigned int ent, char *buf, unsigned int size)
5694 struct rte_port *port;
5695 struct port_flow *pf;
5698 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5699 ctx->port == (portid_t)RTE_PORT_ALL)
5701 port = &ports[ctx->port];
5702 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5703 if (buf && i == ent)
5704 return snprintf(buf, size, "%u", pf->id);
5712 /** Complete type field for RSS action. */
/* Suggests rss_type_table[] entries, then "end" as the final candidate. */
5714 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5715 unsigned int ent, char *buf, unsigned int size)
5721 for (i = 0; rss_type_table[i].str; ++i)
5726 return strlcpy(buf, rss_type_table[ent].str, size);
5728 return snprintf(buf, size, "end");
5732 /** Complete queue field for RSS action. */
/* Suggests queue indices as plain integers, then "end" as terminator. */
5734 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5735 unsigned int ent, char *buf, unsigned int size)
5742 return snprintf(buf, size, "%u", ent);
5744 return snprintf(buf, size, "end");
5748 /** Complete index number for set raw_encap/raw_decap commands. */
/* Suggests indices 0..RAW_ENCAP_CONFS_MAX_NUM-1; prints #ent or counts. */
5750 comp_set_raw_index(struct context *ctx, const struct token *token,
5751 unsigned int ent, char *buf, unsigned int size)
5757 RTE_SET_USED(token);
5758 for (idx = 0; idx < RAW_ENCAP_CONFS_MAX_NUM; ++idx) {
5759 if (buf && idx == ent)
5760 return snprintf(buf, size, "%u", idx);
5766 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
5767 static struct context cmd_flow_context;
5769 /** Global parser instance (cmdline API). */
5770 cmdline_parse_inst_t cmd_flow;
5771 cmdline_parse_inst_t cmd_set_raw;
5773 /** Initialize context. */
/* Resets parser state between commands (fields reset individually). */
5775 cmd_flow_context_init(struct context *ctx)
5777 /* A full memset() is not necessary. */
5787 ctx->objmask = NULL;
5790 /** Parse a token (cmdline API). */
/*
 * Token dispatcher: measures the current argument in src, tries every
 * candidate token on top of the ctx->next stack (custom ->call or
 * parse_default), and on a match pushes the matched token's follow-up
 * token lists and argument entries for subsequent calls.
 * NOTE(review): this extract appears elided (returns/braces missing).
 */
5792 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5795 struct context *ctx = &cmd_flow_context;
5796 const struct token *token;
5797 const enum index *list;
5802 token = &token_list[ctx->curr];
5803 /* Check argument length. */
/* NOTE(review): isspace() on plain char — cast to unsigned char to be
 * strictly conforming for bytes >= 0x80. */
5806 for (len = 0; src[len]; ++len)
5807 if (src[len] == '#' || isspace(src[len]))
5811 /* Last argument and EOL detection. */
5812 for (i = len; src[i]; ++i)
5813 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5815 else if (!isspace(src[i])) {
5820 if (src[i] == '\r' || src[i] == '\n') {
5824 /* Initialize context if necessary. */
5825 if (!ctx->next_num) {
5828 ctx->next[ctx->next_num++] = token->next[0];
5830 /* Process argument through candidates. */
5831 ctx->prev = ctx->curr;
5832 list = ctx->next[ctx->next_num - 1];
5833 for (i = 0; list[i]; ++i) {
5834 const struct token *next = &token_list[list[i]];
5837 ctx->curr = list[i];
/* A token either provides a custom parser or uses parse_default(). */
5839 tmp = next->call(ctx, next, src, len, result, size);
5841 tmp = parse_default(ctx, next, src, len, result, size);
5842 if (tmp == -1 || tmp != len)
5850 /* Push subsequent tokens if any. */
5852 for (i = 0; token->next[i]; ++i) {
5853 if (ctx->next_num == RTE_DIM(ctx->next))
5855 ctx->next[ctx->next_num++] = token->next[i];
5857 /* Push arguments if any. */
5859 for (i = 0; token->args[i]; ++i) {
5860 if (ctx->args_num == RTE_DIM(ctx->args))
5862 ctx->args[ctx->args_num++] = token->args[i];
5867 /** Return number of completion entries (cmdline API). */
5869 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5871 struct context *ctx = &cmd_flow_context;
5872 const struct token *token = &token_list[ctx->curr];
5873 const enum index *list;
5877 /* Count number of tokens in current list. */
5879 list = ctx->next[ctx->next_num - 1];
5881 list = token->next[0];
5882 for (i = 0; list[i]; ++i)
5887 * If there is a single token, use its completion callback, otherwise
5888 * return the number of entries.
5890 token = &token_list[list[0]];
5891 if (i == 1 && token->comp) {
5892 /* Save index for cmd_flow_get_help(). */
5893 ctx->prev = list[0];
/* Delegate the count to the token's own completion callback (NULL buffer). */
5894 return token->comp(ctx, token, 0, NULL, 0);
5899 /** Return a completion entry (cmdline API). */
5901 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5902 char *dst, unsigned int size)
5904 struct context *ctx = &cmd_flow_context;
5905 const struct token *token = &token_list[ctx->curr];
5906 const enum index *list;
5910 /* Count number of tokens in current list. */
5912 list = ctx->next[ctx->next_num - 1];
5914 list = token->next[0];
5915 for (i = 0; list[i]; ++i)
5919 /* If there is a single token, use its completion callback. */
5920 token = &token_list[list[0]];
5921 if (i == 1 && token->comp) {
5922 /* Save index for cmd_flow_get_help(). */
5923 ctx->prev = list[0];
/* Normalize the callback's result to the cmdline API's 0 / -1 contract. */
5924 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5926 /* Otherwise make sure the index is valid and use defaults. */
5929 token = &token_list[list[index]];
5930 strlcpy(dst, token->name, size);
5931 /* Save index for cmd_flow_get_help(). */
5932 ctx->prev = list[index];
5936 /** Populate help strings for current token (cmdline API). */
5938 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
/* Uses ctx->prev, saved by the completion callbacks above. */
5940 struct context *ctx = &cmd_flow_context;
5941 const struct token *token = &token_list[ctx->prev];
5946 /* Set token type and update global help with details. */
5947 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
/* Prefer the detailed help text; fall back to the token name. */
5949 cmd_flow.help_str = token->help;
5951 cmd_flow.help_str = token->name;
5955 /** Token definition template (cmdline API). */
/* Every dynamic token of the "flow" command shares this ops vtable. */
5956 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5957 .ops = &(struct cmdline_token_ops){
5958 .parse = cmd_flow_parse,
5959 .complete_get_nb = cmd_flow_complete_get_nb,
5960 .complete_get_elt = cmd_flow_complete_get_elt,
5961 .get_help = cmd_flow_get_help,
5966 /** Populate the next dynamic token. */
5968 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5969 cmdline_parse_token_hdr_t **hdr_inst)
5971 struct context *ctx = &cmd_flow_context;
5973 /* Always reinitialize context before requesting the first token. */
/* hdr_inst == cmd_flow.tokens identifies the first token request. */
5974 if (!(hdr_inst - cmd_flow.tokens))
5975 cmd_flow_context_init(ctx)
5976 /* Return NULL when no more tokens are expected. */
5977 if (!ctx->next_num && ctx->curr) {
5981 /* Determine if command should end here. */
5982 if (ctx->eol && ctx->last && ctx->next_num) {
5983 const enum index *list = ctx->next[ctx->next_num - 1];
5986 for (i = 0; list[i]; ++i) {
5993 *hdr = &cmd_flow_token_hdr;
5996 /** Dispatch parsed buffer to function calls. */
5998 cmd_flow_parsed(const struct buffer *in)
/* Route the fully parsed command to the matching port_flow_*() handler. */
6000 switch (in->command) {
6002 port_flow_validate(in->port, &in->args.vc.attr,
6003 in->args.vc.pattern, in->args.vc.actions);
6006 port_flow_create(in->port, &in->args.vc.attr,
6007 in->args.vc.pattern, in->args.vc.actions);
6010 port_flow_destroy(in->port, in->args.destroy.rule_n,
6011 in->args.destroy.rule);
6014 port_flow_flush(in->port);
6017 port_flow_query(in->port, in->args.query.rule,
6018 &in->args.query.action);
6021 port_flow_list(in->port, in->args.list.group_n,
6022 in->args.list.group);
6025 port_flow_isolate(in->port, in->args.isolate.set);
6032 /** Token generator and output processing callback (cmdline API). */
6034 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
/* Either generate the next token or dispatch a completed command buffer. */
6037 cmd_flow_tok(arg0, arg2);
6039 cmd_flow_parsed(arg0);
6042 /** Global parser instance (cmdline API). */
6043 cmdline_parse_inst_t cmd_flow = {
6045 .data = NULL, /**< Unused. */
6046 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6049 }, /**< Tokens are returned by cmd_flow_tok(). */
6052 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Fix up protocol fields in a raw encap/decap header that the cmdline
 * syntax cannot express: IP versions, next-protocol chaining, VXLAN flags,
 * NVGRE protocol/version. "next_proto" is the protocol of the layer above.
 */
6055 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
6057 struct rte_flow_item_ipv4 *ipv4;
6058 struct rte_flow_item_eth *eth;
6059 struct rte_flow_item_ipv6 *ipv6;
6060 struct rte_flow_item_vxlan *vxlan;
6061 struct rte_flow_item_vxlan_gpe *gpe;
6062 struct rte_flow_item_nvgre *nvgre;
6063 uint32_t ipv6_vtc_flow;
6065 switch (item->type) {
6066 case RTE_FLOW_ITEM_TYPE_ETH:
6067 eth = (struct rte_flow_item_eth *)buf;
6069 eth->type = rte_cpu_to_be_16(next_proto);
6071 case RTE_FLOW_ITEM_TYPE_IPV4:
6072 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45 = IPv4, 5-word (20-byte) header, no options. */
6073 ipv4->hdr.version_ihl = 0x45;
6074 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
6076 case RTE_FLOW_ITEM_TYPE_IPV6:
6077 ipv6 = (struct rte_flow_item_ipv6 *)buf;
6078 ipv6->hdr.proto = (uint8_t)next_proto;
6079 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
6080 ipv6_vtc_flow &= 0x0FFFFFFF; /* reset version bits. */
6081 ipv6_vtc_flow |= 0x60000000; /* set ipv6 version. */
6082 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
6084 case RTE_FLOW_ITEM_TYPE_VXLAN:
6085 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08 sets the VNI-valid ("I") flag. */
6086 vxlan->flags = 0x08;
6088 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6089 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
6092 case RTE_FLOW_ITEM_TYPE_NVGRE:
6093 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558: Transparent Ethernet Bridging; 0x2000: key present bit. */
6094 nvgre->protocol = rte_cpu_to_be_16(0x6558);
6095 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
6102 /** Helper of get item's default mask. */
/*
 * Maps an item type to the library-provided default mask structure,
 * used as the spec when the user supplied none. Returns NULL for
 * types with no default.
 */
6104 flow_item_default_mask(const struct rte_flow_item *item)
6106 const void *mask = NULL;
/* GRE key has no library mask structure; a full 32-bit mask is used. */
6107 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
6109 switch (item->type) {
6110 case RTE_FLOW_ITEM_TYPE_ANY:
6111 mask = &rte_flow_item_any_mask;
6113 case RTE_FLOW_ITEM_TYPE_VF:
6114 mask = &rte_flow_item_vf_mask;
6116 case RTE_FLOW_ITEM_TYPE_PORT_ID:
6117 mask = &rte_flow_item_port_id_mask;
6119 case RTE_FLOW_ITEM_TYPE_RAW:
6120 mask = &rte_flow_item_raw_mask;
6122 case RTE_FLOW_ITEM_TYPE_ETH:
6123 mask = &rte_flow_item_eth_mask;
6125 case RTE_FLOW_ITEM_TYPE_VLAN:
6126 mask = &rte_flow_item_vlan_mask;
6128 case RTE_FLOW_ITEM_TYPE_IPV4:
6129 mask = &rte_flow_item_ipv4_mask;
6131 case RTE_FLOW_ITEM_TYPE_IPV6:
6132 mask = &rte_flow_item_ipv6_mask;
6134 case RTE_FLOW_ITEM_TYPE_ICMP:
6135 mask = &rte_flow_item_icmp_mask;
6137 case RTE_FLOW_ITEM_TYPE_UDP:
6138 mask = &rte_flow_item_udp_mask;
6140 case RTE_FLOW_ITEM_TYPE_TCP:
6141 mask = &rte_flow_item_tcp_mask;
6143 case RTE_FLOW_ITEM_TYPE_SCTP:
6144 mask = &rte_flow_item_sctp_mask;
6146 case RTE_FLOW_ITEM_TYPE_VXLAN:
6147 mask = &rte_flow_item_vxlan_mask;
6149 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6150 mask = &rte_flow_item_vxlan_gpe_mask;
6152 case RTE_FLOW_ITEM_TYPE_E_TAG:
6153 mask = &rte_flow_item_e_tag_mask;
6155 case RTE_FLOW_ITEM_TYPE_NVGRE:
6156 mask = &rte_flow_item_nvgre_mask;
6158 case RTE_FLOW_ITEM_TYPE_MPLS:
6159 mask = &rte_flow_item_mpls_mask;
6161 case RTE_FLOW_ITEM_TYPE_GRE:
6162 mask = &rte_flow_item_gre_mask;
6164 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6165 mask = &gre_key_default_mask;
6167 case RTE_FLOW_ITEM_TYPE_META:
6168 mask = &rte_flow_item_meta_mask;
6170 case RTE_FLOW_ITEM_TYPE_FUZZY:
6171 mask = &rte_flow_item_fuzzy_mask;
6173 case RTE_FLOW_ITEM_TYPE_GTP:
6174 mask = &rte_flow_item_gtp_mask;
6176 case RTE_FLOW_ITEM_TYPE_ESP:
6177 mask = &rte_flow_item_esp_mask;
6179 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
6180 mask = &rte_flow_item_gtp_psc_mask;
6182 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
6183 mask = &rte_flow_item_pppoe_proto_id_mask;
6192 /** Dispatch parsed buffer to function calls. */
/*
 * Builds the raw encap/decap byte buffer from the parsed pattern items.
 * Headers are serialized from the innermost layer backward so each layer
 * can learn the protocol number of the layer above it (upper_layer).
 */
6194 cmd_set_raw_parsed(const struct buffer *in)
6196 uint32_t n = in->args.vc.pattern_n;
6198 struct rte_flow_item *item = NULL;
6200 uint8_t *data = NULL;
6201 uint8_t *data_tail = NULL;
6202 size_t *total_size = NULL;
6203 uint16_t upper_layer = 0;
6205 uint16_t idx = in->port; /* We borrow port field as index */
6207 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
6208 in->command == SET_RAW_DECAP);
6209 if (in->command == SET_RAW_ENCAP) {
6210 total_size = &raw_encap_confs[idx].size;
6211 data = (uint8_t *)&raw_encap_confs[idx].data;
6213 total_size = &raw_decap_confs[idx].size;
6214 data = (uint8_t *)&raw_decap_confs[idx].data;
6217 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6218 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
6219 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
6220 for (i = n - 1 ; i >= 0; --i) {
6221 item = in->args.vc.pattern + i;
/* No user-supplied spec: fall back to the item's default mask. */
6222 if (item->spec == NULL)
6223 item->spec = flow_item_default_mask(item);
6224 switch (item->type) {
6225 case RTE_FLOW_ITEM_TYPE_ETH:
6226 size = sizeof(struct rte_flow_item_eth);
6228 case RTE_FLOW_ITEM_TYPE_VLAN:
6229 size = sizeof(struct rte_flow_item_vlan);
6230 proto = RTE_ETHER_TYPE_VLAN;
6232 case RTE_FLOW_ITEM_TYPE_IPV4:
6233 size = sizeof(struct rte_flow_item_ipv4);
6234 proto = RTE_ETHER_TYPE_IPV4;
6236 case RTE_FLOW_ITEM_TYPE_IPV6:
6237 size = sizeof(struct rte_flow_item_ipv6);
6238 proto = RTE_ETHER_TYPE_IPV6;
6240 case RTE_FLOW_ITEM_TYPE_UDP:
6241 size = sizeof(struct rte_flow_item_udp);
6244 case RTE_FLOW_ITEM_TYPE_TCP:
6245 size = sizeof(struct rte_flow_item_tcp);
6248 case RTE_FLOW_ITEM_TYPE_VXLAN:
6249 size = sizeof(struct rte_flow_item_vxlan);
6251 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
6252 size = sizeof(struct rte_flow_item_vxlan_gpe);
6254 case RTE_FLOW_ITEM_TYPE_GRE:
6255 size = sizeof(struct rte_flow_item_gre);
6258 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
6259 size = sizeof(rte_be32_t);
6261 case RTE_FLOW_ITEM_TYPE_MPLS:
6262 size = sizeof(struct rte_flow_item_mpls);
6264 case RTE_FLOW_ITEM_TYPE_NVGRE:
6265 size = sizeof(struct rte_flow_item_nvgre);
6268 case RTE_FLOW_ITEM_TYPE_GENEVE:
6269 size = sizeof(struct rte_flow_item_geneve);
6272 printf("Error - Not supported item\n");
/* Discard the partially-built buffer on unsupported item. */
6274 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
6277 *total_size += size;
/* Copy the item just below the already-serialized upper layers. */
6278 rte_memcpy(data_tail - (*total_size), item->spec, size);
6279 /* update some fields which cannot be set by cmdline */
6280 update_fields((data_tail - (*total_size)), item,
6282 upper_layer = proto;
6284 if (verbose_level & 0x1)
6285 printf("total data size is %zu\n", (*total_size));
6286 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
/* Shift the tail-built buffer to the start of the storage area. */
6287 memmove(data, (data_tail - (*total_size)), *total_size);
6290 /** Populate help strings for current token (cmdline API). */
/* Same logic as cmd_flow_get_help() but targets the cmd_set_raw instance. */
6292 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
6295 struct context *ctx = &cmd_flow_context;
6296 const struct token *token = &token_list[ctx->prev];
6301 /* Set token type and update global help with details. */
6302 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
6304 cmd_set_raw.help_str = token->help;
6306 cmd_set_raw.help_str = token->name;
6310 /** Token definition template (cmdline API). */
/* Shares parse/completion ops with cmd_flow; only get_help differs. */
6311 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
6312 .ops = &(struct cmdline_token_ops){
6313 .parse = cmd_flow_parse,
6314 .complete_get_nb = cmd_flow_complete_get_nb,
6315 .complete_get_elt = cmd_flow_complete_get_elt,
6316 .get_help = cmd_set_raw_get_help,
6321 /** Populate the next dynamic token. */
/* Counterpart of cmd_flow_tok() for "set raw_*"; starts at START_SET. */
6323 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
6324 cmdline_parse_token_hdr_t **hdr_inst)
6326 struct context *ctx = &cmd_flow_context;
6328 /* Always reinitialize context before requesting the first token. */
6329 if (!(hdr_inst - cmd_set_raw.tokens)) {
6330 cmd_flow_context_init(ctx);
6331 ctx->curr = START_SET;
6333 /* Return NULL when no more tokens are expected. */
6334 if (!ctx->next_num && (ctx->curr != START_SET)) {
6338 /* Determine if command should end here. */
6339 if (ctx->eol && ctx->last && ctx->next_num) {
6340 const enum index *list = ctx->next[ctx->next_num - 1];
6343 for (i = 0; list[i]; ++i) {
6350 *hdr = &cmd_set_raw_token_hdr;
6353 /** Token generator and output processing callback (cmdline API). */
6355 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
/* Either generate the next token or serialize a completed command buffer. */
6358 cmd_set_raw_tok(arg0, arg2);
6360 cmd_set_raw_parsed(arg0);
6363 /** Global parser instance (cmdline API). */
6364 cmdline_parse_inst_t cmd_set_raw = {
6365 .f = cmd_set_raw_cb,
6366 .data = NULL, /**< Unused. */
6367 .help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
6370 }, /**< Tokens are returned by cmd_set_raw_tok(). */
6373 /* *** display raw_encap/raw_decap buf */
/** Result layout for "show raw_encap|raw_decap <index>|all". */
6374 struct cmd_show_set_raw_result {
6375 cmdline_fixed_string_t cmd_show;
6376 cmdline_fixed_string_t cmd_what;
6377 cmdline_fixed_string_t cmd_all;
/** Hex-dump one raw_encap/raw_decap buffer, or all of them with "all". */
6382 cmd_show_set_raw_parsed(void *parsed_result, struct cmdline *cl, void *data)
6384 struct cmd_show_set_raw_result *res = parsed_result;
6385 uint16_t index = res->cmd_index;
6387 uint8_t *raw_data = NULL;
6388 size_t raw_size = 0;
6389 char title[16] = {0};
6393 if (!strcmp(res->cmd_all, "all")) {
6396 } else if (index >= RAW_ENCAP_CONFS_MAX_NUM) {
6397 printf("index should be 0-%u\n", RAW_ENCAP_CONFS_MAX_NUM - 1);
6401 if (!strcmp(res->cmd_what, "raw_encap")) {
6402 raw_data = (uint8_t *)&raw_encap_confs[index].data;
6403 raw_size = raw_encap_confs[index].size;
/* NOTE(review): prefer sizeof(title) over the magic 16 in snprintf. */
6404 snprintf(title, 16, "\nindex: %u", index);
6405 rte_hexdump(stdout, title, raw_data, raw_size);
6407 raw_data = (uint8_t *)&raw_decap_confs[index].data;
6408 raw_size = raw_decap_confs[index].size;
6409 snprintf(title, 16, "\nindex: %u", index);
6410 rte_hexdump(stdout, title, raw_data, raw_size);
/* "all" mode keeps iterating until the last configured index. */
6412 } while (all && ++index < RAW_ENCAP_CONFS_MAX_NUM);
/* Static token definitions for the "show raw_encap|raw_decap" commands. */
6415 cmdline_parse_token_string_t cmd_show_set_raw_cmd_show =
6416 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6418 cmdline_parse_token_string_t cmd_show_set_raw_cmd_what =
6419 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
6420 cmd_what, "raw_encap#raw_decap");
6421 cmdline_parse_token_num_t cmd_show_set_raw_cmd_index =
6422 TOKEN_NUM_INITIALIZER(struct cmd_show_set_raw_result,
6424 cmdline_parse_token_string_t cmd_show_set_raw_cmd_all =
6425 TOKEN_STRING_INITIALIZER(struct cmd_show_set_raw_result,
/** "show <raw_encap|raw_decap> <index>": dump one configured buffer. */
6427 cmdline_parse_inst_t cmd_show_set_raw = {
6428 .f = cmd_show_set_raw_parsed,
6430 .help_str = "show <raw_encap|raw_decap> <index>",
6432 (void *)&cmd_show_set_raw_cmd_show,
6433 (void *)&cmd_show_set_raw_cmd_what,
6434 (void *)&cmd_show_set_raw_cmd_index,
6438 cmdline_parse_inst_t cmd_show_set_raw_all = {
6439 .f = cmd_show_set_raw_parsed,
6441 .help_str = "show <raw_encap|raw_decap> all",
6443 (void *)&cmd_show_set_raw_cmd_show,
6444 (void *)&cmd_show_set_raw_cmd_what,
6445 (void *)&cmd_show_set_raw_cmd_all,