1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
49 /* Top-level command. */
51 /* Sub-level commands. */
55 /* Top-level command. */
57 /* Sub-level commands. */
66 /* Destroy arguments. */
69 /* Query arguments. */
75 /* Validate/create arguments. */
82 /* Validate/create pattern. */
119 ITEM_VLAN_INNER_TYPE,
151 ITEM_E_TAG_GRP_ECID_B,
160 ITEM_GRE_C_RSVD0_VER,
176 ITEM_ARP_ETH_IPV4_SHA,
177 ITEM_ARP_ETH_IPV4_SPA,
178 ITEM_ARP_ETH_IPV4_THA,
179 ITEM_ARP_ETH_IPV4_TPA,
181 ITEM_IPV6_EXT_NEXT_HDR,
186 ITEM_ICMP6_ND_NS_TARGET_ADDR,
188 ITEM_ICMP6_ND_NA_TARGET_ADDR,
190 ITEM_ICMP6_ND_OPT_TYPE,
191 ITEM_ICMP6_ND_OPT_SLA_ETH,
192 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
193 ITEM_ICMP6_ND_OPT_TLA_ETH,
194 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
207 ITEM_HIGIG2_CLASSIFICATION,
210 /* Validate/create actions. */
230 ACTION_RSS_FUNC_DEFAULT,
231 ACTION_RSS_FUNC_TOEPLITZ,
232 ACTION_RSS_FUNC_SIMPLE_XOR,
233 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
245 ACTION_PHY_PORT_ORIGINAL,
246 ACTION_PHY_PORT_INDEX,
248 ACTION_PORT_ID_ORIGINAL,
252 ACTION_OF_SET_MPLS_TTL,
253 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
254 ACTION_OF_DEC_MPLS_TTL,
255 ACTION_OF_SET_NW_TTL,
256 ACTION_OF_SET_NW_TTL_NW_TTL,
257 ACTION_OF_DEC_NW_TTL,
258 ACTION_OF_COPY_TTL_OUT,
259 ACTION_OF_COPY_TTL_IN,
262 ACTION_OF_PUSH_VLAN_ETHERTYPE,
263 ACTION_OF_SET_VLAN_VID,
264 ACTION_OF_SET_VLAN_VID_VLAN_VID,
265 ACTION_OF_SET_VLAN_PCP,
266 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
268 ACTION_OF_POP_MPLS_ETHERTYPE,
270 ACTION_OF_PUSH_MPLS_ETHERTYPE,
277 ACTION_MPLSOGRE_ENCAP,
278 ACTION_MPLSOGRE_DECAP,
279 ACTION_MPLSOUDP_ENCAP,
280 ACTION_MPLSOUDP_DECAP,
282 ACTION_SET_IPV4_SRC_IPV4_SRC,
284 ACTION_SET_IPV4_DST_IPV4_DST,
286 ACTION_SET_IPV6_SRC_IPV6_SRC,
288 ACTION_SET_IPV6_DST_IPV6_DST,
290 ACTION_SET_TP_SRC_TP_SRC,
292 ACTION_SET_TP_DST_TP_DST,
298 ACTION_SET_MAC_SRC_MAC_SRC,
300 ACTION_SET_MAC_DST_MAC_DST,
302 ACTION_INC_TCP_SEQ_VALUE,
304 ACTION_DEC_TCP_SEQ_VALUE,
306 ACTION_INC_TCP_ACK_VALUE,
308 ACTION_DEC_TCP_ACK_VALUE,
313 /** Maximum size for pattern in struct rte_flow_item_raw. */
314 #define ITEM_RAW_PATTERN_SIZE 40
316 /** Storage size for struct rte_flow_item_raw including pattern. */
317 #define ITEM_RAW_SIZE \
318 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
320 /** Maximum number of queue indices in struct rte_flow_action_rss. */
321 #define ACTION_RSS_QUEUE_NUM 128
323 /** Storage for struct rte_flow_action_rss including external data. */
324 struct action_rss_data {
325 struct rte_flow_action_rss conf;
326 uint8_t key[RSS_HASH_KEY_LENGTH];
327 uint16_t queue[ACTION_RSS_QUEUE_NUM];
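/*
 * A minimal sketch (assumption: the actual initialization is performed by
 * parse_vc_action_rss(), declared further below): conf's key/queue pointers
 * are expected to reference the trailing storage, e.g.
 *
 *	struct action_rss_data *data = ...;
 *
 *	data->conf.key = data->key;
 *	data->conf.key_len = sizeof(data->key);
 *	data->conf.queue = data->queue;
 *	data->conf.queue_num = n; (with n <= ACTION_RSS_QUEUE_NUM)
 */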
330 /** Maximum data size in struct rte_flow_action_raw_encap. */
331 #define ACTION_RAW_ENCAP_MAX_DATA 128
333 /** Storage for struct rte_flow_action_raw_encap. */
334 struct raw_encap_conf {
335 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
336 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
340 struct raw_encap_conf raw_encap_conf = {.size = 0};
342 /** Storage for struct rte_flow_action_raw_encap including external data. */
343 struct action_raw_encap_data {
344 struct rte_flow_action_raw_encap conf;
345 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
346 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
349 /** Storage for struct rte_flow_action_raw_decap. */
350 struct raw_decap_conf {
351 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
355 struct raw_decap_conf raw_decap_conf = {.size = 0};
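/*
 * Sketch (assumption): these two globals act as scratch buffers filled by
 * the "set raw_encap"/"set raw_decap" commands through parse_set_init()
 * and parse_set_raw_encap_decap() declared below, roughly:
 *
 *	memcpy(raw_encap_conf.data, headers, n);
 *	raw_encap_conf.size = n;
 *
 * parse_vc_action_raw_encap()/_decap() then copy data[0..size) into the
 * per-action storage structures that follow.
 */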
357 /** Storage for struct rte_flow_action_raw_decap including external data. */
358 struct action_raw_decap_data {
359 struct rte_flow_action_raw_decap conf;
360 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
363 struct vxlan_encap_conf vxlan_encap_conf = {
367 .vni = "\x00\x00\x00",
369 .udp_dst = RTE_BE16(4789),
370 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
371 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
372 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
373 "\x00\x00\x00\x00\x00\x00\x00\x01",
374 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
375 "\x00\x00\x00\x00\x00\x00\x11\x11",
379 .eth_src = "\x00\x00\x00\x00\x00\x00",
380 .eth_dst = "\xff\xff\xff\xff\xff\xff",
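/*
 * These defaults (VNI 0, UDP destination port 4789, 127.0.0.1 ->
 * 255.255.255.255, broadcast destination MAC) are placeholders; as the
 * vxlan_encap action help below notes, the real values come from the
 * testpmd "set vxlan" command, e.g. (illustrative; exact option names are
 * an assumption):
 *
 *	set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789
 *		ip-src 10.0.0.1 ip-dst 10.0.0.2
 *		eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 */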
383 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
384 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
386 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
387 struct action_vxlan_encap_data {
388 struct rte_flow_action_vxlan_encap conf;
389 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
390 struct rte_flow_item_eth item_eth;
391 struct rte_flow_item_vlan item_vlan;
393 struct rte_flow_item_ipv4 item_ipv4;
394 struct rte_flow_item_ipv6 item_ipv6;
396 struct rte_flow_item_udp item_udp;
397 struct rte_flow_item_vxlan item_vxlan;
400 struct nvgre_encap_conf nvgre_encap_conf = {
403 .tni = "\x00\x00\x00",
404 .ipv4_src = RTE_IPV4(127, 0, 0, 1),
405 .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
406 .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
407 "\x00\x00\x00\x00\x00\x00\x00\x01",
408 .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
409 "\x00\x00\x00\x00\x00\x00\x11\x11",
411 .eth_src = "\x00\x00\x00\x00\x00\x00",
412 .eth_dst = "\xff\xff\xff\xff\xff\xff",
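/*
 * Same idea as vxlan_encap_conf above: presumably populated by a matching
 * "set nvgre" testpmd command before the nvgre_encap action is used.
 */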
415 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
416 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
418 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
419 struct action_nvgre_encap_data {
420 struct rte_flow_action_nvgre_encap conf;
421 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
422 struct rte_flow_item_eth item_eth;
423 struct rte_flow_item_vlan item_vlan;
425 struct rte_flow_item_ipv4 item_ipv4;
426 struct rte_flow_item_ipv6 item_ipv6;
428 struct rte_flow_item_nvgre item_nvgre;
431 struct l2_encap_conf l2_encap_conf;
433 struct l2_decap_conf l2_decap_conf;
435 struct mplsogre_encap_conf mplsogre_encap_conf;
437 struct mplsogre_decap_conf mplsogre_decap_conf;
439 struct mplsoudp_encap_conf mplsoudp_encap_conf;
441 struct mplsoudp_decap_conf mplsoudp_decap_conf;
443 /** Maximum number of subsequent tokens and arguments on the stack. */
444 #define CTX_STACK_SIZE 16
446 /** Parser context. */
448 /** Stack of subsequent token lists to process. */
449 const enum index *next[CTX_STACK_SIZE];
450 /** Arguments for stacked tokens. */
451 const void *args[CTX_STACK_SIZE];
452 enum index curr; /**< Current token index. */
453 enum index prev; /**< Index of the last token seen. */
454 int next_num; /**< Number of entries in next[]. */
455 int args_num; /**< Number of entries in args[]. */
456 uint32_t eol:1; /**< EOL has been detected. */
457 uint32_t last:1; /**< No more arguments. */
458 portid_t port; /**< Current port ID (for completions). */
459 uint32_t objdata; /**< Object-specific data. */
460 void *object; /**< Address of current object for relative offsets. */
461 void *objmask; /**< Object a full mask must be written to. */
464 /** Token argument. */
466 uint32_t hton:1; /**< Use network byte ordering. */
467 uint32_t sign:1; /**< Value is signed. */
468 uint32_t bounded:1; /**< Value is bounded. */
469 uintmax_t min; /**< Minimum value if bounded. */
470 uintmax_t max; /**< Maximum value if bounded. */
471 uint32_t offset; /**< Relative offset from ctx->object. */
472 uint32_t size; /**< Field size. */
473 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
476 /** Parser token definition. */
478 /** Type displayed during completion (defaults to "TOKEN"). */
480 /** Help displayed during completion (defaults to token name). */
482 /** Private data used by parser functions. */
485 * Lists of subsequent tokens to push on the stack. Each call to the
486 * parser consumes the last entry of that stack.
488 const enum index *const *next;
489 /** Arguments stack for subsequent tokens that need them. */
490 const struct arg *const *args;
492 * Token-processing callback, returns -1 in case of error, the
493 * length of the matched string otherwise. If NULL, attempts to
494 * match the token name.
496 * If buf is not NULL, the result should be stored in it according
497 * to context. An error is returned if not large enough.
499 int (*call)(struct context *ctx, const struct token *token,
500 const char *str, unsigned int len,
501 void *buf, unsigned int size);
503 * Callback that provides possible values for this token, used for
504 * completion. Returns -1 in case of error, the number of possible
505 * values otherwise. If NULL, the token name is used.
507 * If buf is not NULL, entry index ent is written to buf and the
508 * full length of the entry is returned (same behavior as
511 int (*comp)(struct context *ctx, const struct token *token,
512 unsigned int ent, char *buf, unsigned int size);
513 /** Mandatory token name, no default value. */
517 /** Static initializer for the next field. */
518 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
520 /** Static initializer for a NEXT() entry. */
521 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
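/*
 * Example (expansion only): NEXT(NEXT_ENTRY(FLOW)), as used for the entry
 * point below, produces a NULL-terminated array of ZERO-terminated index
 * lists:
 *
 *	(const enum index *const []){
 *		(const enum index []){ FLOW, ZERO, },
 *		NULL,
 *	}
 */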
523 /** Static initializer for the args field. */
524 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
526 /** Static initializer for ARGS() to target a field. */
527 #define ARGS_ENTRY(s, f) \
528 (&(const struct arg){ \
529 .offset = offsetof(s, f), \
530 .size = sizeof(((s *)0)->f), \
533 /** Static initializer for ARGS() to target a bit-field. */
534 #define ARGS_ENTRY_BF(s, f, b) \
535 (&(const struct arg){ \
537 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
540 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
541 #define ARGS_ENTRY_MASK(s, f, m) \
542 (&(const struct arg){ \
543 .offset = offsetof(s, f), \
544 .size = sizeof(((s *)0)->f), \
545 .mask = (const void *)(m), \
548 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
549 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
550 (&(const struct arg){ \
552 .offset = offsetof(s, f), \
553 .size = sizeof(((s *)0)->f), \
554 .mask = (const void *)(m), \
557 /** Static initializer for ARGS() to target a pointer. */
558 #define ARGS_ENTRY_PTR(s, f) \
559 (&(const struct arg){ \
560 .size = sizeof(*((s *)0)->f), \
563 /** Static initializer for ARGS() with arbitrary offset and size. */
564 #define ARGS_ENTRY_ARB(o, s) \
565 (&(const struct arg){ \
570 /** Same as ARGS_ENTRY_ARB() with bounded values. */
571 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
572 (&(const struct arg){ \
580 /** Same as ARGS_ENTRY() using network byte ordering. */
581 #define ARGS_ENTRY_HTON(s, f) \
582 (&(const struct arg){ \
584 .offset = offsetof(s, f), \
585 .size = sizeof(((s *)0)->f), \
588 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
589 #define ARG_ENTRY_HTON(s) \
590 (&(const struct arg){ \
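/*
 * Example (illustrative): the token definitions below typically combine
 * these initializers, e.g.
 *
 *	.args = ARGS(ARGS_ENTRY(struct buffer, port)),
 *
 * which expands to a NULL-terminated array holding one descriptor:
 *
 *	(const struct arg *const []){
 *		&(const struct arg){
 *			.offset = offsetof(struct buffer, port),
 *			.size = sizeof(((struct buffer *)0)->port),
 *		},
 *		NULL,
 *	}
 *
 * The _HTON variants add a request for network byte ordering on top of the
 * same offset/size pair (their .hton assignment is elided above).
 */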
596 /** Parser output buffer layout expected by cmd_flow_parsed(). */
598 enum index command; /**< Flow command. */
599 portid_t port; /**< Affected port ID. */
602 struct rte_flow_attr attr;
603 struct rte_flow_item *pattern;
604 struct rte_flow_action *actions;
608 } vc; /**< Validate/create arguments. */
612 } destroy; /**< Destroy arguments. */
615 struct rte_flow_action action;
616 } query; /**< Query arguments. */
620 } list; /**< List arguments. */
623 } isolate; /**< Isolated mode arguments. */
624 } args; /**< Command arguments. */
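/*
 * Sketch (assumption about names kept in the elided parts): after parsing
 * e.g. "flow create 0 ingress pattern eth / end actions drop / end",
 * cmd_flow_parsed() would see command == CREATE, port == 0, and
 * args.vc.pattern/args.vc.actions pointing at item and action arrays
 * stored further inside the same output buffer.
 */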
627 /** Private data for pattern items. */
628 struct parse_item_priv {
629 enum rte_flow_item_type type; /**< Item type. */
630 uint32_t size; /**< Size of item specification structure. */
633 #define PRIV_ITEM(t, s) \
634 (&(const struct parse_item_priv){ \
635 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
639 /** Private data for actions. */
640 struct parse_action_priv {
641 enum rte_flow_action_type type; /**< Action type. */
642 uint32_t size; /**< Size of action configuration structure. */
645 #define PRIV_ACTION(t, s) \
646 (&(const struct parse_action_priv){ \
647 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
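/*
 * Example (expansion only): PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
 * as attached to the "eth" token below, yields
 *
 *	&(const struct parse_item_priv){
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.size = sizeof(struct rte_flow_item_eth),
 *	}
 *
 * i.e. the item type to emit and the amount of spec storage to reserve.
 * PRIV_ACTION() plays the same role for actions.
 */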
651 static const enum index next_vc_attr[] = {
661 static const enum index next_destroy_attr[] = {
667 static const enum index next_list_attr[] = {
673 static const enum index item_param[] = {
682 static const enum index next_item[] = {
718 ITEM_ICMP6_ND_OPT_SLA_ETH,
719 ITEM_ICMP6_ND_OPT_TLA_ETH,
731 static const enum index item_fuzzy[] = {
737 static const enum index item_any[] = {
743 static const enum index item_vf[] = {
749 static const enum index item_phy_port[] = {
755 static const enum index item_port_id[] = {
761 static const enum index item_mark[] = {
767 static const enum index item_raw[] = {
777 static const enum index item_eth[] = {
785 static const enum index item_vlan[] = {
790 ITEM_VLAN_INNER_TYPE,
795 static const enum index item_ipv4[] = {
805 static const enum index item_ipv6[] = {
816 static const enum index item_icmp[] = {
823 static const enum index item_udp[] = {
830 static const enum index item_tcp[] = {
838 static const enum index item_sctp[] = {
847 static const enum index item_vxlan[] = {
853 static const enum index item_e_tag[] = {
854 ITEM_E_TAG_GRP_ECID_B,
859 static const enum index item_nvgre[] = {
865 static const enum index item_mpls[] = {
873 static const enum index item_gre[] = {
875 ITEM_GRE_C_RSVD0_VER,
883 static const enum index item_gre_key[] = {
889 static const enum index item_gtp[] = {
895 static const enum index item_geneve[] = {
902 static const enum index item_vxlan_gpe[] = {
908 static const enum index item_arp_eth_ipv4[] = {
909 ITEM_ARP_ETH_IPV4_SHA,
910 ITEM_ARP_ETH_IPV4_SPA,
911 ITEM_ARP_ETH_IPV4_THA,
912 ITEM_ARP_ETH_IPV4_TPA,
917 static const enum index item_ipv6_ext[] = {
918 ITEM_IPV6_EXT_NEXT_HDR,
923 static const enum index item_icmp6[] = {
930 static const enum index item_icmp6_nd_ns[] = {
931 ITEM_ICMP6_ND_NS_TARGET_ADDR,
936 static const enum index item_icmp6_nd_na[] = {
937 ITEM_ICMP6_ND_NA_TARGET_ADDR,
942 static const enum index item_icmp6_nd_opt[] = {
943 ITEM_ICMP6_ND_OPT_TYPE,
948 static const enum index item_icmp6_nd_opt_sla_eth[] = {
949 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
954 static const enum index item_icmp6_nd_opt_tla_eth[] = {
955 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
960 static const enum index item_meta[] = {
966 static const enum index item_gtp_psc[] = {
973 static const enum index item_pppoed[] = {
979 static const enum index item_pppoes[] = {
985 static const enum index item_pppoe_proto_id[] = {
991 static const enum index item_higig2[] = {
992 ITEM_HIGIG2_CLASSIFICATION,
998 static const enum index next_action[] = {
1014 ACTION_OF_SET_MPLS_TTL,
1015 ACTION_OF_DEC_MPLS_TTL,
1016 ACTION_OF_SET_NW_TTL,
1017 ACTION_OF_DEC_NW_TTL,
1018 ACTION_OF_COPY_TTL_OUT,
1019 ACTION_OF_COPY_TTL_IN,
1021 ACTION_OF_PUSH_VLAN,
1022 ACTION_OF_SET_VLAN_VID,
1023 ACTION_OF_SET_VLAN_PCP,
1025 ACTION_OF_PUSH_MPLS,
1032 ACTION_MPLSOGRE_ENCAP,
1033 ACTION_MPLSOGRE_DECAP,
1034 ACTION_MPLSOUDP_ENCAP,
1035 ACTION_MPLSOUDP_DECAP,
1036 ACTION_SET_IPV4_SRC,
1037 ACTION_SET_IPV4_DST,
1038 ACTION_SET_IPV6_SRC,
1039 ACTION_SET_IPV6_DST,
1056 static const enum index action_mark[] = {
1062 static const enum index action_queue[] = {
1068 static const enum index action_count[] = {
1070 ACTION_COUNT_SHARED,
1075 static const enum index action_rss[] = {
1086 static const enum index action_vf[] = {
1093 static const enum index action_phy_port[] = {
1094 ACTION_PHY_PORT_ORIGINAL,
1095 ACTION_PHY_PORT_INDEX,
1100 static const enum index action_port_id[] = {
1101 ACTION_PORT_ID_ORIGINAL,
1107 static const enum index action_meter[] = {
1113 static const enum index action_of_set_mpls_ttl[] = {
1114 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1119 static const enum index action_of_set_nw_ttl[] = {
1120 ACTION_OF_SET_NW_TTL_NW_TTL,
1125 static const enum index action_of_push_vlan[] = {
1126 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1131 static const enum index action_of_set_vlan_vid[] = {
1132 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1137 static const enum index action_of_set_vlan_pcp[] = {
1138 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1143 static const enum index action_of_pop_mpls[] = {
1144 ACTION_OF_POP_MPLS_ETHERTYPE,
1149 static const enum index action_of_push_mpls[] = {
1150 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1155 static const enum index action_set_ipv4_src[] = {
1156 ACTION_SET_IPV4_SRC_IPV4_SRC,
1161 static const enum index action_set_mac_src[] = {
1162 ACTION_SET_MAC_SRC_MAC_SRC,
1167 static const enum index action_set_ipv4_dst[] = {
1168 ACTION_SET_IPV4_DST_IPV4_DST,
1173 static const enum index action_set_ipv6_src[] = {
1174 ACTION_SET_IPV6_SRC_IPV6_SRC,
1179 static const enum index action_set_ipv6_dst[] = {
1180 ACTION_SET_IPV6_DST_IPV6_DST,
1185 static const enum index action_set_tp_src[] = {
1186 ACTION_SET_TP_SRC_TP_SRC,
1191 static const enum index action_set_tp_dst[] = {
1192 ACTION_SET_TP_DST_TP_DST,
1197 static const enum index action_set_ttl[] = {
1203 static const enum index action_jump[] = {
1209 static const enum index action_set_mac_dst[] = {
1210 ACTION_SET_MAC_DST_MAC_DST,
1215 static const enum index action_inc_tcp_seq[] = {
1216 ACTION_INC_TCP_SEQ_VALUE,
1221 static const enum index action_dec_tcp_seq[] = {
1222 ACTION_DEC_TCP_SEQ_VALUE,
1227 static const enum index action_inc_tcp_ack[] = {
1228 ACTION_INC_TCP_ACK_VALUE,
1233 static const enum index action_dec_tcp_ack[] = {
1234 ACTION_DEC_TCP_ACK_VALUE,
1239 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1240 const char *, unsigned int,
1241 void *, unsigned int);
1242 static int parse_set_init(struct context *, const struct token *,
1243 const char *, unsigned int,
1244 void *, unsigned int);
1245 static int parse_init(struct context *, const struct token *,
1246 const char *, unsigned int,
1247 void *, unsigned int);
1248 static int parse_vc(struct context *, const struct token *,
1249 const char *, unsigned int,
1250 void *, unsigned int);
1251 static int parse_vc_spec(struct context *, const struct token *,
1252 const char *, unsigned int, void *, unsigned int);
1253 static int parse_vc_conf(struct context *, const struct token *,
1254 const char *, unsigned int, void *, unsigned int);
1255 static int parse_vc_action_rss(struct context *, const struct token *,
1256 const char *, unsigned int, void *,
1258 static int parse_vc_action_rss_func(struct context *, const struct token *,
1259 const char *, unsigned int, void *,
1261 static int parse_vc_action_rss_type(struct context *, const struct token *,
1262 const char *, unsigned int, void *,
1264 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1265 const char *, unsigned int, void *,
1267 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1268 const char *, unsigned int, void *,
1270 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1271 const char *, unsigned int, void *,
1273 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1274 const char *, unsigned int, void *,
1276 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1277 const char *, unsigned int, void *,
1279 static int parse_vc_action_mplsogre_encap(struct context *,
1280 const struct token *, const char *,
1281 unsigned int, void *, unsigned int);
1282 static int parse_vc_action_mplsogre_decap(struct context *,
1283 const struct token *, const char *,
1284 unsigned int, void *, unsigned int);
1285 static int parse_vc_action_mplsoudp_encap(struct context *,
1286 const struct token *, const char *,
1287 unsigned int, void *, unsigned int);
1288 static int parse_vc_action_mplsoudp_decap(struct context *,
1289 const struct token *, const char *,
1290 unsigned int, void *, unsigned int);
1291 static int parse_vc_action_raw_encap(struct context *,
1292 const struct token *, const char *,
1293 unsigned int, void *, unsigned int);
1294 static int parse_vc_action_raw_decap(struct context *,
1295 const struct token *, const char *,
1296 unsigned int, void *, unsigned int);
1297 static int parse_destroy(struct context *, const struct token *,
1298 const char *, unsigned int,
1299 void *, unsigned int);
1300 static int parse_flush(struct context *, const struct token *,
1301 const char *, unsigned int,
1302 void *, unsigned int);
1303 static int parse_query(struct context *, const struct token *,
1304 const char *, unsigned int,
1305 void *, unsigned int);
1306 static int parse_action(struct context *, const struct token *,
1307 const char *, unsigned int,
1308 void *, unsigned int);
1309 static int parse_list(struct context *, const struct token *,
1310 const char *, unsigned int,
1311 void *, unsigned int);
1312 static int parse_isolate(struct context *, const struct token *,
1313 const char *, unsigned int,
1314 void *, unsigned int);
1315 static int parse_int(struct context *, const struct token *,
1316 const char *, unsigned int,
1317 void *, unsigned int);
1318 static int parse_prefix(struct context *, const struct token *,
1319 const char *, unsigned int,
1320 void *, unsigned int);
1321 static int parse_boolean(struct context *, const struct token *,
1322 const char *, unsigned int,
1323 void *, unsigned int);
1324 static int parse_string(struct context *, const struct token *,
1325 const char *, unsigned int,
1326 void *, unsigned int);
1327 static int parse_hex(struct context *ctx, const struct token *token,
1328 const char *str, unsigned int len,
1329 void *buf, unsigned int size);
1330 static int parse_mac_addr(struct context *, const struct token *,
1331 const char *, unsigned int,
1332 void *, unsigned int);
1333 static int parse_ipv4_addr(struct context *, const struct token *,
1334 const char *, unsigned int,
1335 void *, unsigned int);
1336 static int parse_ipv6_addr(struct context *, const struct token *,
1337 const char *, unsigned int,
1338 void *, unsigned int);
1339 static int parse_port(struct context *, const struct token *,
1340 const char *, unsigned int,
1341 void *, unsigned int);
1342 static int comp_none(struct context *, const struct token *,
1343 unsigned int, char *, unsigned int);
1344 static int comp_boolean(struct context *, const struct token *,
1345 unsigned int, char *, unsigned int);
1346 static int comp_action(struct context *, const struct token *,
1347 unsigned int, char *, unsigned int);
1348 static int comp_port(struct context *, const struct token *,
1349 unsigned int, char *, unsigned int);
1350 static int comp_rule_id(struct context *, const struct token *,
1351 unsigned int, char *, unsigned int);
1352 static int comp_vc_action_rss_type(struct context *, const struct token *,
1353 unsigned int, char *, unsigned int);
1354 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1355 unsigned int, char *, unsigned int);
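/*
 * Illustrative commands the grammar below is meant to accept (a sketch,
 * not an exhaustive reference):
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 10.0.0.1 / end
 *		actions queue index 3 / end
 *	flow validate 0 egress pattern eth / end actions drop / end
 *	flow list 0
 *	flow destroy 0 rule 1
 *	flow flush 0
 *	flow query 0 1 count
 *	flow isolate 0 1
 */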
1357 /** Token definitions. */
1358 static const struct token token_list[] = {
1359 /* Special tokens. */
1362 .help = "null entry, abused as the entry point",
1363 .next = NEXT(NEXT_ENTRY(FLOW)),
1368 .help = "command may end here",
1371 .name = "START_SET",
1372 .help = "null entry, abused as the entry point for set",
1373 .next = NEXT(NEXT_ENTRY(SET)),
1378 .help = "set command may end here",
1380 /* Common tokens. */
1384 .help = "integer value",
1389 .name = "{unsigned}",
1391 .help = "unsigned integer value",
1398 .help = "prefix length for bit-mask",
1399 .call = parse_prefix,
1403 .name = "{boolean}",
1405 .help = "any boolean value",
1406 .call = parse_boolean,
1407 .comp = comp_boolean,
1412 .help = "fixed string",
1413 .call = parse_string,
1419 .help = "fixed string",
1424 .name = "{MAC address}",
1426 .help = "standard MAC address notation",
1427 .call = parse_mac_addr,
1431 .name = "{IPv4 address}",
1432 .type = "IPV4 ADDRESS",
1433 .help = "standard IPv4 address notation",
1434 .call = parse_ipv4_addr,
1438 .name = "{IPv6 address}",
1439 .type = "IPV6 ADDRESS",
1440 .help = "standard IPv6 address notation",
1441 .call = parse_ipv6_addr,
1445 .name = "{rule id}",
1447 .help = "rule identifier",
1449 .comp = comp_rule_id,
1452 .name = "{port_id}",
1454 .help = "port identifier",
1459 .name = "{group_id}",
1461 .help = "group identifier",
1465 [PRIORITY_LEVEL] = {
1468 .help = "priority level",
1472 /* Top-level command. */
1475 .type = "{command} {port_id} [{arg} [...]]",
1476 .help = "manage ingress/egress flow rules",
1477 .next = NEXT(NEXT_ENTRY
1487 /* Sub-level commands. */
1490 .help = "check whether a flow rule can be created",
1491 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1492 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1497 .help = "create a flow rule",
1498 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1499 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1504 .help = "destroy specific flow rules",
1505 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1506 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1507 .call = parse_destroy,
1511 .help = "destroy all flow rules",
1512 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1513 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1514 .call = parse_flush,
1518 .help = "query an existing flow rule",
1519 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1520 NEXT_ENTRY(RULE_ID),
1521 NEXT_ENTRY(PORT_ID)),
1522 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1523 ARGS_ENTRY(struct buffer, args.query.rule),
1524 ARGS_ENTRY(struct buffer, port)),
1525 .call = parse_query,
1529 .help = "list existing flow rules",
1530 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1531 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1536 .help = "restrict ingress traffic to the defined flow rules",
1537 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1538 NEXT_ENTRY(PORT_ID)),
1539 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1540 ARGS_ENTRY(struct buffer, port)),
1541 .call = parse_isolate,
1543 /* Destroy arguments. */
1546 .help = "specify a rule identifier",
1547 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1548 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1549 .call = parse_destroy,
1551 /* Query arguments. */
1555 .help = "action to query, must be part of the rule",
1556 .call = parse_action,
1557 .comp = comp_action,
1559 /* List arguments. */
1562 .help = "specify a group",
1563 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1564 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1567 /* Validate/create attributes. */
1570 .help = "specify a group",
1571 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1572 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1577 .help = "specify a priority level",
1578 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1579 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1584 .help = "affect rule to ingress",
1585 .next = NEXT(next_vc_attr),
1590 .help = "affect rule to egress",
1591 .next = NEXT(next_vc_attr),
1596 .help = "apply rule directly to endpoints found in pattern",
1597 .next = NEXT(next_vc_attr),
1600 /* Validate/create pattern. */
1603 .help = "submit a list of pattern items",
1604 .next = NEXT(next_item),
1609 .help = "match value perfectly (with full bit-mask)",
1610 .call = parse_vc_spec,
1612 [ITEM_PARAM_SPEC] = {
1614 .help = "match value according to configured bit-mask",
1615 .call = parse_vc_spec,
1617 [ITEM_PARAM_LAST] = {
1619 .help = "specify upper bound to establish a range",
1620 .call = parse_vc_spec,
1622 [ITEM_PARAM_MASK] = {
1624 .help = "specify bit-mask with relevant bits set to one",
1625 .call = parse_vc_spec,
1627 [ITEM_PARAM_PREFIX] = {
1629 .help = "generate bit-mask from a prefix length",
1630 .call = parse_vc_spec,
1634 .help = "specify next pattern item",
1635 .next = NEXT(next_item),
1639 .help = "end list of pattern items",
1640 .priv = PRIV_ITEM(END, 0),
1641 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1646 .help = "no-op pattern item",
1647 .priv = PRIV_ITEM(VOID, 0),
1648 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1653 .help = "perform actions when pattern does not match",
1654 .priv = PRIV_ITEM(INVERT, 0),
1655 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1660 .help = "match any protocol for the current layer",
1661 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1662 .next = NEXT(item_any),
1667 .help = "number of layers covered",
1668 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1669 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1673 .help = "match traffic from/to the physical function",
1674 .priv = PRIV_ITEM(PF, 0),
1675 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1680 .help = "match traffic from/to a virtual function ID",
1681 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1682 .next = NEXT(item_vf),
1688 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1689 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1693 .help = "match traffic from/to a specific physical port",
1694 .priv = PRIV_ITEM(PHY_PORT,
1695 sizeof(struct rte_flow_item_phy_port)),
1696 .next = NEXT(item_phy_port),
1699 [ITEM_PHY_PORT_INDEX] = {
1701 .help = "physical port index",
1702 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1703 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1707 .help = "match traffic from/to a given DPDK port ID",
1708 .priv = PRIV_ITEM(PORT_ID,
1709 sizeof(struct rte_flow_item_port_id)),
1710 .next = NEXT(item_port_id),
1713 [ITEM_PORT_ID_ID] = {
1715 .help = "DPDK port ID",
1716 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1717 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1721 .help = "match traffic against value set in previously matched rule",
1722 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1723 .next = NEXT(item_mark),
1728 .help = "Integer value to match against",
1729 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1730 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1734 .help = "match an arbitrary byte string",
1735 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1736 .next = NEXT(item_raw),
1739 [ITEM_RAW_RELATIVE] = {
1741 .help = "look for pattern after the previous item",
1742 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1743 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1746 [ITEM_RAW_SEARCH] = {
1748 .help = "search pattern from offset (see also limit)",
1749 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1750 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1753 [ITEM_RAW_OFFSET] = {
1755 .help = "absolute or relative offset for pattern",
1756 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1757 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1759 [ITEM_RAW_LIMIT] = {
1761 .help = "search area limit for start of pattern",
1762 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1763 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1765 [ITEM_RAW_PATTERN] = {
1767 .help = "byte string to look for",
1768 .next = NEXT(item_raw,
1770 NEXT_ENTRY(ITEM_PARAM_IS,
1773 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1774 ARGS_ENTRY(struct rte_flow_item_raw, length),
1775 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1776 ITEM_RAW_PATTERN_SIZE)),
1780 .help = "match Ethernet header",
1781 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1782 .next = NEXT(item_eth),
1787 .help = "destination MAC",
1788 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1789 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1793 .help = "source MAC",
1794 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1795 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1799 .help = "EtherType",
1800 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1801 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1805 .help = "match 802.1Q/ad VLAN tag",
1806 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1807 .next = NEXT(item_vlan),
1812 .help = "tag control information",
1813 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1814 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1818 .help = "priority code point",
1819 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1820 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1825 .help = "drop eligible indicator",
1826 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1827 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1832 .help = "VLAN identifier",
1833 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1834 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1837 [ITEM_VLAN_INNER_TYPE] = {
1838 .name = "inner_type",
1839 .help = "inner EtherType",
1840 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1841 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1846 .help = "match IPv4 header",
1847 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1848 .next = NEXT(item_ipv4),
1853 .help = "type of service",
1854 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1855 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1856 hdr.type_of_service)),
1860 .help = "time to live",
1861 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1862 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1865 [ITEM_IPV4_PROTO] = {
1867 .help = "next protocol ID",
1868 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1869 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1870 hdr.next_proto_id)),
1874 .help = "source address",
1875 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1876 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1881 .help = "destination address",
1882 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1883 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1888 .help = "match IPv6 header",
1889 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1890 .next = NEXT(item_ipv6),
1895 .help = "traffic class",
1896 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1897 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1899 "\x0f\xf0\x00\x00")),
1901 [ITEM_IPV6_FLOW] = {
1903 .help = "flow label",
1904 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1905 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1907 "\x00\x0f\xff\xff")),
1909 [ITEM_IPV6_PROTO] = {
1911 .help = "protocol (next header)",
1912 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1913 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1918 .help = "hop limit",
1919 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1920 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1925 .help = "source address",
1926 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1927 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1932 .help = "destination address",
1933 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1934 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1939 .help = "match ICMP header",
1940 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1941 .next = NEXT(item_icmp),
1944 [ITEM_ICMP_TYPE] = {
1946 .help = "ICMP packet type",
1947 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1948 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1951 [ITEM_ICMP_CODE] = {
1953 .help = "ICMP packet code",
1954 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1955 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1960 .help = "match UDP header",
1961 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1962 .next = NEXT(item_udp),
1967 .help = "UDP source port",
1968 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1969 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1974 .help = "UDP destination port",
1975 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1976 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1981 .help = "match TCP header",
1982 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1983 .next = NEXT(item_tcp),
1988 .help = "TCP source port",
1989 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1990 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1995 .help = "TCP destination port",
1996 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1997 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2000 [ITEM_TCP_FLAGS] = {
2002 .help = "TCP flags",
2003 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
2004 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
2009 .help = "match SCTP header",
2010 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
2011 .next = NEXT(item_sctp),
2016 .help = "SCTP source port",
2017 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2018 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2023 .help = "SCTP destination port",
2024 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2025 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2030 .help = "validation tag",
2031 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2032 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2035 [ITEM_SCTP_CKSUM] = {
2038 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
2039 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
2044 .help = "match VXLAN header",
2045 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2046 .next = NEXT(item_vxlan),
2049 [ITEM_VXLAN_VNI] = {
2051 .help = "VXLAN identifier",
2052 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2053 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2057 .help = "match E-Tag header",
2058 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2059 .next = NEXT(item_e_tag),
2062 [ITEM_E_TAG_GRP_ECID_B] = {
2063 .name = "grp_ecid_b",
2064 .help = "GRP and E-CID base",
2065 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2066 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2072 .help = "match NVGRE header",
2073 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2074 .next = NEXT(item_nvgre),
2077 [ITEM_NVGRE_TNI] = {
2079 .help = "virtual subnet ID",
2080 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2081 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2085 .help = "match MPLS header",
2086 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2087 .next = NEXT(item_mpls),
2090 [ITEM_MPLS_LABEL] = {
2092 .help = "MPLS label",
2093 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2094 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2100 .help = "MPLS Traffic Class",
2101 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2102 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2108 .help = "MPLS Bottom-of-Stack",
2109 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2110 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2116 .help = "match GRE header",
2117 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2118 .next = NEXT(item_gre),
2121 [ITEM_GRE_PROTO] = {
2123 .help = "GRE protocol type",
2124 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2125 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2128 [ITEM_GRE_C_RSVD0_VER] = {
2129 .name = "c_rsvd0_ver",
2131 "checksum (1b), undefined (1b), key bit (1b),"
2132 " sequence number (1b), reserved 0 (9b),"
2134 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2135 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2138 [ITEM_GRE_C_BIT] = {
2140 .help = "checksum bit (C)",
2141 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2142 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2144 "\x80\x00\x00\x00")),
2146 [ITEM_GRE_S_BIT] = {
2148 .help = "sequence number bit (S)",
2149 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2150 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2152 "\x10\x00\x00\x00")),
2154 [ITEM_GRE_K_BIT] = {
2156 .help = "key bit (K)",
2157 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2158 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2160 "\x20\x00\x00\x00")),
2164 .help = "fuzzy pattern match, expect faster than default",
2165 .priv = PRIV_ITEM(FUZZY,
2166 sizeof(struct rte_flow_item_fuzzy)),
2167 .next = NEXT(item_fuzzy),
2170 [ITEM_FUZZY_THRESH] = {
2172 .help = "match accuracy threshold",
2173 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2174 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2179 .help = "match GTP header",
2180 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2181 .next = NEXT(item_gtp),
2186 .help = "tunnel endpoint identifier",
2187 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2188 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2192 .help = "match GTP header",
2193 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2194 .next = NEXT(item_gtp),
2199 .help = "match GTP header",
2200 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2201 .next = NEXT(item_gtp),
2206 .help = "match GENEVE header",
2207 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2208 .next = NEXT(item_geneve),
2211 [ITEM_GENEVE_VNI] = {
2213 .help = "virtual network identifier",
2214 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2215 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2217 [ITEM_GENEVE_PROTO] = {
2219 .help = "GENEVE protocol type",
2220 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2221 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2224 [ITEM_VXLAN_GPE] = {
2225 .name = "vxlan-gpe",
2226 .help = "match VXLAN-GPE header",
2227 .priv = PRIV_ITEM(VXLAN_GPE,
2228 sizeof(struct rte_flow_item_vxlan_gpe)),
2229 .next = NEXT(item_vxlan_gpe),
2232 [ITEM_VXLAN_GPE_VNI] = {
2234 .help = "VXLAN-GPE identifier",
2235 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2236 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2239 [ITEM_ARP_ETH_IPV4] = {
2240 .name = "arp_eth_ipv4",
2241 .help = "match ARP header for Ethernet/IPv4",
2242 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2243 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2244 .next = NEXT(item_arp_eth_ipv4),
2247 [ITEM_ARP_ETH_IPV4_SHA] = {
2249 .help = "sender hardware address",
2250 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2252 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2255 [ITEM_ARP_ETH_IPV4_SPA] = {
2257 .help = "sender IPv4 address",
2258 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2260 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2263 [ITEM_ARP_ETH_IPV4_THA] = {
2265 .help = "target hardware address",
2266 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2268 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2271 [ITEM_ARP_ETH_IPV4_TPA] = {
2273 .help = "target IPv4 address",
2274 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2276 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2281 .help = "match presence of any IPv6 extension header",
2282 .priv = PRIV_ITEM(IPV6_EXT,
2283 sizeof(struct rte_flow_item_ipv6_ext)),
2284 .next = NEXT(item_ipv6_ext),
2287 [ITEM_IPV6_EXT_NEXT_HDR] = {
2289 .help = "next header",
2290 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2291 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2296 .help = "match any ICMPv6 header",
2297 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2298 .next = NEXT(item_icmp6),
2301 [ITEM_ICMP6_TYPE] = {
2303 .help = "ICMPv6 type",
2304 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2305 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2308 [ITEM_ICMP6_CODE] = {
2310 .help = "ICMPv6 code",
2311 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2312 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2315 [ITEM_ICMP6_ND_NS] = {
2316 .name = "icmp6_nd_ns",
2317 .help = "match ICMPv6 neighbor discovery solicitation",
2318 .priv = PRIV_ITEM(ICMP6_ND_NS,
2319 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2320 .next = NEXT(item_icmp6_nd_ns),
2323 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2324 .name = "target_addr",
2325 .help = "target address",
2326 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2328 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2331 [ITEM_ICMP6_ND_NA] = {
2332 .name = "icmp6_nd_na",
2333 .help = "match ICMPv6 neighbor discovery advertisement",
2334 .priv = PRIV_ITEM(ICMP6_ND_NA,
2335 sizeof(struct rte_flow_item_icmp6_nd_na)),
2336 .next = NEXT(item_icmp6_nd_na),
2339 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2340 .name = "target_addr",
2341 .help = "target address",
2342 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2344 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2347 [ITEM_ICMP6_ND_OPT] = {
2348 .name = "icmp6_nd_opt",
2349 .help = "match presence of any ICMPv6 neighbor discovery"
2351 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2352 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2353 .next = NEXT(item_icmp6_nd_opt),
2356 [ITEM_ICMP6_ND_OPT_TYPE] = {
2358 .help = "ND option type",
2359 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2361 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2364 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2365 .name = "icmp6_nd_opt_sla_eth",
2366 .help = "match ICMPv6 neighbor discovery source Ethernet"
2367 " link-layer address option",
2369 (ICMP6_ND_OPT_SLA_ETH,
2370 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2371 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2374 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2376 .help = "source Ethernet LLA",
2377 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2379 .args = ARGS(ARGS_ENTRY_HTON
2380 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2382 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2383 .name = "icmp6_nd_opt_tla_eth",
2384 .help = "match ICMPv6 neighbor discovery target Ethernet"
2385 " link-layer address option",
2387 (ICMP6_ND_OPT_TLA_ETH,
2388 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2389 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2392 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2394 .help = "target Ethernet LLA",
2395 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2397 .args = ARGS(ARGS_ENTRY_HTON
2398 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2402 .help = "match metadata header",
2403 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2404 .next = NEXT(item_meta),
2407 [ITEM_META_DATA] = {
2409 .help = "metadata value",
2410 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2411 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2412 data, "\xff\xff\xff\xff")),
2416 .help = "match GRE key",
2417 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2418 .next = NEXT(item_gre_key),
2421 [ITEM_GRE_KEY_VALUE] = {
2423 .help = "key value",
2424 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2425 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2429 .help = "match GTP extension header with type 0x85",
2430 .priv = PRIV_ITEM(GTP_PSC,
2431 sizeof(struct rte_flow_item_gtp_psc)),
2432 .next = NEXT(item_gtp_psc),
2435 [ITEM_GTP_PSC_QFI] = {
2437 .help = "QoS flow identifier",
2438 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2439 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2442 [ITEM_GTP_PSC_PDU_T] = {
2445 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2446 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2451 .help = "match PPPoE session header",
2452 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2453 .next = NEXT(item_pppoes),
2458 .help = "match PPPoE discovery header",
2459 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2460 .next = NEXT(item_pppoed),
2463 [ITEM_PPPOE_SEID] = {
2465 .help = "session identifier",
2466 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2467 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2470 [ITEM_PPPOE_PROTO_ID] = {
2472 .help = "match PPPoE session protocol identifier",
2473 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2474 sizeof(struct rte_flow_item_pppoe_proto_id)),
2475 .next = NEXT(item_pppoe_proto_id),
2480 .help = "matches higig2 header",
2481 .priv = PRIV_ITEM(HIGIG2,
2482 sizeof(struct rte_flow_item_higig2_hdr)),
2483 .next = NEXT(item_higig2),
2486 [ITEM_HIGIG2_CLASSIFICATION] = {
2487 .name = "classification",
2488 .help = "matches classification of higig2 header",
2489 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2490 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2491 hdr.ppt1.classification)),
2493 [ITEM_HIGIG2_VID] = {
2495 .help = "matches vid of higig2 header",
2496 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2497 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2500 /* Validate/create actions. */
2503 .help = "submit a list of associated actions",
2504 .next = NEXT(next_action),
2509 .help = "specify next action",
2510 .next = NEXT(next_action),
2514 .help = "end list of actions",
2515 .priv = PRIV_ACTION(END, 0),
2520 .help = "no-op action",
2521 .priv = PRIV_ACTION(VOID, 0),
2522 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2525 [ACTION_PASSTHRU] = {
2527 .help = "let subsequent rule process matched packets",
2528 .priv = PRIV_ACTION(PASSTHRU, 0),
2529 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2534 .help = "redirect traffic to a given group",
2535 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2536 .next = NEXT(action_jump),
2539 [ACTION_JUMP_GROUP] = {
2541 .help = "group to redirect traffic to",
2542 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2543 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2544 .call = parse_vc_conf,
2548 .help = "attach 32 bit value to packets",
2549 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2550 .next = NEXT(action_mark),
2553 [ACTION_MARK_ID] = {
2555 .help = "32 bit value to return with packets",
2556 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2557 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2558 .call = parse_vc_conf,
2562 .help = "flag packets",
2563 .priv = PRIV_ACTION(FLAG, 0),
2564 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2569 .help = "assign packets to a given queue index",
2570 .priv = PRIV_ACTION(QUEUE,
2571 sizeof(struct rte_flow_action_queue)),
2572 .next = NEXT(action_queue),
2575 [ACTION_QUEUE_INDEX] = {
2577 .help = "queue index to use",
2578 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2579 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2580 .call = parse_vc_conf,
2584 .help = "drop packets (note: passthru has priority)",
2585 .priv = PRIV_ACTION(DROP, 0),
2586 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2591 .help = "enable counters for this rule",
2592 .priv = PRIV_ACTION(COUNT,
2593 sizeof(struct rte_flow_action_count)),
2594 .next = NEXT(action_count),
2597 [ACTION_COUNT_ID] = {
2598 .name = "identifier",
2599 .help = "counter identifier to use",
2600 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2601 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2602 .call = parse_vc_conf,
2604 [ACTION_COUNT_SHARED] = {
2606 .help = "shared counter",
2607 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2608 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2610 .call = parse_vc_conf,
2614 .help = "spread packets among several queues",
2615 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2616 .next = NEXT(action_rss),
2617 .call = parse_vc_action_rss,
2619 [ACTION_RSS_FUNC] = {
2621 .help = "RSS hash function to apply",
2622 .next = NEXT(action_rss,
2623 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2624 ACTION_RSS_FUNC_TOEPLITZ,
2625 ACTION_RSS_FUNC_SIMPLE_XOR,
2626 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2628 [ACTION_RSS_FUNC_DEFAULT] = {
2630 .help = "default hash function",
2631 .call = parse_vc_action_rss_func,
2633 [ACTION_RSS_FUNC_TOEPLITZ] = {
2635 .help = "Toeplitz hash function",
2636 .call = parse_vc_action_rss_func,
2638 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2639 .name = "simple_xor",
2640 .help = "simple XOR hash function",
2641 .call = parse_vc_action_rss_func,
2643 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2644 .name = "symmetric_toeplitz",
2645 .help = "Symmetric Toeplitz hash function",
2646 .call = parse_vc_action_rss_func,
2648 [ACTION_RSS_LEVEL] = {
2650 .help = "encapsulation level for \"types\"",
2651 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2652 .args = ARGS(ARGS_ENTRY_ARB
2653 (offsetof(struct action_rss_data, conf) +
2654 offsetof(struct rte_flow_action_rss, level),
2655 sizeof(((struct rte_flow_action_rss *)0)->
2658 [ACTION_RSS_TYPES] = {
2660 .help = "specific RSS hash types",
2661 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2663 [ACTION_RSS_TYPE] = {
2665 .help = "RSS hash type",
2666 .call = parse_vc_action_rss_type,
2667 .comp = comp_vc_action_rss_type,
2669 [ACTION_RSS_KEY] = {
2671 .help = "RSS hash key",
2672 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2673 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2675 (offsetof(struct action_rss_data, conf) +
2676 offsetof(struct rte_flow_action_rss, key_len),
2677 sizeof(((struct rte_flow_action_rss *)0)->
2679 ARGS_ENTRY(struct action_rss_data, key)),
2681 [ACTION_RSS_KEY_LEN] = {
2683 .help = "RSS hash key length in bytes",
2684 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2685 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2686 (offsetof(struct action_rss_data, conf) +
2687 offsetof(struct rte_flow_action_rss, key_len),
2688 sizeof(((struct rte_flow_action_rss *)0)->
2691 RSS_HASH_KEY_LENGTH)),
2693 [ACTION_RSS_QUEUES] = {
2695 .help = "queue indices to use",
2696 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2697 .call = parse_vc_conf,
2699 [ACTION_RSS_QUEUE] = {
2701 .help = "queue index",
2702 .call = parse_vc_action_rss_queue,
2703 .comp = comp_vc_action_rss_queue,
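/*
 * Illustrative "rss" action usage with the tokens above (a sketch):
 *
 *	... actions rss func toeplitz types ipv4-udp end queues 0 1 2 3 end
 *		/ end
 */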
2707 .help = "direct traffic to physical function",
2708 .priv = PRIV_ACTION(PF, 0),
2709 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2714 .help = "direct traffic to a virtual function ID",
2715 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2716 .next = NEXT(action_vf),
2719 [ACTION_VF_ORIGINAL] = {
2721 .help = "use original VF ID if possible",
2722 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2723 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2725 .call = parse_vc_conf,
2730 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2731 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2732 .call = parse_vc_conf,
2734 [ACTION_PHY_PORT] = {
2736 .help = "direct packets to physical port index",
2737 .priv = PRIV_ACTION(PHY_PORT,
2738 sizeof(struct rte_flow_action_phy_port)),
2739 .next = NEXT(action_phy_port),
2742 [ACTION_PHY_PORT_ORIGINAL] = {
2744 .help = "use original port index if possible",
2745 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2746 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2748 .call = parse_vc_conf,
2750 [ACTION_PHY_PORT_INDEX] = {
2752 .help = "physical port index",
2753 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2754 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2756 .call = parse_vc_conf,
2758 [ACTION_PORT_ID] = {
2760 .help = "direct matching traffic to a given DPDK port ID",
2761 .priv = PRIV_ACTION(PORT_ID,
2762 sizeof(struct rte_flow_action_port_id)),
2763 .next = NEXT(action_port_id),
2766 [ACTION_PORT_ID_ORIGINAL] = {
2768 .help = "use original DPDK port ID if possible",
2769 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2770 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2772 .call = parse_vc_conf,
2774 [ACTION_PORT_ID_ID] = {
2776 .help = "DPDK port ID",
2777 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2778 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2779 .call = parse_vc_conf,
2783 .help = "meter the directed packets at given id",
2784 .priv = PRIV_ACTION(METER,
2785 sizeof(struct rte_flow_action_meter)),
2786 .next = NEXT(action_meter),
2789 [ACTION_METER_ID] = {
2791 .help = "meter id to use",
2792 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2793 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2794 .call = parse_vc_conf,
2796 [ACTION_OF_SET_MPLS_TTL] = {
2797 .name = "of_set_mpls_ttl",
2798 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2801 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2802 .next = NEXT(action_of_set_mpls_ttl),
2805 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2808 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2809 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2811 .call = parse_vc_conf,
2813 [ACTION_OF_DEC_MPLS_TTL] = {
2814 .name = "of_dec_mpls_ttl",
2815 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2816 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2817 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2820 [ACTION_OF_SET_NW_TTL] = {
2821 .name = "of_set_nw_ttl",
2822 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2825 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2826 .next = NEXT(action_of_set_nw_ttl),
2829 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2832 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2833 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2835 .call = parse_vc_conf,
2837 [ACTION_OF_DEC_NW_TTL] = {
2838 .name = "of_dec_nw_ttl",
2839 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2840 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2841 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2844 [ACTION_OF_COPY_TTL_OUT] = {
2845 .name = "of_copy_ttl_out",
2846 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2847 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2848 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2851 [ACTION_OF_COPY_TTL_IN] = {
2852 .name = "of_copy_ttl_in",
2853 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2854 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2855 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2858 [ACTION_OF_POP_VLAN] = {
2859 .name = "of_pop_vlan",
2860 .help = "OpenFlow's OFPAT_POP_VLAN",
2861 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2862 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2865 [ACTION_OF_PUSH_VLAN] = {
2866 .name = "of_push_vlan",
2867 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2870 sizeof(struct rte_flow_action_of_push_vlan)),
2871 .next = NEXT(action_of_push_vlan),
2874 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2875 .name = "ethertype",
2876 .help = "EtherType",
2877 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2878 .args = ARGS(ARGS_ENTRY_HTON
2879 (struct rte_flow_action_of_push_vlan,
2881 .call = parse_vc_conf,
2883 [ACTION_OF_SET_VLAN_VID] = {
2884 .name = "of_set_vlan_vid",
2885 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2888 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2889 .next = NEXT(action_of_set_vlan_vid),
2892 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2895 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2896 .args = ARGS(ARGS_ENTRY_HTON
2897 (struct rte_flow_action_of_set_vlan_vid,
2899 .call = parse_vc_conf,
2901 [ACTION_OF_SET_VLAN_PCP] = {
2902 .name = "of_set_vlan_pcp",
2903 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2906 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2907 .next = NEXT(action_of_set_vlan_pcp),
2910 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2912 .help = "VLAN priority",
2913 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2914 .args = ARGS(ARGS_ENTRY_HTON
2915 (struct rte_flow_action_of_set_vlan_pcp,
2917 .call = parse_vc_conf,
2919 [ACTION_OF_POP_MPLS] = {
2920 .name = "of_pop_mpls",
2921 .help = "OpenFlow's OFPAT_POP_MPLS",
2922 .priv = PRIV_ACTION(OF_POP_MPLS,
2923 sizeof(struct rte_flow_action_of_pop_mpls)),
2924 .next = NEXT(action_of_pop_mpls),
2927 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2928 .name = "ethertype",
2929 .help = "EtherType",
2930 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2931 .args = ARGS(ARGS_ENTRY_HTON
2932 (struct rte_flow_action_of_pop_mpls,
2934 .call = parse_vc_conf,
2936 [ACTION_OF_PUSH_MPLS] = {
2937 .name = "of_push_mpls",
2938 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2941 sizeof(struct rte_flow_action_of_push_mpls)),
2942 .next = NEXT(action_of_push_mpls),
2945 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2946 .name = "ethertype",
2947 .help = "EtherType",
2948 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2949 .args = ARGS(ARGS_ENTRY_HTON
2950 (struct rte_flow_action_of_push_mpls,
2952 .call = parse_vc_conf,
2954 [ACTION_VXLAN_ENCAP] = {
2955 .name = "vxlan_encap",
2956 .help = "VXLAN encapsulation, uses configuration set by \"set"
2958 .priv = PRIV_ACTION(VXLAN_ENCAP,
2959 sizeof(struct action_vxlan_encap_data)),
2960 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2961 .call = parse_vc_action_vxlan_encap,
2963 [ACTION_VXLAN_DECAP] = {
2964 .name = "vxlan_decap",
2965 .help = "Performs a decapsulation action by stripping all"
2966 " headers of the VXLAN tunnel network overlay from the"
2968 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2969 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2972 [ACTION_NVGRE_ENCAP] = {
2973 .name = "nvgre_encap",
2974 .help = "NVGRE encapsulation, uses configuration set by \"set"
2976 .priv = PRIV_ACTION(NVGRE_ENCAP,
2977 sizeof(struct action_nvgre_encap_data)),
2978 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2979 .call = parse_vc_action_nvgre_encap,
2981 [ACTION_NVGRE_DECAP] = {
2982 .name = "nvgre_decap",
2983 .help = "Performs a decapsulation action by stripping all"
2984 " headers of the NVGRE tunnel network overlay from the"
2986 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2987 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2990 [ACTION_L2_ENCAP] = {
2992 .help = "l2 encap, uses configuration set by"
2993 " \"set l2_encap\"",
2994 .priv = PRIV_ACTION(RAW_ENCAP,
2995 sizeof(struct action_raw_encap_data)),
2996 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2997 .call = parse_vc_action_l2_encap,
2999 [ACTION_L2_DECAP] = {
3001 .help = "l2 decap, uses configuration set by"
3002 " \"set l2_decap\"",
3003 .priv = PRIV_ACTION(RAW_DECAP,
3004 sizeof(struct action_raw_decap_data)),
3005 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3006 .call = parse_vc_action_l2_decap,
3008 [ACTION_MPLSOGRE_ENCAP] = {
3009 .name = "mplsogre_encap",
3010 .help = "mplsogre encapsulation, uses configuration set by"
3011 " \"set mplsogre_encap\"",
3012 .priv = PRIV_ACTION(RAW_ENCAP,
3013 sizeof(struct action_raw_encap_data)),
3014 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3015 .call = parse_vc_action_mplsogre_encap,
3017 [ACTION_MPLSOGRE_DECAP] = {
3018 .name = "mplsogre_decap",
3019 .help = "mplsogre decapsulation, uses configuration set by"
3020 " \"set mplsogre_decap\"",
3021 .priv = PRIV_ACTION(RAW_DECAP,
3022 sizeof(struct action_raw_decap_data)),
3023 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3024 .call = parse_vc_action_mplsogre_decap,
3026 [ACTION_MPLSOUDP_ENCAP] = {
3027 .name = "mplsoudp_encap",
3028 .help = "mplsoudp encapsulation, uses configuration set by"
3029 " \"set mplsoudp_encap\"",
3030 .priv = PRIV_ACTION(RAW_ENCAP,
3031 sizeof(struct action_raw_encap_data)),
3032 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3033 .call = parse_vc_action_mplsoudp_encap,
3035 [ACTION_MPLSOUDP_DECAP] = {
3036 .name = "mplsoudp_decap",
3037 .help = "mplsoudp decapsulation, uses configuration set by"
3038 " \"set mplsoudp_decap\"",
3039 .priv = PRIV_ACTION(RAW_DECAP,
3040 sizeof(struct action_raw_decap_data)),
3041 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3042 .call = parse_vc_action_mplsoudp_decap,
3044 [ACTION_SET_IPV4_SRC] = {
3045 .name = "set_ipv4_src",
3046 .help = "Set a new IPv4 source address in the outermost"
3048 .priv = PRIV_ACTION(SET_IPV4_SRC,
3049 sizeof(struct rte_flow_action_set_ipv4)),
3050 .next = NEXT(action_set_ipv4_src),
3053 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3054 .name = "ipv4_addr",
3055 .help = "new IPv4 source address to set",
3056 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3057 .args = ARGS(ARGS_ENTRY_HTON
3058 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3059 .call = parse_vc_conf,
3061 [ACTION_SET_IPV4_DST] = {
3062 .name = "set_ipv4_dst",
3063 .help = "Set a new IPv4 destination address in the outermost"
3065 .priv = PRIV_ACTION(SET_IPV4_DST,
3066 sizeof(struct rte_flow_action_set_ipv4)),
3067 .next = NEXT(action_set_ipv4_dst),
3070 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3071 .name = "ipv4_addr",
3072 .help = "new IPv4 destination address to set",
3073 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3074 .args = ARGS(ARGS_ENTRY_HTON
3075 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3076 .call = parse_vc_conf,
3078 [ACTION_SET_IPV6_SRC] = {
3079 .name = "set_ipv6_src",
3080 .help = "Set a new IPv6 source address in the outermost"
3082 .priv = PRIV_ACTION(SET_IPV6_SRC,
3083 sizeof(struct rte_flow_action_set_ipv6)),
3084 .next = NEXT(action_set_ipv6_src),
3087 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3088 .name = "ipv6_addr",
3089 .help = "new IPv6 source address to set",
3090 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3091 .args = ARGS(ARGS_ENTRY_HTON
3092 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3093 .call = parse_vc_conf,
3095 [ACTION_SET_IPV6_DST] = {
3096 .name = "set_ipv6_dst",
3097 .help = "Set a new IPv6 destination address in the outermost"
3099 .priv = PRIV_ACTION(SET_IPV6_DST,
3100 sizeof(struct rte_flow_action_set_ipv6)),
3101 .next = NEXT(action_set_ipv6_dst),
3104 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3105 .name = "ipv6_addr",
3106 .help = "new IPv6 destination address to set",
3107 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3108 .args = ARGS(ARGS_ENTRY_HTON
3109 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3110 .call = parse_vc_conf,
3112 [ACTION_SET_TP_SRC] = {
3113 .name = "set_tp_src",
3114 .help = "set a new source port number in the outermost"
3116 .priv = PRIV_ACTION(SET_TP_SRC,
3117 sizeof(struct rte_flow_action_set_tp)),
3118 .next = NEXT(action_set_tp_src),
3121 [ACTION_SET_TP_SRC_TP_SRC] = {
3123 .help = "new source port number to set",
3124 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3125 .args = ARGS(ARGS_ENTRY_HTON
3126 (struct rte_flow_action_set_tp, port)),
3127 .call = parse_vc_conf,
3129 [ACTION_SET_TP_DST] = {
3130 .name = "set_tp_dst",
3131 .help = "set a new destination port number in the outermost"
3133 .priv = PRIV_ACTION(SET_TP_DST,
3134 sizeof(struct rte_flow_action_set_tp)),
3135 .next = NEXT(action_set_tp_dst),
3138 [ACTION_SET_TP_DST_TP_DST] = {
3140 .help = "new destination port number to set",
3141 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3142 .args = ARGS(ARGS_ENTRY_HTON
3143 (struct rte_flow_action_set_tp, port)),
3144 .call = parse_vc_conf,
3146 [ACTION_MAC_SWAP] = {
3148 .help = "Swap the source and destination MAC addresses"
3149 " in the outermost Ethernet header",
3150 .priv = PRIV_ACTION(MAC_SWAP, 0),
3151 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3154 [ACTION_DEC_TTL] = {
3156 .help = "decrease network TTL if available",
3157 .priv = PRIV_ACTION(DEC_TTL, 0),
3158 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3161 [ACTION_SET_TTL] = {
3163 .help = "set ttl value",
3164 .priv = PRIV_ACTION(SET_TTL,
3165 sizeof(struct rte_flow_action_set_ttl)),
3166 .next = NEXT(action_set_ttl),
3169 [ACTION_SET_TTL_TTL] = {
3170 .name = "ttl_value",
3171 .help = "new ttl value to set",
3172 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3173 .args = ARGS(ARGS_ENTRY_HTON
3174 (struct rte_flow_action_set_ttl, ttl_value)),
3175 .call = parse_vc_conf,
3177 [ACTION_SET_MAC_SRC] = {
3178 .name = "set_mac_src",
3179 .help = "set source mac address",
3180 .priv = PRIV_ACTION(SET_MAC_SRC,
3181 sizeof(struct rte_flow_action_set_mac)),
3182 .next = NEXT(action_set_mac_src),
3185 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3187 .help = "new source mac address",
3188 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3189 .args = ARGS(ARGS_ENTRY_HTON
3190 (struct rte_flow_action_set_mac, mac_addr)),
3191 .call = parse_vc_conf,
3193 [ACTION_SET_MAC_DST] = {
3194 .name = "set_mac_dst",
3195 .help = "set destination mac address",
3196 .priv = PRIV_ACTION(SET_MAC_DST,
3197 sizeof(struct rte_flow_action_set_mac)),
3198 .next = NEXT(action_set_mac_dst),
3201 [ACTION_SET_MAC_DST_MAC_DST] = {
3203 .help = "new destination mac address to set",
3204 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3205 .args = ARGS(ARGS_ENTRY_HTON
3206 (struct rte_flow_action_set_mac, mac_addr)),
3207 .call = parse_vc_conf,
3209 [ACTION_INC_TCP_SEQ] = {
3210 .name = "inc_tcp_seq",
3211 .help = "increase TCP sequence number",
3212 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3213 .next = NEXT(action_inc_tcp_seq),
3216 [ACTION_INC_TCP_SEQ_VALUE] = {
3218 .help = "the value to increase TCP sequence number by",
3219 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3220 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3221 .call = parse_vc_conf,
3223 [ACTION_DEC_TCP_SEQ] = {
3224 .name = "dec_tcp_seq",
3225 .help = "decrease TCP sequence number",
3226 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3227 .next = NEXT(action_dec_tcp_seq),
3230 [ACTION_DEC_TCP_SEQ_VALUE] = {
3232 .help = "the value to decrease TCP sequence number by",
3233 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3234 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3235 .call = parse_vc_conf,
3237 [ACTION_INC_TCP_ACK] = {
3238 .name = "inc_tcp_ack",
3239 .help = "increase TCP acknowledgment number",
3240 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3241 .next = NEXT(action_inc_tcp_ack),
3244 [ACTION_INC_TCP_ACK_VALUE] = {
3246 .help = "the value to increase TCP acknowledgment number by",
3247 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3248 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3249 .call = parse_vc_conf,
3251 [ACTION_DEC_TCP_ACK] = {
3252 .name = "dec_tcp_ack",
3253 .help = "decrease TCP acknowledgment number",
3254 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3255 .next = NEXT(action_dec_tcp_ack),
3258 [ACTION_DEC_TCP_ACK_VALUE] = {
3260 .help = "the value to decrease TCP acknowledgment number by",
3261 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3262 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3263 .call = parse_vc_conf,
3265 [ACTION_RAW_ENCAP] = {
3266 .name = "raw_encap",
3267 .help = "encapsulation data, defined by set raw_encap",
3268 .priv = PRIV_ACTION(RAW_ENCAP,
3269 sizeof(struct rte_flow_action_raw_encap)),
3270 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3271 .call = parse_vc_action_raw_encap,
3273 [ACTION_RAW_DECAP] = {
3274 .name = "raw_decap",
3275 .help = "decapsulation data, defined by set raw_encap",
3276 .priv = PRIV_ACTION(RAW_DECAP,
3277 sizeof(struct rte_flow_action_raw_decap)),
3278 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3279 .call = parse_vc_action_raw_decap,
3281 /* Top-level command. */
3284 .help = "set raw encap/decap data",
3285 .type = "set raw_encap|raw_decap <pattern>",
3286 .next = NEXT(NEXT_ENTRY(SET_RAW_ENCAP, SET_RAW_DECAP)),
3289 .call = parse_set_init,
3291 /* Sub-level commands. */
3293 .name = "raw_encap",
3294 .help = "set raw encap data",
3295 .next = NEXT(next_item),
3296 .call = parse_set_raw_encap_decap,
3299 .name = "raw_decap",
3300 .help = "set raw decap data",
3301 .next = NEXT(next_item),
3302 .call = parse_set_raw_encap_decap,
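/*
 * Illustrative usage of the raw encap/decap pair (assuming the usual testpmd
 * grammar): header bytes are configured first with "set raw_encap <pattern>"
 * or "set raw_decap <pattern>", then referenced from a flow rule, e.g.
 *
 *   flow create 0 ingress pattern eth / end actions raw_encap / queue index 0 / end
 */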
3306 /** Remove and return last entry from argument stack. */
3307 static const struct arg *
3308 pop_args(struct context *ctx)
3310 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3313 /** Add entry on top of the argument stack. */
3315 push_args(struct context *ctx, const struct arg *arg)
3317 if (ctx->args_num == CTX_STACK_SIZE)
3319 ctx->args[ctx->args_num++] = arg;
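/*
 * The argument stack implements a simple hand-off protocol: a token pushes
 * the struct arg describing where its value must be stored and the value
 * parser pops it. Parsers that fail push the argument back so another parser
 * can retry with the same descriptor (see parse_ipv4_addr() falling back to
 * parse_int() below).
 */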
3323 /** Spread value into buffer according to bit-mask. */
3325 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3327 uint32_t i = arg->size;
3335 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3344 unsigned int shift = 0;
3345 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
3347 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3348 if (!(arg->mask[i] & (1 << shift)))
3353 *buf &= ~(1 << shift);
3354 *buf |= (val & 1) << shift;
3362 /** Compare a string with a partial one of a given length. */
3364 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3366 int r = strncmp(full, partial, partial_len);
3370 if (strlen(full) <= partial_len)
3372 return full[partial_len];
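/*
 * Note: despite the name, this is an exact match of a length-delimited
 * (non NUL-terminated) command-line token against a full token name:
 * "eth" matches the "eth" token but not "ethertype", since the character
 * following the compared prefix is returned as a mismatch.
 */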
3376 * Parse a prefix length and generate a bit-mask.
3378 * Last argument (ctx->args) is retrieved to determine mask size, storage
3379 * location and whether the result must use network byte ordering.
3382 parse_prefix(struct context *ctx, const struct token *token,
3383 const char *str, unsigned int len,
3384 void *buf, unsigned int size)
3386 const struct arg *arg = pop_args(ctx);
3387 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3394 /* Argument is expected. */
3398 u = strtoumax(str, &end, 0);
3399 if (errno || (size_t)(end - str) != len)
3404 extra = arg_entry_bf_fill(NULL, 0, arg);
3413 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3414 !arg_entry_bf_fill(ctx->objmask, -1, arg))
3421 if (bytes > size || bytes + !!extra > size)
3425 buf = (uint8_t *)ctx->object + arg->offset;
3426 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3428 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3429 memset(buf, 0x00, size - bytes);
3431 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
3435 memset(buf, 0xff, bytes);
3436 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3438 ((uint8_t *)buf)[bytes] = conv[extra];
3441 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
3444 push_args(ctx, arg);
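/*
 * Example (illustrative): a prefix value of 20 applied to a 4-byte IPv4
 * address field yields the network-order mask ff:ff:f0:00, i.e. two full
 * 0xff bytes followed by conv[4] == 0xf0 for the remaining bits.
 */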
3448 /** Default parsing function for token name matching. */
3450 parse_default(struct context *ctx, const struct token *token,
3451 const char *str, unsigned int len,
3452 void *buf, unsigned int size)
3457 if (strcmp_partial(token->name, str, len))
3462 /** Parse flow command, initialize output buffer for subsequent tokens. */
3464 parse_init(struct context *ctx, const struct token *token,
3465 const char *str, unsigned int len,
3466 void *buf, unsigned int size)
3468 struct buffer *out = buf;
3470 /* Token name must match. */
3471 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3473 /* Nothing else to do if there is no buffer. */
3476 /* Make sure buffer is large enough. */
3477 if (size < sizeof(*out))
3479 /* Initialize buffer. */
3480 memset(out, 0x00, sizeof(*out));
3481 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3484 ctx->objmask = NULL;
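/*
 * The spare space past struct buffer is poisoned with 0x22 rather than
 * zeroed; later tokens overwrite it with pattern items, actions and their
 * configuration, presumably so stale reads stand out while debugging.
 */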
3488 /** Parse tokens for validate/create commands. */
3490 parse_vc(struct context *ctx, const struct token *token,
3491 const char *str, unsigned int len,
3492 void *buf, unsigned int size)
3494 struct buffer *out = buf;
3498 /* Token name must match. */
3499 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3501 /* Nothing else to do if there is no buffer. */
3504 if (!out->command) {
3505 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3507 if (sizeof(*out) > size)
3509 out->command = ctx->curr;
3512 ctx->objmask = NULL;
3513 out->args.vc.data = (uint8_t *)out + size;
3517 ctx->object = &out->args.vc.attr;
3518 ctx->objmask = NULL;
3519 switch (ctx->curr) {
3524 out->args.vc.attr.ingress = 1;
3527 out->args.vc.attr.egress = 1;
3530 out->args.vc.attr.transfer = 1;
3533 out->args.vc.pattern =
3534 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3536 ctx->object = out->args.vc.pattern;
3537 ctx->objmask = NULL;
3540 out->args.vc.actions =
3541 (void *)RTE_ALIGN_CEIL((uintptr_t)
3542 (out->args.vc.pattern +
3543 out->args.vc.pattern_n),
3545 ctx->object = out->args.vc.actions;
3546 ctx->objmask = NULL;
3553 if (!out->args.vc.actions) {
3554 const struct parse_item_priv *priv = token->priv;
3555 struct rte_flow_item *item =
3556 out->args.vc.pattern + out->args.vc.pattern_n;
3558 data_size = priv->size * 3; /* spec, last, mask */
3559 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3560 (out->args.vc.data - data_size),
3562 if ((uint8_t *)item + sizeof(*item) > data)
3564 *item = (struct rte_flow_item){ .type = priv->type };
3567 ++out->args.vc.pattern_n;
3569 ctx->objmask = NULL;
3571 const struct parse_action_priv *priv = token->priv;
3572 struct rte_flow_action *action =
3573 out->args.vc.actions + out->args.vc.actions_n;
3575 data_size = priv->size; /* configuration */
3576 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3577 (out->args.vc.data - data_size),
3579 if ((uint8_t *)action + sizeof(*action) > data)
3581 *action = (struct rte_flow_action){
3582 .type = priv->type,
3583 .conf = data_size ? data : NULL,
3584 };
3585 ++out->args.vc.actions_n;
3586 ctx->object = action;
3587 ctx->objmask = NULL;
3589 memset(data, 0, data_size);
3590 out->args.vc.data = data;
3591 ctx->objdata = data_size;
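/*
 * Layout produced by parse_vc(): the rte_flow_item and rte_flow_action
 * arrays grow upward right after struct buffer, while their spec/last/mask
 * and configuration storage is carved downward from out->args.vc.data (the
 * end of the buffer); parsing fails when the two regions would overlap.
 * A minimal rule exercising this path (illustrative):
 *
 *   flow create 0 ingress pattern eth / ipv4 / end actions queue index 1 / end
 */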
3595 /** Parse pattern item parameter type. */
3597 parse_vc_spec(struct context *ctx, const struct token *token,
3598 const char *str, unsigned int len,
3599 void *buf, unsigned int size)
3601 struct buffer *out = buf;
3602 struct rte_flow_item *item;
3608 /* Token name must match. */
3609 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3611 /* Parse parameter types. */
3612 switch (ctx->curr) {
3613 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3619 case ITEM_PARAM_SPEC:
3622 case ITEM_PARAM_LAST:
3625 case ITEM_PARAM_PREFIX:
3626 /* Modify next token to expect a prefix. */
3627 if (ctx->next_num < 2)
3629 ctx->next[ctx->next_num - 2] = prefix;
3631 case ITEM_PARAM_MASK:
3637 /* Nothing else to do if there is no buffer. */
3640 if (!out->args.vc.pattern_n)
3642 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3643 data_size = ctx->objdata / 3; /* spec, last, mask */
3644 /* Point to selected object. */
3645 ctx->object = out->args.vc.data + (data_size * index);
3647 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3648 item->mask = ctx->objmask;
3650 ctx->objmask = NULL;
3651 /* Update relevant item pointer. */
3652 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] = ctx->object;
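/*
 * Sketch of the field grammar (hedged; see the testpmd documentation for the
 * exact syntax): "spec" selects index 0 (item->spec), "last" index 1, and
 * "prefix" falls through to "mask" (index 2), so e.g.
 * "ipv4 src spec 10.0.0.0 src prefix 24" fills item->spec and item->mask.
 */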
3657 /** Parse action configuration field. */
3659 parse_vc_conf(struct context *ctx, const struct token *token,
3660 const char *str, unsigned int len,
3661 void *buf, unsigned int size)
3663 struct buffer *out = buf;
3666 /* Token name must match. */
3667 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3669 /* Nothing else to do if there is no buffer. */
3672 /* Point to selected object. */
3673 ctx->object = out->args.vc.data;
3674 ctx->objmask = NULL;
3678 /** Parse RSS action. */
3680 parse_vc_action_rss(struct context *ctx, const struct token *token,
3681 const char *str, unsigned int len,
3682 void *buf, unsigned int size)
3684 struct buffer *out = buf;
3685 struct rte_flow_action *action;
3686 struct action_rss_data *action_rss_data;
3690 ret = parse_vc(ctx, token, str, len, buf, size);
3693 /* Nothing else to do if there is no buffer. */
3696 if (!out->args.vc.actions_n)
3698 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3699 /* Point to selected object. */
3700 ctx->object = out->args.vc.data;
3701 ctx->objmask = NULL;
3702 /* Set up default configuration. */
3703 action_rss_data = ctx->object;
3704 *action_rss_data = (struct action_rss_data){
3705 .conf = (struct rte_flow_action_rss){
3706 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3709 .key_len = sizeof(action_rss_data->key),
3710 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3711 .key = action_rss_data->key,
3712 .queue = action_rss_data->queue,
3714 .key = "testpmd's default RSS hash key, "
3715 "override it for better balancing",
3718 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3719 action_rss_data->queue[i] = i;
3720 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3721 ctx->port != (portid_t)RTE_PORT_ALL) {
3722 struct rte_eth_dev_info info;
3725 ret2 = rte_eth_dev_info_get(ctx->port, &info);
3729 action_rss_data->conf.key_len =
3730 RTE_MIN(sizeof(action_rss_data->key),
3731 info.hash_key_size);
3733 action->conf = &action_rss_data->conf;
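/*
 * Illustrative RSS action (assuming the usual testpmd grammar): both the
 * "types" and "queues" sub-lists are terminated by the "end" token, e.g.
 *
 *   ... actions rss func toeplitz types ipv4-udp end queues 0 1 end / end
 */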
3738 * Parse func field for RSS action.
3740 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3741 * ACTION_RSS_FUNC_* index that called this function.
3744 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3745 const char *str, unsigned int len,
3746 void *buf, unsigned int size)
3748 struct action_rss_data *action_rss_data;
3749 enum rte_eth_hash_function func;
3753 /* Token name must match. */
3754 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3756 switch (ctx->curr) {
3757 case ACTION_RSS_FUNC_DEFAULT:
3758 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3760 case ACTION_RSS_FUNC_TOEPLITZ:
3761 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3763 case ACTION_RSS_FUNC_SIMPLE_XOR:
3764 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3766 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
3767 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
3774 action_rss_data = ctx->object;
3775 action_rss_data->conf.func = func;
3780 * Parse type field for RSS action.
3782 * Valid tokens are type field names and the "end" token.
3785 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3786 const char *str, unsigned int len,
3787 void *buf, unsigned int size)
3789 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3790 struct action_rss_data *action_rss_data;
3796 if (ctx->curr != ACTION_RSS_TYPE)
3798 if (!(ctx->objdata >> 16) && ctx->object) {
3799 action_rss_data = ctx->object;
3800 action_rss_data->conf.types = 0;
3802 if (!strcmp_partial("end", str, len)) {
3803 ctx->objdata &= 0xffff;
3806 for (i = 0; rss_type_table[i].str; ++i)
3807 if (!strcmp_partial(rss_type_table[i].str, str, len))
3809 if (!rss_type_table[i].str)
3811 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3813 if (ctx->next_num == RTE_DIM(ctx->next))
3815 ctx->next[ctx->next_num++] = next;
3818 action_rss_data = ctx->object;
3819 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3824 * Parse queue field for RSS action.
3826 * Valid tokens are queue indices and the "end" token.
3829 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3830 const char *str, unsigned int len,
3831 void *buf, unsigned int size)
3833 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3834 struct action_rss_data *action_rss_data;
3835 const struct arg *arg;
3842 if (ctx->curr != ACTION_RSS_QUEUE)
3844 i = ctx->objdata >> 16;
3845 if (!strcmp_partial("end", str, len)) {
3846 ctx->objdata &= 0xffff;
3849 if (i >= ACTION_RSS_QUEUE_NUM)
3851 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3852 i * sizeof(action_rss_data->queue[i]),
3853 sizeof(action_rss_data->queue[i]));
3854 if (push_args(ctx, arg))
3856 ret = parse_int(ctx, token, str, len, NULL, 0);
3862 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3864 if (ctx->next_num == RTE_DIM(ctx->next))
3866 ctx->next[ctx->next_num++] = next;
3870 action_rss_data = ctx->object;
3871 action_rss_data->conf.queue_num = i;
3872 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3876 /** Parse VXLAN encap action. */
3878 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3879 const char *str, unsigned int len,
3880 void *buf, unsigned int size)
3882 struct buffer *out = buf;
3883 struct rte_flow_action *action;
3884 struct action_vxlan_encap_data *action_vxlan_encap_data;
3887 ret = parse_vc(ctx, token, str, len, buf, size);
3890 /* Nothing else to do if there is no buffer. */
3893 if (!out->args.vc.actions_n)
3895 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3896 /* Point to selected object. */
3897 ctx->object = out->args.vc.data;
3898 ctx->objmask = NULL;
3899 /* Set up default configuration. */
3900 action_vxlan_encap_data = ctx->object;
3901 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3902 .conf = (struct rte_flow_action_vxlan_encap){
3903 .definition = action_vxlan_encap_data->items,
3907 .type = RTE_FLOW_ITEM_TYPE_ETH,
3908 .spec = &action_vxlan_encap_data->item_eth,
3909 .mask = &rte_flow_item_eth_mask,
3912 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3913 .spec = &action_vxlan_encap_data->item_vlan,
3914 .mask = &rte_flow_item_vlan_mask,
3917 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3918 .spec = &action_vxlan_encap_data->item_ipv4,
3919 .mask = &rte_flow_item_ipv4_mask,
3922 .type = RTE_FLOW_ITEM_TYPE_UDP,
3923 .spec = &action_vxlan_encap_data->item_udp,
3924 .mask = &rte_flow_item_udp_mask,
3927 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3928 .spec = &action_vxlan_encap_data->item_vxlan,
3929 .mask = &rte_flow_item_vxlan_mask,
3932 .type = RTE_FLOW_ITEM_TYPE_END,
3937 .tci = vxlan_encap_conf.vlan_tci,
3941 .src_addr = vxlan_encap_conf.ipv4_src,
3942 .dst_addr = vxlan_encap_conf.ipv4_dst,
3945 .src_port = vxlan_encap_conf.udp_src,
3946 .dst_port = vxlan_encap_conf.udp_dst,
3948 .item_vxlan.flags = 0,
3950 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3951 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3952 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3953 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
3954 if (!vxlan_encap_conf.select_ipv4) {
3955 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3956 &vxlan_encap_conf.ipv6_src,
3957 sizeof(vxlan_encap_conf.ipv6_src));
3958 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3959 &vxlan_encap_conf.ipv6_dst,
3960 sizeof(vxlan_encap_conf.ipv6_dst));
3961 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3962 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3963 .spec = &action_vxlan_encap_data->item_ipv6,
3964 .mask = &rte_flow_item_ipv6_mask,
3967 if (!vxlan_encap_conf.select_vlan)
3968 action_vxlan_encap_data->items[1].type =
3969 RTE_FLOW_ITEM_TYPE_VOID;
3970 if (vxlan_encap_conf.select_tos_ttl) {
3971 if (vxlan_encap_conf.select_ipv4) {
3972 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3974 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3975 sizeof(ipv4_mask_tos));
3976 ipv4_mask_tos.hdr.type_of_service = 0xff;
3977 ipv4_mask_tos.hdr.time_to_live = 0xff;
3978 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3979 vxlan_encap_conf.ip_tos;
3980 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3981 vxlan_encap_conf.ip_ttl;
3982 action_vxlan_encap_data->items[2].mask =
3985 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3987 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3988 sizeof(ipv6_mask_tos));
3989 ipv6_mask_tos.hdr.vtc_flow |=
3990 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3991 ipv6_mask_tos.hdr.hop_limits = 0xff;
3992 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3994 ((uint32_t)vxlan_encap_conf.ip_tos <<
3995 RTE_IPV6_HDR_TC_SHIFT);
3996 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3997 vxlan_encap_conf.ip_ttl;
3998 action_vxlan_encap_data->items[2].mask =
4002 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
4003 RTE_DIM(vxlan_encap_conf.vni));
4004 action->conf = &action_vxlan_encap_data->conf;
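/*
 * Illustrative sequence (hedged; argument list assumed from the testpmd
 * "set vxlan" documentation, check the docs for the exact form):
 *
 *   set vxlan ip-version ipv4 vni 4 udp-src 4789 udp-dst 4789
 *       ip-src 10.0.0.1 ip-dst 10.0.0.2
 *       eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
 *   flow create 0 ingress pattern eth / end actions vxlan_encap / queue index 0 / end
 */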
4008 /** Parse NVGRE encap action. */
4010 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
4011 const char *str, unsigned int len,
4012 void *buf, unsigned int size)
4014 struct buffer *out = buf;
4015 struct rte_flow_action *action;
4016 struct action_nvgre_encap_data *action_nvgre_encap_data;
4019 ret = parse_vc(ctx, token, str, len, buf, size);
4022 /* Nothing else to do if there is no buffer. */
4025 if (!out->args.vc.actions_n)
4027 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4028 /* Point to selected object. */
4029 ctx->object = out->args.vc.data;
4030 ctx->objmask = NULL;
4031 /* Set up default configuration. */
4032 action_nvgre_encap_data = ctx->object;
4033 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
4034 .conf = (struct rte_flow_action_nvgre_encap){
4035 .definition = action_nvgre_encap_data->items,
4039 .type = RTE_FLOW_ITEM_TYPE_ETH,
4040 .spec = &action_nvgre_encap_data->item_eth,
4041 .mask = &rte_flow_item_eth_mask,
4044 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4045 .spec = &action_nvgre_encap_data->item_vlan,
4046 .mask = &rte_flow_item_vlan_mask,
4049 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4050 .spec = &action_nvgre_encap_data->item_ipv4,
4051 .mask = &rte_flow_item_ipv4_mask,
4054 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
4055 .spec = &action_nvgre_encap_data->item_nvgre,
4056 .mask = &rte_flow_item_nvgre_mask,
4059 .type = RTE_FLOW_ITEM_TYPE_END,
4064 .tci = nvgre_encap_conf.vlan_tci,
4068 .src_addr = nvgre_encap_conf.ipv4_src,
4069 .dst_addr = nvgre_encap_conf.ipv4_dst,
4071 .item_nvgre.flow_id = 0,
4073 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4074 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4075 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4076 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4077 if (!nvgre_encap_conf.select_ipv4) {
4078 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4079 &nvgre_encap_conf.ipv6_src,
4080 sizeof(nvgre_encap_conf.ipv6_src));
4081 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4082 &nvgre_encap_conf.ipv6_dst,
4083 sizeof(nvgre_encap_conf.ipv6_dst));
4084 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4085 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4086 .spec = &action_nvgre_encap_data->item_ipv6,
4087 .mask = &rte_flow_item_ipv6_mask,
4090 if (!nvgre_encap_conf.select_vlan)
4091 action_nvgre_encap_data->items[1].type =
4092 RTE_FLOW_ITEM_TYPE_VOID;
4093 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4094 RTE_DIM(nvgre_encap_conf.tni));
4095 action->conf = &action_nvgre_encap_data->conf;
4099 /** Parse l2 encap action. */
4101 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4102 const char *str, unsigned int len,
4103 void *buf, unsigned int size)
4105 struct buffer *out = buf;
4106 struct rte_flow_action *action;
4107 struct action_raw_encap_data *action_encap_data;
4108 struct rte_flow_item_eth eth = { .type = 0, };
4109 struct rte_flow_item_vlan vlan = {
4110 .tci = mplsoudp_encap_conf.vlan_tci,
4116 ret = parse_vc(ctx, token, str, len, buf, size);
4119 /* Nothing else to do if there is no buffer. */
4122 if (!out->args.vc.actions_n)
4124 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4125 /* Point to selected object. */
4126 ctx->object = out->args.vc.data;
4127 ctx->objmask = NULL;
4128 /* Copy the headers to the buffer. */
4129 action_encap_data = ctx->object;
4130 *action_encap_data = (struct action_raw_encap_data) {
4131 .conf = (struct rte_flow_action_raw_encap){
4132 .data = action_encap_data->data,
4136 header = action_encap_data->data;
4137 if (l2_encap_conf.select_vlan)
4138 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4139 else if (l2_encap_conf.select_ipv4)
4140 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4142 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4143 memcpy(eth.dst.addr_bytes,
4144 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4145 memcpy(eth.src.addr_bytes,
4146 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4147 memcpy(header, ð, sizeof(eth));
4148 header += sizeof(eth);
4149 if (l2_encap_conf.select_vlan) {
4150 if (l2_encap_conf.select_ipv4)
4151 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4153 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4154 memcpy(header, &vlan, sizeof(vlan));
4155 header += sizeof(vlan);
4157 action_encap_data->conf.size = header -
4158 action_encap_data->data;
4159 action->conf = &action_encap_data->conf;
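/*
 * The raw_encap buffer is assembled by copying each header structure
 * back-to-back into action_encap_data->data (Ethernet, then an optional
 * VLAN tag) and recording the total length in conf.size.
 */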
4163 /** Parse l2 decap action. */
4165 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4166 const char *str, unsigned int len,
4167 void *buf, unsigned int size)
4169 struct buffer *out = buf;
4170 struct rte_flow_action *action;
4171 struct action_raw_decap_data *action_decap_data;
4172 struct rte_flow_item_eth eth = { .type = 0, };
4173 struct rte_flow_item_vlan vlan = {
4174 .tci = mplsoudp_encap_conf.vlan_tci,
4180 ret = parse_vc(ctx, token, str, len, buf, size);
4183 /* Nothing else to do if there is no buffer. */
4186 if (!out->args.vc.actions_n)
4188 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4189 /* Point to selected object. */
4190 ctx->object = out->args.vc.data;
4191 ctx->objmask = NULL;
4192 /* Copy the headers to the buffer. */
4193 action_decap_data = ctx->object;
4194 *action_decap_data = (struct action_raw_decap_data) {
4195 .conf = (struct rte_flow_action_raw_decap){
4196 .data = action_decap_data->data,
4200 header = action_decap_data->data;
4201 if (l2_decap_conf.select_vlan)
4202 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4203 memcpy(header, ð, sizeof(eth));
4204 header += sizeof(eth);
4205 if (l2_decap_conf.select_vlan) {
4206 memcpy(header, &vlan, sizeof(vlan));
4207 header += sizeof(vlan);
4209 action_decap_data->conf.size = header -
4210 action_decap_data->data;
4211 action->conf = &action_decap_data->conf;
4215 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4217 /** Parse MPLSOGRE encap action. */
4219 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4220 const char *str, unsigned int len,
4221 void *buf, unsigned int size)
4223 struct buffer *out = buf;
4224 struct rte_flow_action *action;
4225 struct action_raw_encap_data *action_encap_data;
4226 struct rte_flow_item_eth eth = { .type = 0, };
4227 struct rte_flow_item_vlan vlan = {
4228 .tci = mplsogre_encap_conf.vlan_tci,
4231 struct rte_flow_item_ipv4 ipv4 = {
4233 .src_addr = mplsogre_encap_conf.ipv4_src,
4234 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4235 .next_proto_id = IPPROTO_GRE,
4236 .version_ihl = RTE_IPV4_VHL_DEF,
4237 .time_to_live = IPDEFTTL,
4240 struct rte_flow_item_ipv6 ipv6 = {
4242 .proto = IPPROTO_GRE,
4243 .hop_limits = IPDEFTTL,
4246 struct rte_flow_item_gre gre = {
4247 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4249 struct rte_flow_item_mpls mpls;
4253 ret = parse_vc(ctx, token, str, len, buf, size);
4256 /* Nothing else to do if there is no buffer. */
4259 if (!out->args.vc.actions_n)
4261 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4262 /* Point to selected object. */
4263 ctx->object = out->args.vc.data;
4264 ctx->objmask = NULL;
4265 /* Copy the headers to the buffer. */
4266 action_encap_data = ctx->object;
4267 *action_encap_data = (struct action_raw_encap_data) {
4268 .conf = (struct rte_flow_action_raw_encap){
4269 .data = action_encap_data->data,
4274 header = action_encap_data->data;
4275 if (mplsogre_encap_conf.select_vlan)
4276 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4277 else if (mplsogre_encap_conf.select_ipv4)
4278 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4280 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4281 memcpy(eth.dst.addr_bytes,
4282 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4283 memcpy(eth.src.addr_bytes,
4284 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4285 memcpy(header, ð, sizeof(eth));
4286 header += sizeof(eth);
4287 if (mplsogre_encap_conf.select_vlan) {
4288 if (mplsogre_encap_conf.select_ipv4)
4289 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4291 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4292 memcpy(header, &vlan, sizeof(vlan));
4293 header += sizeof(vlan);
4295 if (mplsogre_encap_conf.select_ipv4) {
4296 memcpy(header, &ipv4, sizeof(ipv4));
4297 header += sizeof(ipv4);
4299 memcpy(&ipv6.hdr.src_addr,
4300 &mplsogre_encap_conf.ipv6_src,
4301 sizeof(mplsogre_encap_conf.ipv6_src));
4302 memcpy(&ipv6.hdr.dst_addr,
4303 &mplsogre_encap_conf.ipv6_dst,
4304 sizeof(mplsogre_encap_conf.ipv6_dst));
4305 memcpy(header, &ipv6, sizeof(ipv6));
4306 header += sizeof(ipv6);
4308 memcpy(header, &gre, sizeof(gre));
4309 header += sizeof(gre);
4310 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4311 RTE_DIM(mplsogre_encap_conf.label));
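/* Mark the single MPLS label entry as bottom-of-stack (set the S bit). */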
4312 mpls.label_tc_s[2] |= 0x1;
4313 memcpy(header, &mpls, sizeof(mpls));
4314 header += sizeof(mpls);
4315 action_encap_data->conf.size = header -
4316 action_encap_data->data;
4317 action->conf = &action_encap_data->conf;
4321 /** Parse MPLSOGRE decap action. */
4323 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4324 const char *str, unsigned int len,
4325 void *buf, unsigned int size)
4327 struct buffer *out = buf;
4328 struct rte_flow_action *action;
4329 struct action_raw_decap_data *action_decap_data;
4330 struct rte_flow_item_eth eth = { .type = 0, };
4331 struct rte_flow_item_vlan vlan = {.tci = 0};
4332 struct rte_flow_item_ipv4 ipv4 = {
4334 .next_proto_id = IPPROTO_GRE,
4337 struct rte_flow_item_ipv6 ipv6 = {
4339 .proto = IPPROTO_GRE,
4342 struct rte_flow_item_gre gre = {
4343 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4345 struct rte_flow_item_mpls mpls;
4349 ret = parse_vc(ctx, token, str, len, buf, size);
4352 /* Nothing else to do if there is no buffer. */
4355 if (!out->args.vc.actions_n)
4357 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4358 /* Point to selected object. */
4359 ctx->object = out->args.vc.data;
4360 ctx->objmask = NULL;
4361 /* Copy the headers to the buffer. */
4362 action_decap_data = ctx->object;
4363 *action_decap_data = (struct action_raw_decap_data) {
4364 .conf = (struct rte_flow_action_raw_decap){
4365 .data = action_decap_data->data,
4369 header = action_decap_data->data;
4370 if (mplsogre_decap_conf.select_vlan)
4371 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4372 else if (mplsogre_encap_conf.select_ipv4)
4373 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4375 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4376 memcpy(eth.dst.addr_bytes,
4377 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4378 memcpy(eth.src.addr_bytes,
4379 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4380 memcpy(header, ð, sizeof(eth));
4381 header += sizeof(eth);
4382 if (mplsogre_encap_conf.select_vlan) {
4383 if (mplsogre_encap_conf.select_ipv4)
4384 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4386 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4387 memcpy(header, &vlan, sizeof(vlan));
4388 header += sizeof(vlan);
4390 if (mplsogre_encap_conf.select_ipv4) {
4391 memcpy(header, &ipv4, sizeof(ipv4));
4392 header += sizeof(ipv4);
4394 memcpy(header, &ipv6, sizeof(ipv6));
4395 header += sizeof(ipv6);
4397 memcpy(header, &gre, sizeof(gre));
4398 header += sizeof(gre);
4399 memset(&mpls, 0, sizeof(mpls));
4400 memcpy(header, &mpls, sizeof(mpls));
4401 header += sizeof(mpls);
4402 action_decap_data->conf.size = header -
4403 action_decap_data->data;
4404 action->conf = &action_decap_data->conf;
4408 /** Parse MPLSOUDP encap action. */
4410 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4411 const char *str, unsigned int len,
4412 void *buf, unsigned int size)
4414 struct buffer *out = buf;
4415 struct rte_flow_action *action;
4416 struct action_raw_encap_data *action_encap_data;
4417 struct rte_flow_item_eth eth = { .type = 0, };
4418 struct rte_flow_item_vlan vlan = {
4419 .tci = mplsoudp_encap_conf.vlan_tci,
4422 struct rte_flow_item_ipv4 ipv4 = {
4424 .src_addr = mplsoudp_encap_conf.ipv4_src,
4425 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4426 .next_proto_id = IPPROTO_UDP,
4427 .version_ihl = RTE_IPV4_VHL_DEF,
4428 .time_to_live = IPDEFTTL,
4431 struct rte_flow_item_ipv6 ipv6 = {
4433 .proto = IPPROTO_UDP,
4434 .hop_limits = IPDEFTTL,
4437 struct rte_flow_item_udp udp = {
4439 .src_port = mplsoudp_encap_conf.udp_src,
4440 .dst_port = mplsoudp_encap_conf.udp_dst,
4443 struct rte_flow_item_mpls mpls;
4447 ret = parse_vc(ctx, token, str, len, buf, size);
4450 /* Nothing else to do if there is no buffer. */
4453 if (!out->args.vc.actions_n)
4455 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4456 /* Point to selected object. */
4457 ctx->object = out->args.vc.data;
4458 ctx->objmask = NULL;
4459 /* Copy the headers to the buffer. */
4460 action_encap_data = ctx->object;
4461 *action_encap_data = (struct action_raw_encap_data) {
4462 .conf = (struct rte_flow_action_raw_encap){
4463 .data = action_encap_data->data,
4468 header = action_encap_data->data;
4469 if (mplsoudp_encap_conf.select_vlan)
4470 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4471 else if (mplsoudp_encap_conf.select_ipv4)
4472 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4474 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4475 memcpy(eth.dst.addr_bytes,
4476 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4477 memcpy(eth.src.addr_bytes,
4478 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4479 memcpy(header, ð, sizeof(eth));
4480 header += sizeof(eth);
4481 if (mplsoudp_encap_conf.select_vlan) {
4482 if (mplsoudp_encap_conf.select_ipv4)
4483 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4485 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4486 memcpy(header, &vlan, sizeof(vlan));
4487 header += sizeof(vlan);
4489 if (mplsoudp_encap_conf.select_ipv4) {
4490 memcpy(header, &ipv4, sizeof(ipv4));
4491 header += sizeof(ipv4);
4493 memcpy(&ipv6.hdr.src_addr,
4494 &mplsoudp_encap_conf.ipv6_src,
4495 sizeof(mplsoudp_encap_conf.ipv6_src));
4496 memcpy(&ipv6.hdr.dst_addr,
4497 &mplsoudp_encap_conf.ipv6_dst,
4498 sizeof(mplsoudp_encap_conf.ipv6_dst));
4499 memcpy(header, &ipv6, sizeof(ipv6));
4500 header += sizeof(ipv6);
4502 memcpy(header, &udp, sizeof(udp));
4503 header += sizeof(udp);
4504 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4505 RTE_DIM(mplsoudp_encap_conf.label));
4506 mpls.label_tc_s[2] |= 0x1;
4507 memcpy(header, &mpls, sizeof(mpls));
4508 header += sizeof(mpls);
4509 action_encap_data->conf.size = header -
4510 action_encap_data->data;
4511 action->conf = &action_encap_data->conf;
4515 /** Parse MPLSOUDP decap action. */
4517 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4518 const char *str, unsigned int len,
4519 void *buf, unsigned int size)
4521 struct buffer *out = buf;
4522 struct rte_flow_action *action;
4523 struct action_raw_decap_data *action_decap_data;
4524 struct rte_flow_item_eth eth = { .type = 0, };
4525 struct rte_flow_item_vlan vlan = {.tci = 0};
4526 struct rte_flow_item_ipv4 ipv4 = {
4528 .next_proto_id = IPPROTO_UDP,
4531 struct rte_flow_item_ipv6 ipv6 = {
4533 .proto = IPPROTO_UDP,
4536 struct rte_flow_item_udp udp = {
4538 .dst_port = rte_cpu_to_be_16(6635),
4541 struct rte_flow_item_mpls mpls;
4545 ret = parse_vc(ctx, token, str, len, buf, size);
4548 /* Nothing else to do if there is no buffer. */
4551 if (!out->args.vc.actions_n)
4553 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4554 /* Point to selected object. */
4555 ctx->object = out->args.vc.data;
4556 ctx->objmask = NULL;
4557 /* Copy the headers to the buffer. */
4558 action_decap_data = ctx->object;
4559 *action_decap_data = (struct action_raw_decap_data) {
4560 .conf = (struct rte_flow_action_raw_decap){
4561 .data = action_decap_data->data,
4565 header = action_decap_data->data;
4566 if (mplsoudp_decap_conf.select_vlan)
4567 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4568 else if (mplsoudp_encap_conf.select_ipv4)
4569 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4571 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4572 memcpy(eth.dst.addr_bytes,
4573 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4574 memcpy(eth.src.addr_bytes,
4575 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4576 memcpy(header, ð, sizeof(eth));
4577 header += sizeof(eth);
4578 if (mplsoudp_encap_conf.select_vlan) {
4579 if (mplsoudp_encap_conf.select_ipv4)
4580 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4582 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4583 memcpy(header, &vlan, sizeof(vlan));
4584 header += sizeof(vlan);
4586 if (mplsoudp_encap_conf.select_ipv4) {
4587 memcpy(header, &ipv4, sizeof(ipv4));
4588 header += sizeof(ipv4);
4590 memcpy(header, &ipv6, sizeof(ipv6));
4591 header += sizeof(ipv6);
4593 memcpy(header, &udp, sizeof(udp));
4594 header += sizeof(udp);
4595 memset(&mpls, 0, sizeof(mpls));
4596 memcpy(header, &mpls, sizeof(mpls));
4597 header += sizeof(mpls);
4598 action_decap_data->conf.size = header -
4599 action_decap_data->data;
4600 action->conf = &action_decap_data->conf;
4605 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4606 const char *str, unsigned int len, void *buf,
4609 struct buffer *out = buf;
4610 struct rte_flow_action *action;
4611 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4612 uint8_t *data = NULL;
4615 ret = parse_vc(ctx, token, str, len, buf, size);
4618 /* Nothing else to do if there is no buffer. */
4621 if (!out->args.vc.actions_n)
4623 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4624 /* Point to selected object. */
4625 ctx->object = out->args.vc.data;
4626 ctx->objmask = NULL;
4627 /* Copy the headers to the buffer. */
4628 action_raw_encap_conf = ctx->object;
4629 /* data stored from tail of data buffer */
4630 data = (uint8_t *)&(raw_encap_conf.data) +
4631 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4632 action_raw_encap_conf->data = data;
4633 action_raw_encap_conf->preserve = NULL;
4634 action_raw_encap_conf->size = raw_encap_conf.size;
4635 action->conf = action_raw_encap_conf;
4640 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4641 const char *str, unsigned int len, void *buf,
4644 struct buffer *out = buf;
4645 struct rte_flow_action *action;
4646 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4647 uint8_t *data = NULL;
4650 ret = parse_vc(ctx, token, str, len, buf, size);
4653 /* Nothing else to do if there is no buffer. */
4656 if (!out->args.vc.actions_n)
4658 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4659 /* Point to selected object. */
4660 ctx->object = out->args.vc.data;
4661 ctx->objmask = NULL;
4662 /* Copy the headers to the buffer. */
4663 action_raw_decap_conf = ctx->object;
4664 /* data stored from tail of data buffer */
4665 data = (uint8_t *)&(raw_decap_conf.data) +
4666 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4667 action_raw_decap_conf->data = data;
4668 action_raw_decap_conf->size = raw_decap_conf.size;
4669 action->conf = action_raw_decap_conf;
4673 /** Parse tokens for destroy command. */
4675 parse_destroy(struct context *ctx, const struct token *token,
4676 const char *str, unsigned int len,
4677 void *buf, unsigned int size)
4679 struct buffer *out = buf;
4681 /* Token name must match. */
4682 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4684 /* Nothing else to do if there is no buffer. */
4687 if (!out->command) {
4688 if (ctx->curr != DESTROY)
4690 if (sizeof(*out) > size)
4692 out->command = ctx->curr;
4695 ctx->objmask = NULL;
4696 out->args.destroy.rule =
4697 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4701 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4702 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4705 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4706 ctx->objmask = NULL;
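/*
 * Illustrative usage: "flow destroy 0 rule 0 rule 3" removes rules 0 and 3
 * from port 0; each "rule" token appends one ID to out->args.destroy.rule.
 */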
4710 /** Parse tokens for flush command. */
4712 parse_flush(struct context *ctx, const struct token *token,
4713 const char *str, unsigned int len,
4714 void *buf, unsigned int size)
4716 struct buffer *out = buf;
4718 /* Token name must match. */
4719 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4721 /* Nothing else to do if there is no buffer. */
4724 if (!out->command) {
4725 if (ctx->curr != FLUSH)
4727 if (sizeof(*out) > size)
4729 out->command = ctx->curr;
4732 ctx->objmask = NULL;
4737 /** Parse tokens for query command. */
4739 parse_query(struct context *ctx, const struct token *token,
4740 const char *str, unsigned int len,
4741 void *buf, unsigned int size)
4743 struct buffer *out = buf;
4745 /* Token name must match. */
4746 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4748 /* Nothing else to do if there is no buffer. */
4751 if (!out->command) {
4752 if (ctx->curr != QUERY)
4754 if (sizeof(*out) > size)
4756 out->command = ctx->curr;
4759 ctx->objmask = NULL;
4764 /** Parse action names. */
4766 parse_action(struct context *ctx, const struct token *token,
4767 const char *str, unsigned int len,
4768 void *buf, unsigned int size)
4770 struct buffer *out = buf;
4771 const struct arg *arg = pop_args(ctx);
4775 /* Argument is expected. */
4778 /* Parse action name. */
4779 for (i = 0; next_action[i]; ++i) {
4780 const struct parse_action_priv *priv;
4782 token = &token_list[next_action[i]];
4783 if (strcmp_partial(token->name, str, len))
4789 memcpy((uint8_t *)ctx->object + arg->offset,
4795 push_args(ctx, arg);
4799 /** Parse tokens for list command. */
4801 parse_list(struct context *ctx, const struct token *token,
4802 const char *str, unsigned int len,
4803 void *buf, unsigned int size)
4805 struct buffer *out = buf;
4807 /* Token name must match. */
4808 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4810 /* Nothing else to do if there is no buffer. */
4813 if (!out->command) {
4814 if (ctx->curr != LIST)
4816 if (sizeof(*out) > size)
4818 out->command = ctx->curr;
4821 ctx->objmask = NULL;
4822 out->args.list.group =
4823 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4827 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4828 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4831 ctx->object = out->args.list.group + out->args.list.group_n++;
4832 ctx->objmask = NULL;
4836 /** Parse tokens for isolate command. */
4838 parse_isolate(struct context *ctx, const struct token *token,
4839 const char *str, unsigned int len,
4840 void *buf, unsigned int size)
4842 struct buffer *out = buf;
4844 /* Token name must match. */
4845 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4847 /* Nothing else to do if there is no buffer. */
4850 if (!out->command) {
4851 if (ctx->curr != ISOLATE)
4853 if (sizeof(*out) > size)
4855 out->command = ctx->curr;
4858 ctx->objmask = NULL;
4864 * Parse signed/unsigned integers 8 to 64-bit long.
4866 * Last argument (ctx->args) is retrieved to determine integer type and
4870 parse_int(struct context *ctx, const struct token *token,
4871 const char *str, unsigned int len,
4872 void *buf, unsigned int size)
4874 const struct arg *arg = pop_args(ctx);
4879 /* Argument is expected. */
4884 (uintmax_t)strtoimax(str, &end, 0) :
4885 strtoumax(str, &end, 0);
4886 if (errno || (size_t)(end - str) != len)
4889 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4890 (intmax_t)u > (intmax_t)arg->max)) ||
4891 (!arg->sign && (u < arg->min || u > arg->max))))
4896 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4897 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4901 buf = (uint8_t *)ctx->object + arg->offset;
4903 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4907 case sizeof(uint8_t):
4908 *(uint8_t *)buf = u;
4910 case sizeof(uint16_t):
4911 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
4913 case sizeof(uint8_t [3]):
4914 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4916 ((uint8_t *)buf)[0] = u;
4917 ((uint8_t *)buf)[1] = u >> 8;
4918 ((uint8_t *)buf)[2] = u >> 16;
4922 ((uint8_t *)buf)[0] = u >> 16;
4923 ((uint8_t *)buf)[1] = u >> 8;
4924 ((uint8_t *)buf)[2] = u;
4926 case sizeof(uint32_t):
4927 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4929 case sizeof(uint64_t):
4930 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
4935 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4937 buf = (uint8_t *)ctx->objmask + arg->offset;
4942 push_args(ctx, arg);
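/*
 * The sizeof(uint8_t [3]) case above covers 3-byte network-order fields such
 * as VXLAN VNI, NVGRE TNI and the MPLS label/TC/S word, which have no native
 * integer type and are therefore byte-swapped by hand.
 */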
4949 * Three arguments (ctx->args) are retrieved from the stack to store data,
4950 * its actual length and address (in that order).
4953 parse_string(struct context *ctx, const struct token *token,
4954 const char *str, unsigned int len,
4955 void *buf, unsigned int size)
4957 const struct arg *arg_data = pop_args(ctx);
4958 const struct arg *arg_len = pop_args(ctx);
4959 const struct arg *arg_addr = pop_args(ctx);
4960 char tmp[16]; /* Ought to be enough. */
4963 /* Arguments are expected. */
4967 push_args(ctx, arg_data);
4971 push_args(ctx, arg_len);
4972 push_args(ctx, arg_data);
4975 size = arg_data->size;
4976 /* Bit-mask fill is not supported. */
4977 if (arg_data->mask || size < len)
4981 /* Let parse_int() fill length information first. */
4982 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4985 push_args(ctx, arg_len);
4986 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4991 buf = (uint8_t *)ctx->object + arg_data->offset;
4992 /* Output buffer is not necessarily NUL-terminated. */
4993 memcpy(buf, str, len);
4994 memset((uint8_t *)buf + len, 0x00, size - len);
4996 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4997 /* Save address if requested. */
4998 if (arg_addr->size) {
4999 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5001 (uint8_t *)ctx->object + arg_data->offset
5005 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5007 (uint8_t *)ctx->objmask + arg_data->offset
5013 push_args(ctx, arg_addr);
5014 push_args(ctx, arg_len);
5015 push_args(ctx, arg_data);

static int
parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
{
	char *c = NULL;
	uint32_t i, len;
	char tmp[3];

	/* Check input parameters. */
	if (src == NULL || dst == NULL || size == NULL || *size == 0)
		return -1;
	/* Convert pairs of hex characters to bytes. */
	for (i = 0, len = 0; i < *size; i += 2) {
		snprintf(tmp, 3, "%s", src + i);
		dst[len++] = strtoul(tmp, &c, 16);
		if (*c != 0)
			return -1;
	}
	*size = len;
	return 0;
}

static int
parse_hex(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
{
	const struct arg *arg_data = pop_args(ctx);
	const struct arg *arg_len = pop_args(ctx);
	const struct arg *arg_addr = pop_args(ctx);
	char tmp[16]; /* Ought to be enough. */
	int ret;
	unsigned int hexlen = len;
	unsigned int length = 256;
	uint8_t hex_tmp[length];

	/* Arguments are expected. */
	if (!arg_data)
		return -1;
	if (!arg_len) {
		push_args(ctx, arg_data);
		return -1;
	}
	if (!arg_addr) {
		push_args(ctx, arg_len);
		push_args(ctx, arg_data);
		return -1;
	}
	size = arg_data->size;
	/* Bit-mask fill is not supported. */
	if (arg_data->mask)
		goto error;
	if (!ctx->object)
		return len;
	/* Translate bytes string to array. */
	if (str[0] == '0' && ((str[1] == 'x') || (str[1] == 'X'))) {
		str += 2;
		hexlen -= 2;
	}
	if (hexlen > length)
		goto error;
	ret = parse_hex_string(str, hex_tmp, &hexlen);
	if (ret < 0)
		goto error;
	/* Let parse_int() fill length information first. */
	ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
	if (ret < 0)
		goto error;
	push_args(ctx, arg_len);
	ret = parse_int(ctx, token, tmp, ret, NULL, 0);
	if (ret < 0) {
		pop_args(ctx);
		goto error;
	}
	buf = (uint8_t *)ctx->object + arg_data->offset;
	/* Output buffer is not necessarily NUL-terminated. */
	memcpy(buf, hex_tmp, hexlen);
	memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg_data->offset,
		       0xff, hexlen);
	/* Save address if requested. */
	if (arg_addr->size) {
		memcpy((uint8_t *)ctx->object + arg_addr->offset,
		       (void *[]){ (uint8_t *)ctx->object + arg_data->offset },
		       arg_addr->size);
		if (ctx->objmask)
			memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
			       (void *[]){ (uint8_t *)ctx->objmask + arg_data->offset },
			       arg_addr->size);
	}
	return len;
error:
	push_args(ctx, arg_addr);
	push_args(ctx, arg_len);
	push_args(ctx, arg_data);
	return -1;
}
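
/*
 * Example: hexadecimal arguments accept both "0xaabbcc" and "aabbcc"; each
 * pair of characters becomes one byte, so both forms store the three bytes
 * 0xaa 0xbb 0xcc and set the associated length field to 3.
 */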

/**
 * Parse a MAC address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_mac_addr(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	struct rte_ether_addr tmp;
	int ret;

	(void)token;
	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
	if (ret < 0 || (unsigned int)ret != len)
		goto error;
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
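
/*
 * Example (testpmd flow syntax): MAC arguments such as
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 / end ...
 * are converted here and stored in network byte order.
 */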

/**
 * Parse an IPv4 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_ipv4_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	char str2[len + 1];
	struct in_addr tmp;
	int ret;

	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	memcpy(str2, str, len);
	str2[len] = '\0';
	ret = inet_pton(AF_INET, str2, &tmp);
	if (ret != 1) {
		/* Attempt integer parsing. */
		push_args(ctx, arg);
		return parse_int(ctx, token, str, len, buf, size);
	}
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}
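
/*
 * Example (testpmd flow syntax):
 *   flow create 0 ingress pattern eth / ipv4 src is 10.0.0.1 / end
 *        actions queue index 1 / end
 * The "10.0.0.1" token goes through parse_ipv4_addr(); a value that is not
 * in dotted-quad form (e.g. a mask given as an integer) falls back to
 * parse_int().
 */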

/**
 * Parse an IPv6 address.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_ipv6_addr(struct context *ctx, const struct token *token,
		const char *str, unsigned int len,
		void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	char str2[len + 1];
	struct in6_addr tmp;
	int ret;

	(void)token;
	/* Argument is expected. */
	if (!arg)
		return -1;
	size = arg->size;
	/* Bit-mask fill is not supported. */
	if (arg->mask || size != sizeof(tmp))
		goto error;
	/* Only network endian is supported. */
	if (!arg->hton)
		goto error;
	memcpy(str2, str, len);
	str2[len] = '\0';
	ret = inet_pton(AF_INET6, str2, &tmp);
	if (ret != 1)
		goto error;
	if (!ctx->object)
		return len;
	buf = (uint8_t *)ctx->object + arg->offset;
	memcpy(buf, &tmp, size);
	if (ctx->objmask)
		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	return len;
error:
	push_args(ctx, arg);
	return -1;
}

/** Boolean values (even indices stand for false). */
static const char *const boolean_name[] = {
	"0", "1",
	"false", "true",
	"no", "yes",
	"N", "Y",
	"off", "on",
	NULL,
};

/**
 * Parse a boolean value.
 *
 * Last argument (ctx->args) is retrieved to determine storage size and
 * location.
 */
static int
parse_boolean(struct context *ctx, const struct token *token,
	      const char *str, unsigned int len,
	      void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	unsigned int i;
	int ret;

	/* Argument is expected. */
	if (!arg)
		return -1;
	for (i = 0; boolean_name[i]; ++i)
		if (!strcmp_partial(boolean_name[i], str, len))
			break;
	/* Process token as integer. */
	if (boolean_name[i])
		str = i & 1 ? "1" : "0";
	push_args(ctx, arg);
	ret = parse_int(ctx, token, str, strlen(str), buf, size);
	return ret > 0 ? (int)len : ret;
}
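
/*
 * Example: any entry of boolean_name[] is accepted, so "original on",
 * "original 1" and "original yes" are equivalent; even indices map to "0"
 * and odd indices to "1" before the value is handed to parse_int().
 */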

/** Parse port and update context. */
static int
parse_port(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
{
	struct buffer *out = &(struct buffer){ .port = 0 };
	int ret;

	if (buf)
		out = buf;
	else {
		ctx->objdata = 0;
		ctx->object = out;
		ctx->objmask = NULL;
		size = sizeof(*out);
	}
	ret = parse_int(ctx, token, str, len, out, size);
	if (ret >= 0)
		ctx->port = out->port;
	if (!buf)
		ctx->object = NULL;
	return ret;
}

/** Parse set command, initialize output buffer for subsequent tokens. */
static int
parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
			  const char *str, unsigned int len,
			  void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
		return -1;
	ctx->objdata = 0;
	ctx->objmask = NULL;
	if (!out->command)
		return -1;
	out->command = ctx->curr;
	return len;
}

/**
 * Parse set raw_encap/raw_decap command,
 * initialize output buffer for subsequent tokens.
 */
static int
parse_set_init(struct context *ctx, const struct token *token,
	       const char *str, unsigned int len,
	       void *buf, unsigned int size)
{
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
		return -1;
	/* Nothing else to do if there is no buffer. */
	if (!out)
		return len;
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
		return -1;
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objdata = 0;
	ctx->object = out;
	ctx->objmask = NULL;
	if (!out->command) {
		if (ctx->curr != SET)
			return -1;
		if (sizeof(*out) > size)
			return -1;
		out->command = ctx->curr;
		out->args.vc.data = (uint8_t *)out + size;
		/* All we need is pattern. */
		out->args.vc.pattern =
			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
					       sizeof(double));
		ctx->object = out->args.vc.pattern;
	}
	return len;
}
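
/*
 * Resulting layout (a sketch): the struct buffer header is followed by the
 * pattern item array, while raw spec storage grows downward from
 * out->args.vc.data at the end of the same buffer, mirroring the regular
 * flow command parser.
 */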

/** No completion. */
static int
comp_none(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	(void)ctx; (void)token; (void)ent; (void)buf; (void)size;
	return 0;
}

/** Complete boolean values. */
static int
comp_boolean(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; boolean_name[i]; ++i)
		if (buf && i == ent)
			return strlcpy(buf, boolean_name[i], size);
	if (buf)
		return -1;
	return i;
}

/** Complete action names. */
static int
comp_action(struct context *ctx, const struct token *token,
	    unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; next_action[i]; ++i)
		if (buf && i == ent)
			return strlcpy(buf, token_list[next_action[i]].name,
				       size);
	if (buf)
		return -1;
	return i;
}

/** Complete available ports. */
static int
comp_port(struct context *ctx, const struct token *token,
	  unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i = 0;
	portid_t p;

	(void)ctx;
	(void)token;
	RTE_ETH_FOREACH_DEV(p) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", p);
		++i;
	}
	if (buf)
		return -1;
	return i;
}

/** Complete available rule IDs. */
static int
comp_rule_id(struct context *ctx, const struct token *token,
	     unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i = 0;
	struct rte_port *port;
	struct port_flow *pf;

	(void)token;
	if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
	    ctx->port == (portid_t)RTE_PORT_ALL)
		return -1;
	port = &ports[ctx->port];
	for (pf = port->flow_list; pf != NULL; pf = pf->next) {
		if (buf && i == ent)
			return snprintf(buf, size, "%u", pf->id);
		++i;
	}
	if (buf)
		return -1;
	return i;
}

/** Complete type field for RSS action. */
static int
comp_vc_action_rss_type(struct context *ctx, const struct token *token,
			unsigned int ent, char *buf, unsigned int size)
{
	unsigned int i;

	(void)ctx;
	(void)token;
	for (i = 0; rss_type_table[i].str; ++i)
		;
	if (!buf)
		return i + 1;
	if (ent < i)
		return strlcpy(buf, rss_type_table[ent].str, size);
	if (ent == i)
		return snprintf(buf, size, "end");
	return -1;
}

/** Complete queue field for RSS action. */
static int
comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
			 unsigned int ent, char *buf, unsigned int size)
{
	(void)ctx;
	(void)token;
	if (!buf)
		return nb_rxq + 1;
	if (ent < nb_rxq)
		return snprintf(buf, size, "%u", ent);
	if (ent == nb_rxq)
		return snprintf(buf, size, "end");
	return -1;
}
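
/*
 * These completion helpers drive interactive <TAB> completion, e.g.
 * "flow destroy 0 rule <TAB>" lists the rule IDs currently attached to
 * port 0, and "... actions rss queues <TAB>" offers 0..nb_rxq-1 plus "end".
 */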

/** Internal context. */
static struct context cmd_flow_context;

/** Global parser instances (cmdline API). */
cmdline_parse_inst_t cmd_flow;
cmdline_parse_inst_t cmd_set_raw;

/** Initialize context. */
static void
cmd_flow_context_init(struct context *ctx)
{
	/* A full memset() is not necessary. */
	ctx->curr = ZERO;
	ctx->prev = ZERO;
	ctx->next_num = 0;
	ctx->args_num = 0;
	ctx->eol = 0;
	ctx->last = 0;
	ctx->port = 0;
	ctx->objdata = 0;
	ctx->object = NULL;
	ctx->objmask = NULL;
}

/** Parse a token (cmdline API). */
static int
cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
	       unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token;
	const enum index *list;
	int len;
	int i;

	(void)hdr;
	token = &token_list[ctx->curr];
	/* Check argument length. */
	ctx->eol = 0;
	ctx->last = 0;
	for (len = 0; src[len]; ++len)
		if (src[len] == '#' || isspace(src[len]))
			break;
	if (!len)
		return -1;
	/* Last argument and EOL detection. */
	for (i = len; src[i]; ++i)
		if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
			break;
		else if (!isspace(src[i])) {
			ctx->last = 1;
			break;
		}
	for (; src[i]; ++i)
		if (src[i] == '\r' || src[i] == '\n') {
			ctx->eol = 1;
			break;
		}
	/* Initialize context if necessary. */
	if (!ctx->next_num) {
		if (!token->next)
			return 0;
		ctx->next[ctx->next_num++] = token->next[0];
	}
	/* Process argument through candidates. */
	ctx->prev = ctx->curr;
	list = ctx->next[ctx->next_num - 1];
	for (i = 0; list[i]; ++i) {
		const struct token *next = &token_list[list[i]];
		int tmp;

		ctx->curr = list[i];
		if (next->call)
			tmp = next->call(ctx, next, src, len, result, size);
		else
			tmp = parse_default(ctx, next, src, len, result, size);
		if (tmp == -1 || tmp != len)
			continue;
		token = next;
		break;
	}
	if (!list[i])
		return -1;
	--ctx->next_num;
	/* Push subsequent tokens if any. */
	if (token->next)
		for (i = 0; token->next[i]; ++i) {
			if (ctx->next_num == RTE_DIM(ctx->next))
				return -1;
			ctx->next[ctx->next_num++] = token->next[i];
		}
	/* Push arguments if any. */
	if (token->args)
		for (i = 0; token->args[i]; ++i) {
			if (ctx->args_num == RTE_DIM(ctx->args))
				return -1;
			ctx->args[ctx->args_num++] = token->args[i];
		}
	return len;
}
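
/*
 * For instance, while parsing "flow create 0 ingress pattern eth / end
 * actions drop / end", each call to this function consumes one token:
 * candidates from the current next[] list are tried in order, the first
 * callback that accepts exactly `len` bytes wins, and its own next[]/args[]
 * lists are pushed for the following tokens.
 */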

/** Return number of completion entries (cmdline API). */
static int
cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return 0;
	/*
	 * If there is a single token, use its completion callback, otherwise
	 * return the number of entries.
	 */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, 0, NULL, 0);
	}
	return i;
}

/** Return a completion entry (cmdline API). */
static int
cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
			  char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->curr];
	const enum index *list;
	int i;

	(void)hdr;
	/* Count number of tokens in current list. */
	if (ctx->next_num)
		list = ctx->next[ctx->next_num - 1];
	else
		list = token->next[0];
	for (i = 0; list[i]; ++i)
		;
	if (!i)
		return -1;
	/* If there is a single token, use its completion callback. */
	token = &token_list[list[0]];
	if (i == 1 && token->comp) {
		/* Save index for cmd_flow_get_help(). */
		ctx->prev = list[0];
		return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
	}
	/* Otherwise make sure the index is valid and use defaults. */
	if (index >= i)
		return -1;
	token = &token_list[list[index]];
	strlcpy(dst, token->name, size);
	/* Save index for cmd_flow_get_help(). */
	ctx->prev = list[index];
	return 0;
}

/** Populate help strings for current token (cmdline API). */
static int
cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
	if (token->help)
		cmd_flow.help_str = token->help;
	else
		cmd_flow.help_str = token->name;
	return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_flow_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_flow_get_help,
	},
	.offset = 0,
};
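
/*
 * Every dynamic token returned by cmd_flow_tok() reuses this single header:
 * the real parsing state lives in cmd_flow_context, which is why the ops
 * callbacks above can ignore their hdr argument.
 */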

/** Populate the next dynamic token. */
static void
cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
	     cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_flow.tokens))
		cmd_flow_context_init(ctx);
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && ctx->curr) {
		*hdr = NULL;
		return;
	}
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		for (i = 0; list[i]; ++i) {
			if (list[i] != END)
				continue;
			*hdr = NULL;
			return;
		}
	}
	*hdr = &cmd_flow_token_hdr;
}

/** Dispatch parsed buffer to function calls. */
static void
cmd_flow_parsed(const struct buffer *in)
{
	switch (in->command) {
	case VALIDATE:
		port_flow_validate(in->port, &in->args.vc.attr,
				   in->args.vc.pattern, in->args.vc.actions);
		break;
	case CREATE:
		port_flow_create(in->port, &in->args.vc.attr,
				 in->args.vc.pattern, in->args.vc.actions);
		break;
	case DESTROY:
		port_flow_destroy(in->port, in->args.destroy.rule_n,
				  in->args.destroy.rule);
		break;
	case FLUSH:
		port_flow_flush(in->port);
		break;
	case QUERY:
		port_flow_query(in->port, in->args.query.rule,
				&in->args.query.action);
		break;
	case LIST:
		port_flow_list(in->port, in->args.list.group_n,
			       in->args.list.group);
		break;
	case ISOLATE:
		port_flow_isolate(in->port, in->args.isolate.set);
		break;
	default:
		break;
	}
}
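
/*
 * Example mapping (testpmd command -> call):
 *   flow validate 0 ...    -> port_flow_validate()
 *   flow create 0 ...      -> port_flow_create()
 *   flow destroy 0 rule 1  -> port_flow_destroy()
 *   flow flush 0           -> port_flow_flush()
 *   flow query 0 0 count   -> port_flow_query()
 *   flow list 0            -> port_flow_list()
 *   flow isolate 0 1       -> port_flow_isolate()
 */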

/** Token generator and output processing callback (cmdline API). */
static void
cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	if (cl == NULL)
		cmd_flow_tok(arg0, arg2);
	else
		cmd_flow_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_flow = {
	.f = cmd_flow_cb,
	.data = NULL, /**< Unused. */
	.help_str = NULL, /**< Updated by cmd_flow_get_help(). */
	.tokens = {
		NULL,
	}, /**< Tokens are returned by cmd_flow_tok(). */
};

/** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */

static void
update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
{
	struct rte_flow_item_ipv4 *ipv4;
	struct rte_flow_item_eth *eth;
	struct rte_flow_item_ipv6 *ipv6;
	struct rte_flow_item_vxlan *vxlan;
	struct rte_flow_item_vxlan_gpe *gpe;
	struct rte_flow_item_nvgre *nvgre;
	uint32_t ipv6_vtc_flow;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		eth = (struct rte_flow_item_eth *)buf;
		if (next_proto)
			eth->type = rte_cpu_to_be_16(next_proto);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ipv4 = (struct rte_flow_item_ipv4 *)buf;
		ipv4->hdr.version_ihl = 0x45;
		ipv4->hdr.next_proto_id = (uint8_t)next_proto;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ipv6 = (struct rte_flow_item_ipv6 *)buf;
		ipv6->hdr.proto = (uint8_t)next_proto;
		ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
		ipv6_vtc_flow &= 0x0FFFFFFF; /**< Reset version bits. */
		ipv6_vtc_flow |= 0x60000000; /**< Set IPv6 version. */
		ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		vxlan = (struct rte_flow_item_vxlan *)buf;
		vxlan->flags = 0x08;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		gpe = (struct rte_flow_item_vxlan_gpe *)buf;
		gpe->flags = 0x0C;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		nvgre = (struct rte_flow_item_nvgre *)buf;
		nvgre->protocol = rte_cpu_to_be_16(0x6558);
		nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
		break;
	default:
		break;
	}
}
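
/*
 * Example: for "set raw_encap eth / ipv4 / udp / vxlan vni is 10 / end" the
 * EtherType, IPv4 version/IHL and next protocol, VXLAN flags, etc. are
 * patched here, i.e. fields the command line does not set by itself.
 */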

/** Helper to get an item's default mask. */
static const void *
flow_item_default_mask(const struct rte_flow_item *item)
{
	const void *mask = NULL;
	static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ANY:
		mask = &rte_flow_item_any_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VF:
		mask = &rte_flow_item_vf_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_PORT_ID:
		mask = &rte_flow_item_port_id_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_RAW:
		mask = &rte_flow_item_raw_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ETH:
		mask = &rte_flow_item_eth_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		mask = &rte_flow_item_vlan_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		mask = &rte_flow_item_ipv4_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		mask = &rte_flow_item_ipv6_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ICMP:
		mask = &rte_flow_item_icmp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		mask = &rte_flow_item_udp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		mask = &rte_flow_item_tcp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		mask = &rte_flow_item_sctp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		mask = &rte_flow_item_vxlan_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		mask = &rte_flow_item_vxlan_gpe_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_E_TAG:
		mask = &rte_flow_item_e_tag_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		mask = &rte_flow_item_nvgre_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		mask = &rte_flow_item_mpls_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		mask = &rte_flow_item_gre_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GRE_KEY:
		mask = &gre_key_default_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_META:
		mask = &rte_flow_item_meta_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_FUZZY:
		mask = &rte_flow_item_fuzzy_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GTP:
		mask = &rte_flow_item_gtp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_ESP:
		mask = &rte_flow_item_esp_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_GTP_PSC:
		mask = &rte_flow_item_gtp_psc_mask;
		break;
	case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
		mask = &rte_flow_item_pppoe_proto_id_mask;
		break;
	default:
		break;
	}
	return mask;
}
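
/*
 * Note: cmd_set_raw_parsed() below relies on these defaults, i.e. an item
 * given without a spec on the "set raw_encap"/"set raw_decap" command line
 * contributes its default mask as header contents before update_fields()
 * patches the protocol-dependent fields.
 */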

/** Dispatch parsed buffer to function calls. */
static void
cmd_set_raw_parsed(const struct buffer *in)
{
	uint32_t n = in->args.vc.pattern_n;
	int i = 0;
	struct rte_flow_item *item = NULL;
	size_t size = 0;
	uint8_t *data = NULL;
	uint8_t *data_tail = NULL;
	size_t *total_size = NULL;
	uint16_t upper_layer = 0;
	uint16_t proto = 0;

	RTE_ASSERT(in->command == SET_RAW_ENCAP ||
		   in->command == SET_RAW_DECAP);
	if (in->command == SET_RAW_ENCAP) {
		total_size = &raw_encap_conf.size;
		data = (uint8_t *)&raw_encap_conf.data;
	} else {
		total_size = &raw_decap_conf.size;
		data = (uint8_t *)&raw_decap_conf.data;
	}
	*total_size = 0;
	memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
	/* Process headers from upper layer to lower layer (L3/L4 -> L2). */
	data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
	for (i = n - 1; i >= 0; --i) {
		item = in->args.vc.pattern + i;
		if (item->spec == NULL)
			item->spec = flow_item_default_mask(item);
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			size = sizeof(struct rte_flow_item_eth);
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			size = sizeof(struct rte_flow_item_vlan);
			proto = RTE_ETHER_TYPE_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			size = sizeof(struct rte_flow_item_ipv4);
			proto = RTE_ETHER_TYPE_IPV4;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			size = sizeof(struct rte_flow_item_ipv6);
			proto = RTE_ETHER_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			size = sizeof(struct rte_flow_item_udp);
			proto = 0x11;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			size = sizeof(struct rte_flow_item_tcp);
			proto = 0x06;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			size = sizeof(struct rte_flow_item_vxlan);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			size = sizeof(struct rte_flow_item_vxlan_gpe);
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			size = sizeof(struct rte_flow_item_gre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			size = sizeof(rte_be32_t);
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			size = sizeof(struct rte_flow_item_mpls);
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			size = sizeof(struct rte_flow_item_nvgre);
			proto = 0x2F;
			break;
		case RTE_FLOW_ITEM_TYPE_GENEVE:
			size = sizeof(struct rte_flow_item_geneve);
			break;
		default:
			printf("Error - Not supported item\n");
			*total_size = 0;
			memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
			return;
		}
		*total_size += size;
		rte_memcpy(data_tail - (*total_size), item->spec, size);
		/* Update fields which cannot be set from the command line. */
		update_fields((data_tail - (*total_size)), item,
			      upper_layer);
		upper_layer = proto;
	}
	if (verbose_level & 0x1)
		printf("total data size is %zu\n", (*total_size));
	RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
}
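
/*
 * Example (testpmd, for illustration):
 *   set raw_encap eth src is 00:11:22:33:44:55 / ipv4 src is 1.1.1.1
 *       dst is 2.2.2.2 / udp / vxlan vni is 10 / end
 * Items are copied from the innermost header outward so the finished byte
 * string reads outer header first; the result is later consumed by the
 * raw_encap/raw_decap flow actions.
 */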

/** Populate help strings for current token (cmdline API). */
static int
cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
		     unsigned int size)
{
	struct context *ctx = &cmd_flow_context;
	const struct token *token = &token_list[ctx->prev];

	(void)hdr;
	if (!size)
		return -1;
	/* Set token type and update global help with details. */
	snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
	if (token->help)
		cmd_set_raw.help_str = token->help;
	else
		cmd_set_raw.help_str = token->name;
	return 0;
}

/** Token definition template (cmdline API). */
static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
	.ops = &(struct cmdline_token_ops){
		.parse = cmd_flow_parse,
		.complete_get_nb = cmd_flow_complete_get_nb,
		.complete_get_elt = cmd_flow_complete_get_elt,
		.get_help = cmd_set_raw_get_help,
	},
	.offset = 0,
};

/** Populate the next dynamic token. */
static void
cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
		cmdline_parse_token_hdr_t **hdr_inst)
{
	struct context *ctx = &cmd_flow_context;

	/* Always reinitialize context before requesting the first token. */
	if (!(hdr_inst - cmd_set_raw.tokens)) {
		cmd_flow_context_init(ctx);
		ctx->curr = START_SET;
	}
	/* Return NULL when no more tokens are expected. */
	if (!ctx->next_num && (ctx->curr != START_SET)) {
		*hdr = NULL;
		return;
	}
	/* Determine if command should end here. */
	if (ctx->eol && ctx->last && ctx->next_num) {
		const enum index *list = ctx->next[ctx->next_num - 1];
		int i;

		for (i = 0; list[i]; ++i) {
			if (list[i] != END)
				continue;
			*hdr = NULL;
			return;
		}
	}
	*hdr = &cmd_set_raw_token_hdr;
}

/** Token generator and output processing callback (cmdline API). */
static void
cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
{
	if (cl == NULL)
		cmd_set_raw_tok(arg0, arg2);
	else
		cmd_set_raw_parsed(arg0);
}

/** Global parser instance (cmdline API). */
cmdline_parse_inst_t cmd_set_raw = {
	.f = cmd_set_raw_cb,
	.data = NULL, /**< Unused. */
	.help_str = NULL, /**< Updated by cmd_set_raw_get_help(). */
	.tokens = {
		NULL,
	}, /**< Tokens are returned by cmd_set_raw_tok(). */
};