1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
49 /* Top-level command. */
51 /* Sub-level commands. */
55 /* Top-level command. */
57 /* Sub-level commands. */
66 /* Destroy arguments. */
69 /* Query arguments. */
75 /* Validate/create arguments. */
82 /* Validate/create pattern. */
119 ITEM_VLAN_INNER_TYPE,
151 ITEM_E_TAG_GRP_ECID_B,
160 ITEM_GRE_C_RSVD0_VER,
176 ITEM_ARP_ETH_IPV4_SHA,
177 ITEM_ARP_ETH_IPV4_SPA,
178 ITEM_ARP_ETH_IPV4_THA,
179 ITEM_ARP_ETH_IPV4_TPA,
181 ITEM_IPV6_EXT_NEXT_HDR,
186 ITEM_ICMP6_ND_NS_TARGET_ADDR,
188 ITEM_ICMP6_ND_NA_TARGET_ADDR,
190 ITEM_ICMP6_ND_OPT_TYPE,
191 ITEM_ICMP6_ND_OPT_SLA_ETH,
192 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
193 ITEM_ICMP6_ND_OPT_TLA_ETH,
194 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
203 /* Validate/create actions. */
223 ACTION_RSS_FUNC_DEFAULT,
224 ACTION_RSS_FUNC_TOEPLITZ,
225 ACTION_RSS_FUNC_SIMPLE_XOR,
237 ACTION_PHY_PORT_ORIGINAL,
238 ACTION_PHY_PORT_INDEX,
240 ACTION_PORT_ID_ORIGINAL,
244 ACTION_OF_SET_MPLS_TTL,
245 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
246 ACTION_OF_DEC_MPLS_TTL,
247 ACTION_OF_SET_NW_TTL,
248 ACTION_OF_SET_NW_TTL_NW_TTL,
249 ACTION_OF_DEC_NW_TTL,
250 ACTION_OF_COPY_TTL_OUT,
251 ACTION_OF_COPY_TTL_IN,
254 ACTION_OF_PUSH_VLAN_ETHERTYPE,
255 ACTION_OF_SET_VLAN_VID,
256 ACTION_OF_SET_VLAN_VID_VLAN_VID,
257 ACTION_OF_SET_VLAN_PCP,
258 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
260 ACTION_OF_POP_MPLS_ETHERTYPE,
262 ACTION_OF_PUSH_MPLS_ETHERTYPE,
269 ACTION_MPLSOGRE_ENCAP,
270 ACTION_MPLSOGRE_DECAP,
271 ACTION_MPLSOUDP_ENCAP,
272 ACTION_MPLSOUDP_DECAP,
274 ACTION_SET_IPV4_SRC_IPV4_SRC,
276 ACTION_SET_IPV4_DST_IPV4_DST,
278 ACTION_SET_IPV6_SRC_IPV6_SRC,
280 ACTION_SET_IPV6_DST_IPV6_DST,
282 ACTION_SET_TP_SRC_TP_SRC,
284 ACTION_SET_TP_DST_TP_DST,
290 ACTION_SET_MAC_SRC_MAC_SRC,
292 ACTION_SET_MAC_DST_MAC_DST,
294 ACTION_INC_TCP_SEQ_VALUE,
296 ACTION_DEC_TCP_SEQ_VALUE,
298 ACTION_INC_TCP_ACK_VALUE,
300 ACTION_DEC_TCP_ACK_VALUE,
/** Maximum size for pattern in struct rte_flow_item_raw. */
#define ITEM_RAW_PATTERN_SIZE 40

/**
 * Storage size for struct rte_flow_item_raw including pattern.
 *
 * The pattern bytes are stored immediately after the item structure
 * (see the ARGS_ENTRY_ARB() entry for ITEM_RAW_PATTERN below).
 */
#define ITEM_RAW_SIZE \
	(sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)

/** Maximum number of queue indices in struct rte_flow_action_rss. */
#define ACTION_RSS_QUEUE_NUM 32
315 /** Storage for struct rte_flow_action_rss including external data. */
316 struct action_rss_data {
317 struct rte_flow_action_rss conf;
318 uint8_t key[RSS_HASH_KEY_LENGTH];
319 uint16_t queue[ACTION_RSS_QUEUE_NUM];
/** Maximum number of items in struct rte_flow_action_vxlan_encap. */
#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6

/** Maximum data size in struct rte_flow_action_raw_encap. */
#define ACTION_RAW_ENCAP_MAX_DATA 128
327 /** Storage for struct rte_flow_action_raw_encap. */
328 struct raw_encap_conf {
329 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
330 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
334 struct raw_encap_conf raw_encap_conf = {.size = 0};
336 /** Storage for struct rte_flow_action_raw_decap. */
337 struct raw_decap_conf {
338 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
342 struct raw_decap_conf raw_decap_conf = {.size = 0};
344 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
345 struct action_vxlan_encap_data {
346 struct rte_flow_action_vxlan_encap conf;
347 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
348 struct rte_flow_item_eth item_eth;
349 struct rte_flow_item_vlan item_vlan;
351 struct rte_flow_item_ipv4 item_ipv4;
352 struct rte_flow_item_ipv6 item_ipv6;
354 struct rte_flow_item_udp item_udp;
355 struct rte_flow_item_vxlan item_vxlan;
358 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
359 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
361 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
362 struct action_nvgre_encap_data {
363 struct rte_flow_action_nvgre_encap conf;
364 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
365 struct rte_flow_item_eth item_eth;
366 struct rte_flow_item_vlan item_vlan;
368 struct rte_flow_item_ipv4 item_ipv4;
369 struct rte_flow_item_ipv6 item_ipv6;
371 struct rte_flow_item_nvgre item_nvgre;
/** Maximum data size in struct rte_flow_action_raw_encap. */
/*
 * NOTE(review): this duplicates the identical definition given earlier in
 * this file next to ACTION_VXLAN_ENCAP_ITEMS_NUM. Benign (identical
 * redefinition is legal C), but the two should be consolidated.
 */
#define ACTION_RAW_ENCAP_MAX_DATA 128
377 /** Storage for struct rte_flow_action_raw_encap including external data. */
378 struct action_raw_encap_data {
379 struct rte_flow_action_raw_encap conf;
380 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
381 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
384 /** Storage for struct rte_flow_action_raw_decap including external data. */
385 struct action_raw_decap_data {
386 struct rte_flow_action_raw_decap conf;
387 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
390 /** Maximum number of subsequent tokens and arguments on the stack. */
391 #define CTX_STACK_SIZE 16
393 /** Parser context. */
395 /** Stack of subsequent token lists to process. */
396 const enum index *next[CTX_STACK_SIZE];
397 /** Arguments for stacked tokens. */
398 const void *args[CTX_STACK_SIZE];
399 enum index curr; /**< Current token index. */
400 enum index prev; /**< Index of the last token seen. */
401 int next_num; /**< Number of entries in next[]. */
402 int args_num; /**< Number of entries in args[]. */
403 uint32_t eol:1; /**< EOL has been detected. */
404 uint32_t last:1; /**< No more arguments. */
405 portid_t port; /**< Current port ID (for completions). */
406 uint32_t objdata; /**< Object-specific data. */
407 void *object; /**< Address of current object for relative offsets. */
408 void *objmask; /**< Object a full mask must be written to. */
411 /** Token argument. */
413 uint32_t hton:1; /**< Use network byte ordering. */
414 uint32_t sign:1; /**< Value is signed. */
415 uint32_t bounded:1; /**< Value is bounded. */
416 uintmax_t min; /**< Minimum value if bounded. */
417 uintmax_t max; /**< Maximum value if bounded. */
418 uint32_t offset; /**< Relative offset from ctx->object. */
419 uint32_t size; /**< Field size. */
420 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
423 /** Parser token definition. */
425 /** Type displayed during completion (defaults to "TOKEN"). */
427 /** Help displayed during completion (defaults to token name). */
429 /** Private data used by parser functions. */
432 * Lists of subsequent tokens to push on the stack. Each call to the
433 * parser consumes the last entry of that stack.
435 const enum index *const *next;
436 /** Arguments stack for subsequent tokens that need them. */
437 const struct arg *const *args;
439 * Token-processing callback, returns -1 in case of error, the
440 * length of the matched string otherwise. If NULL, attempts to
441 * match the token name.
443 * If buf is not NULL, the result should be stored in it according
444 * to context. An error is returned if not large enough.
446 int (*call)(struct context *ctx, const struct token *token,
447 const char *str, unsigned int len,
448 void *buf, unsigned int size);
450 * Callback that provides possible values for this token, used for
451 * completion. Returns -1 in case of error, the number of possible
452 * values otherwise. If NULL, the token name is used.
454 * If buf is not NULL, entry index ent is written to buf and the
455 * full length of the entry is returned (same behavior as
458 int (*comp)(struct context *ctx, const struct token *token,
459 unsigned int ent, char *buf, unsigned int size);
460 /** Mandatory token name, no default value. */
464 /** Static initializer for the next field. */
465 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
467 /** Static initializer for a NEXT() entry. */
468 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
470 /** Static initializer for the args field. */
471 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
473 /** Static initializer for ARGS() to target a field. */
474 #define ARGS_ENTRY(s, f) \
475 (&(const struct arg){ \
476 .offset = offsetof(s, f), \
477 .size = sizeof(((s *)0)->f), \
480 /** Static initializer for ARGS() to target a bit-field. */
481 #define ARGS_ENTRY_BF(s, f, b) \
482 (&(const struct arg){ \
484 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
487 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
488 #define ARGS_ENTRY_MASK(s, f, m) \
489 (&(const struct arg){ \
490 .offset = offsetof(s, f), \
491 .size = sizeof(((s *)0)->f), \
492 .mask = (const void *)(m), \
495 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
496 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
497 (&(const struct arg){ \
499 .offset = offsetof(s, f), \
500 .size = sizeof(((s *)0)->f), \
501 .mask = (const void *)(m), \
504 /** Static initializer for ARGS() to target a pointer. */
505 #define ARGS_ENTRY_PTR(s, f) \
506 (&(const struct arg){ \
507 .size = sizeof(*((s *)0)->f), \
510 /** Static initializer for ARGS() with arbitrary offset and size. */
511 #define ARGS_ENTRY_ARB(o, s) \
512 (&(const struct arg){ \
517 /** Same as ARGS_ENTRY_ARB() with bounded values. */
518 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
519 (&(const struct arg){ \
527 /** Same as ARGS_ENTRY() using network byte ordering. */
528 #define ARGS_ENTRY_HTON(s, f) \
529 (&(const struct arg){ \
531 .offset = offsetof(s, f), \
532 .size = sizeof(((s *)0)->f), \
535 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
536 #define ARG_ENTRY_HTON(s) \
537 (&(const struct arg){ \
543 /** Parser output buffer layout expected by cmd_flow_parsed(). */
545 enum index command; /**< Flow command. */
546 portid_t port; /**< Affected port ID. */
549 struct rte_flow_attr attr;
550 struct rte_flow_item *pattern;
551 struct rte_flow_action *actions;
555 } vc; /**< Validate/create arguments. */
559 } destroy; /**< Destroy arguments. */
562 struct rte_flow_action action;
563 } query; /**< Query arguments. */
567 } list; /**< List arguments. */
570 } isolate; /**< Isolated mode arguments. */
571 } args; /**< Command arguments. */
574 /** Private data for pattern items. */
575 struct parse_item_priv {
576 enum rte_flow_item_type type; /**< Item type. */
577 uint32_t size; /**< Size of item specification structure. */
580 #define PRIV_ITEM(t, s) \
581 (&(const struct parse_item_priv){ \
582 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
586 /** Private data for actions. */
587 struct parse_action_priv {
588 enum rte_flow_action_type type; /**< Action type. */
589 uint32_t size; /**< Size of action configuration structure. */
592 #define PRIV_ACTION(t, s) \
593 (&(const struct parse_action_priv){ \
594 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
598 static const enum index next_vc_attr[] = {
608 static const enum index next_destroy_attr[] = {
614 static const enum index next_list_attr[] = {
620 static const enum index item_param[] = {
629 static const enum index next_item[] = {
665 ITEM_ICMP6_ND_OPT_SLA_ETH,
666 ITEM_ICMP6_ND_OPT_TLA_ETH,
674 static const enum index item_fuzzy[] = {
680 static const enum index item_any[] = {
686 static const enum index item_vf[] = {
692 static const enum index item_phy_port[] = {
698 static const enum index item_port_id[] = {
704 static const enum index item_mark[] = {
710 static const enum index item_raw[] = {
720 static const enum index item_eth[] = {
728 static const enum index item_vlan[] = {
733 ITEM_VLAN_INNER_TYPE,
738 static const enum index item_ipv4[] = {
748 static const enum index item_ipv6[] = {
759 static const enum index item_icmp[] = {
766 static const enum index item_udp[] = {
773 static const enum index item_tcp[] = {
781 static const enum index item_sctp[] = {
790 static const enum index item_vxlan[] = {
796 static const enum index item_e_tag[] = {
797 ITEM_E_TAG_GRP_ECID_B,
802 static const enum index item_nvgre[] = {
808 static const enum index item_mpls[] = {
816 static const enum index item_gre[] = {
818 ITEM_GRE_C_RSVD0_VER,
826 static const enum index item_gre_key[] = {
832 static const enum index item_gtp[] = {
838 static const enum index item_geneve[] = {
845 static const enum index item_vxlan_gpe[] = {
851 static const enum index item_arp_eth_ipv4[] = {
852 ITEM_ARP_ETH_IPV4_SHA,
853 ITEM_ARP_ETH_IPV4_SPA,
854 ITEM_ARP_ETH_IPV4_THA,
855 ITEM_ARP_ETH_IPV4_TPA,
860 static const enum index item_ipv6_ext[] = {
861 ITEM_IPV6_EXT_NEXT_HDR,
866 static const enum index item_icmp6[] = {
873 static const enum index item_icmp6_nd_ns[] = {
874 ITEM_ICMP6_ND_NS_TARGET_ADDR,
879 static const enum index item_icmp6_nd_na[] = {
880 ITEM_ICMP6_ND_NA_TARGET_ADDR,
885 static const enum index item_icmp6_nd_opt[] = {
886 ITEM_ICMP6_ND_OPT_TYPE,
891 static const enum index item_icmp6_nd_opt_sla_eth[] = {
892 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
897 static const enum index item_icmp6_nd_opt_tla_eth[] = {
898 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
903 static const enum index item_meta[] = {
909 static const enum index item_gtp_psc[] = {
916 static const enum index next_action[] = {
932 ACTION_OF_SET_MPLS_TTL,
933 ACTION_OF_DEC_MPLS_TTL,
934 ACTION_OF_SET_NW_TTL,
935 ACTION_OF_DEC_NW_TTL,
936 ACTION_OF_COPY_TTL_OUT,
937 ACTION_OF_COPY_TTL_IN,
940 ACTION_OF_SET_VLAN_VID,
941 ACTION_OF_SET_VLAN_PCP,
950 ACTION_MPLSOGRE_ENCAP,
951 ACTION_MPLSOGRE_DECAP,
952 ACTION_MPLSOUDP_ENCAP,
953 ACTION_MPLSOUDP_DECAP,
974 static const enum index action_mark[] = {
980 static const enum index action_queue[] = {
986 static const enum index action_count[] = {
993 static const enum index action_rss[] = {
1004 static const enum index action_vf[] = {
1011 static const enum index action_phy_port[] = {
1012 ACTION_PHY_PORT_ORIGINAL,
1013 ACTION_PHY_PORT_INDEX,
1018 static const enum index action_port_id[] = {
1019 ACTION_PORT_ID_ORIGINAL,
1025 static const enum index action_meter[] = {
1031 static const enum index action_of_set_mpls_ttl[] = {
1032 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1037 static const enum index action_of_set_nw_ttl[] = {
1038 ACTION_OF_SET_NW_TTL_NW_TTL,
1043 static const enum index action_of_push_vlan[] = {
1044 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1049 static const enum index action_of_set_vlan_vid[] = {
1050 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1055 static const enum index action_of_set_vlan_pcp[] = {
1056 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1061 static const enum index action_of_pop_mpls[] = {
1062 ACTION_OF_POP_MPLS_ETHERTYPE,
1067 static const enum index action_of_push_mpls[] = {
1068 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1073 static const enum index action_set_ipv4_src[] = {
1074 ACTION_SET_IPV4_SRC_IPV4_SRC,
1079 static const enum index action_set_mac_src[] = {
1080 ACTION_SET_MAC_SRC_MAC_SRC,
1085 static const enum index action_set_ipv4_dst[] = {
1086 ACTION_SET_IPV4_DST_IPV4_DST,
1091 static const enum index action_set_ipv6_src[] = {
1092 ACTION_SET_IPV6_SRC_IPV6_SRC,
1097 static const enum index action_set_ipv6_dst[] = {
1098 ACTION_SET_IPV6_DST_IPV6_DST,
1103 static const enum index action_set_tp_src[] = {
1104 ACTION_SET_TP_SRC_TP_SRC,
1109 static const enum index action_set_tp_dst[] = {
1110 ACTION_SET_TP_DST_TP_DST,
1115 static const enum index action_set_ttl[] = {
1121 static const enum index action_jump[] = {
1127 static const enum index action_set_mac_dst[] = {
1128 ACTION_SET_MAC_DST_MAC_DST,
1133 static const enum index action_inc_tcp_seq[] = {
1134 ACTION_INC_TCP_SEQ_VALUE,
1139 static const enum index action_dec_tcp_seq[] = {
1140 ACTION_DEC_TCP_SEQ_VALUE,
1145 static const enum index action_inc_tcp_ack[] = {
1146 ACTION_INC_TCP_ACK_VALUE,
1151 static const enum index action_dec_tcp_ack[] = {
1152 ACTION_DEC_TCP_ACK_VALUE,
/*
 * Forward declarations of token-processing callbacks. These implement the
 * struct token "call" interface: return -1 in case of error, the length of
 * the matched string otherwise.
 */
static int parse_set_raw_encap_decap(struct context *, const struct token *,
				     const char *, unsigned int,
				     void *, unsigned int);
static int parse_set_init(struct context *, const struct token *,
			  const char *, unsigned int,
			  void *, unsigned int);
static int parse_init(struct context *, const struct token *,
		      const char *, unsigned int,
		      void *, unsigned int);
static int parse_vc(struct context *, const struct token *,
		    const char *, unsigned int,
		    void *, unsigned int);
static int parse_vc_spec(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
static int parse_vc_conf(struct context *, const struct token *,
			 const char *, unsigned int, void *, unsigned int);
1173 static int parse_vc_action_rss(struct context *, const struct token *,
1174 const char *, unsigned int, void *,
1176 static int parse_vc_action_rss_func(struct context *, const struct token *,
1177 const char *, unsigned int, void *,
1179 static int parse_vc_action_rss_type(struct context *, const struct token *,
1180 const char *, unsigned int, void *,
1182 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1183 const char *, unsigned int, void *,
1185 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1186 const char *, unsigned int, void *,
1188 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1189 const char *, unsigned int, void *,
1191 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1192 const char *, unsigned int, void *,
1194 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1195 const char *, unsigned int, void *,
1197 static int parse_vc_action_mplsogre_encap(struct context *,
1198 const struct token *, const char *,
1199 unsigned int, void *, unsigned int);
1200 static int parse_vc_action_mplsogre_decap(struct context *,
1201 const struct token *, const char *,
1202 unsigned int, void *, unsigned int);
1203 static int parse_vc_action_mplsoudp_encap(struct context *,
1204 const struct token *, const char *,
1205 unsigned int, void *, unsigned int);
1206 static int parse_vc_action_mplsoudp_decap(struct context *,
1207 const struct token *, const char *,
1208 unsigned int, void *, unsigned int);
1209 static int parse_vc_action_raw_encap(struct context *,
1210 const struct token *, const char *,
1211 unsigned int, void *, unsigned int);
1212 static int parse_vc_action_raw_decap(struct context *,
1213 const struct token *, const char *,
1214 unsigned int, void *, unsigned int);
1215 static int parse_destroy(struct context *, const struct token *,
1216 const char *, unsigned int,
1217 void *, unsigned int);
1218 static int parse_flush(struct context *, const struct token *,
1219 const char *, unsigned int,
1220 void *, unsigned int);
1221 static int parse_query(struct context *, const struct token *,
1222 const char *, unsigned int,
1223 void *, unsigned int);
1224 static int parse_action(struct context *, const struct token *,
1225 const char *, unsigned int,
1226 void *, unsigned int);
1227 static int parse_list(struct context *, const struct token *,
1228 const char *, unsigned int,
1229 void *, unsigned int);
1230 static int parse_isolate(struct context *, const struct token *,
1231 const char *, unsigned int,
1232 void *, unsigned int);
1233 static int parse_int(struct context *, const struct token *,
1234 const char *, unsigned int,
1235 void *, unsigned int);
1236 static int parse_prefix(struct context *, const struct token *,
1237 const char *, unsigned int,
1238 void *, unsigned int);
1239 static int parse_boolean(struct context *, const struct token *,
1240 const char *, unsigned int,
1241 void *, unsigned int);
1242 static int parse_string(struct context *, const struct token *,
1243 const char *, unsigned int,
1244 void *, unsigned int);
1245 static int parse_hex(struct context *ctx, const struct token *token,
1246 const char *str, unsigned int len,
1247 void *buf, unsigned int size);
1248 static int parse_mac_addr(struct context *, const struct token *,
1249 const char *, unsigned int,
1250 void *, unsigned int);
1251 static int parse_ipv4_addr(struct context *, const struct token *,
1252 const char *, unsigned int,
1253 void *, unsigned int);
1254 static int parse_ipv6_addr(struct context *, const struct token *,
1255 const char *, unsigned int,
1256 void *, unsigned int);
1257 static int parse_port(struct context *, const struct token *,
1258 const char *, unsigned int,
1259 void *, unsigned int);
/*
 * Forward declarations of completion callbacks. These implement the
 * struct token "comp" interface: return -1 in case of error, the number
 * of possible values otherwise.
 */
static int comp_none(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_boolean(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_action(struct context *, const struct token *,
		       unsigned int, char *, unsigned int);
static int comp_port(struct context *, const struct token *,
		     unsigned int, char *, unsigned int);
static int comp_rule_id(struct context *, const struct token *,
			unsigned int, char *, unsigned int);
static int comp_vc_action_rss_type(struct context *, const struct token *,
				   unsigned int, char *, unsigned int);
static int comp_vc_action_rss_queue(struct context *, const struct token *,
				    unsigned int, char *, unsigned int);
1275 /** Token definitions. */
1276 static const struct token token_list[] = {
1277 /* Special tokens. */
1280 .help = "null entry, abused as the entry point",
1281 .next = NEXT(NEXT_ENTRY(FLOW)),
1286 .help = "command may end here",
1289 .name = "START_SET",
1290 .help = "null entry, abused as the entry point for set",
1291 .next = NEXT(NEXT_ENTRY(SET)),
1296 .help = "set command may end here",
1298 /* Common tokens. */
1302 .help = "integer value",
1307 .name = "{unsigned}",
1309 .help = "unsigned integer value",
1316 .help = "prefix length for bit-mask",
1317 .call = parse_prefix,
1321 .name = "{boolean}",
1323 .help = "any boolean value",
1324 .call = parse_boolean,
1325 .comp = comp_boolean,
1330 .help = "fixed string",
1331 .call = parse_string,
1337 .help = "fixed string",
1342 .name = "{MAC address}",
1344 .help = "standard MAC address notation",
1345 .call = parse_mac_addr,
1349 .name = "{IPv4 address}",
1350 .type = "IPV4 ADDRESS",
1351 .help = "standard IPv4 address notation",
1352 .call = parse_ipv4_addr,
1356 .name = "{IPv6 address}",
1357 .type = "IPV6 ADDRESS",
1358 .help = "standard IPv6 address notation",
1359 .call = parse_ipv6_addr,
1363 .name = "{rule id}",
1365 .help = "rule identifier",
1367 .comp = comp_rule_id,
1370 .name = "{port_id}",
1372 .help = "port identifier",
1377 .name = "{group_id}",
1379 .help = "group identifier",
1383 [PRIORITY_LEVEL] = {
1386 .help = "priority level",
1390 /* Top-level command. */
1393 .type = "{command} {port_id} [{arg} [...]]",
1394 .help = "manage ingress/egress flow rules",
1395 .next = NEXT(NEXT_ENTRY
1405 /* Sub-level commands. */
1408 .help = "check whether a flow rule can be created",
1409 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1410 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1415 .help = "create a flow rule",
1416 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1417 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1422 .help = "destroy specific flow rules",
1423 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1424 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1425 .call = parse_destroy,
1429 .help = "destroy all flow rules",
1430 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1431 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1432 .call = parse_flush,
1436 .help = "query an existing flow rule",
1437 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1438 NEXT_ENTRY(RULE_ID),
1439 NEXT_ENTRY(PORT_ID)),
1440 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1441 ARGS_ENTRY(struct buffer, args.query.rule),
1442 ARGS_ENTRY(struct buffer, port)),
1443 .call = parse_query,
1447 .help = "list existing flow rules",
1448 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1449 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1454 .help = "restrict ingress traffic to the defined flow rules",
1455 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1456 NEXT_ENTRY(PORT_ID)),
1457 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1458 ARGS_ENTRY(struct buffer, port)),
1459 .call = parse_isolate,
1461 /* Destroy arguments. */
1464 .help = "specify a rule identifier",
1465 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1466 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1467 .call = parse_destroy,
1469 /* Query arguments. */
1473 .help = "action to query, must be part of the rule",
1474 .call = parse_action,
1475 .comp = comp_action,
1477 /* List arguments. */
1480 .help = "specify a group",
1481 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1482 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1485 /* Validate/create attributes. */
1488 .help = "specify a group",
1489 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1490 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1495 .help = "specify a priority level",
1496 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1497 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1502 .help = "affect rule to ingress",
1503 .next = NEXT(next_vc_attr),
1508 .help = "affect rule to egress",
1509 .next = NEXT(next_vc_attr),
1514 .help = "apply rule directly to endpoints found in pattern",
1515 .next = NEXT(next_vc_attr),
1518 /* Validate/create pattern. */
1521 .help = "submit a list of pattern items",
1522 .next = NEXT(next_item),
1527 .help = "match value perfectly (with full bit-mask)",
1528 .call = parse_vc_spec,
1530 [ITEM_PARAM_SPEC] = {
1532 .help = "match value according to configured bit-mask",
1533 .call = parse_vc_spec,
1535 [ITEM_PARAM_LAST] = {
1537 .help = "specify upper bound to establish a range",
1538 .call = parse_vc_spec,
1540 [ITEM_PARAM_MASK] = {
1542 .help = "specify bit-mask with relevant bits set to one",
1543 .call = parse_vc_spec,
1545 [ITEM_PARAM_PREFIX] = {
1547 .help = "generate bit-mask from a prefix length",
1548 .call = parse_vc_spec,
1552 .help = "specify next pattern item",
1553 .next = NEXT(next_item),
1557 .help = "end list of pattern items",
1558 .priv = PRIV_ITEM(END, 0),
1559 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1564 .help = "no-op pattern item",
1565 .priv = PRIV_ITEM(VOID, 0),
1566 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1571 .help = "perform actions when pattern does not match",
1572 .priv = PRIV_ITEM(INVERT, 0),
1573 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1578 .help = "match any protocol for the current layer",
1579 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1580 .next = NEXT(item_any),
1585 .help = "number of layers covered",
1586 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1587 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1591 .help = "match traffic from/to the physical function",
1592 .priv = PRIV_ITEM(PF, 0),
1593 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1598 .help = "match traffic from/to a virtual function ID",
1599 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1600 .next = NEXT(item_vf),
1606 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1607 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1611 .help = "match traffic from/to a specific physical port",
1612 .priv = PRIV_ITEM(PHY_PORT,
1613 sizeof(struct rte_flow_item_phy_port)),
1614 .next = NEXT(item_phy_port),
1617 [ITEM_PHY_PORT_INDEX] = {
1619 .help = "physical port index",
1620 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1621 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1625 .help = "match traffic from/to a given DPDK port ID",
1626 .priv = PRIV_ITEM(PORT_ID,
1627 sizeof(struct rte_flow_item_port_id)),
1628 .next = NEXT(item_port_id),
1631 [ITEM_PORT_ID_ID] = {
1633 .help = "DPDK port ID",
1634 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1635 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1639 .help = "match traffic against value set in previously matched rule",
1640 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1641 .next = NEXT(item_mark),
1646 .help = "Integer value to match against",
1647 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1648 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1652 .help = "match an arbitrary byte string",
1653 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1654 .next = NEXT(item_raw),
1657 [ITEM_RAW_RELATIVE] = {
1659 .help = "look for pattern after the previous item",
1660 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1661 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1664 [ITEM_RAW_SEARCH] = {
1666 .help = "search pattern from offset (see also limit)",
1667 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1668 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1671 [ITEM_RAW_OFFSET] = {
1673 .help = "absolute or relative offset for pattern",
1674 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1675 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1677 [ITEM_RAW_LIMIT] = {
1679 .help = "search area limit for start of pattern",
1680 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1681 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1683 [ITEM_RAW_PATTERN] = {
1685 .help = "byte string to look for",
1686 .next = NEXT(item_raw,
1688 NEXT_ENTRY(ITEM_PARAM_IS,
1691 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1692 ARGS_ENTRY(struct rte_flow_item_raw, length),
1693 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1694 ITEM_RAW_PATTERN_SIZE)),
1698 .help = "match Ethernet header",
1699 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1700 .next = NEXT(item_eth),
1705 .help = "destination MAC",
1706 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1707 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1711 .help = "source MAC",
1712 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1713 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1717 .help = "EtherType",
1718 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1719 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1723 .help = "match 802.1Q/ad VLAN tag",
1724 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1725 .next = NEXT(item_vlan),
1730 .help = "tag control information",
1731 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1732 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1736 .help = "priority code point",
1737 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1738 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1743 .help = "drop eligible indicator",
1744 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1745 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1750 .help = "VLAN identifier",
1751 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1752 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1755 [ITEM_VLAN_INNER_TYPE] = {
1756 .name = "inner_type",
1757 .help = "inner EtherType",
1758 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1759 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1764 .help = "match IPv4 header",
1765 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1766 .next = NEXT(item_ipv4),
1771 .help = "type of service",
1772 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1773 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1774 hdr.type_of_service)),
1778 .help = "time to live",
1779 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1780 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1783 [ITEM_IPV4_PROTO] = {
1785 .help = "next protocol ID",
1786 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1787 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1788 hdr.next_proto_id)),
1792 .help = "source address",
1793 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1794 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1799 .help = "destination address",
1800 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1801 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1806 .help = "match IPv6 header",
1807 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1808 .next = NEXT(item_ipv6),
1813 .help = "traffic class",
1814 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1815 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1817 "\x0f\xf0\x00\x00")),
1819 [ITEM_IPV6_FLOW] = {
1821 .help = "flow label",
1822 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1823 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1825 "\x00\x0f\xff\xff")),
1827 [ITEM_IPV6_PROTO] = {
1829 .help = "protocol (next header)",
1830 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1831 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1836 .help = "hop limit",
1837 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1838 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1843 .help = "source address",
1844 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1845 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1850 .help = "destination address",
1851 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1852 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1857 .help = "match ICMP header",
1858 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1859 .next = NEXT(item_icmp),
1862 [ITEM_ICMP_TYPE] = {
1864 .help = "ICMP packet type",
1865 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1866 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1869 [ITEM_ICMP_CODE] = {
1871 .help = "ICMP packet code",
1872 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1873 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1878 .help = "match UDP header",
1879 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1880 .next = NEXT(item_udp),
1885 .help = "UDP source port",
1886 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1887 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1892 .help = "UDP destination port",
1893 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1894 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1899 .help = "match TCP header",
1900 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1901 .next = NEXT(item_tcp),
1906 .help = "TCP source port",
1907 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1908 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1913 .help = "TCP destination port",
1914 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1915 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1918 [ITEM_TCP_FLAGS] = {
1920 .help = "TCP flags",
1921 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1922 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1927 .help = "match SCTP header",
1928 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1929 .next = NEXT(item_sctp),
1934 .help = "SCTP source port",
1935 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1936 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1941 .help = "SCTP destination port",
1942 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1943 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1948 .help = "validation tag",
1949 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1950 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1953 [ITEM_SCTP_CKSUM] = {
1956 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1957 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1962 .help = "match VXLAN header",
1963 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1964 .next = NEXT(item_vxlan),
1967 [ITEM_VXLAN_VNI] = {
1969 .help = "VXLAN identifier",
1970 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1971 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1975 .help = "match E-Tag header",
1976 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1977 .next = NEXT(item_e_tag),
1980 [ITEM_E_TAG_GRP_ECID_B] = {
1981 .name = "grp_ecid_b",
1982 .help = "GRP and E-CID base",
1983 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1984 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1990 .help = "match NVGRE header",
1991 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1992 .next = NEXT(item_nvgre),
1995 [ITEM_NVGRE_TNI] = {
1997 .help = "virtual subnet ID",
1998 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1999 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2003 .help = "match MPLS header",
2004 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2005 .next = NEXT(item_mpls),
2008 [ITEM_MPLS_LABEL] = {
2010 .help = "MPLS label",
2011 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2012 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2018 .help = "MPLS Traffic Class",
2019 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2020 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2026 .help = "MPLS Bottom-of-Stack",
2027 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2028 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2034 .help = "match GRE header",
2035 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2036 .next = NEXT(item_gre),
2039 [ITEM_GRE_PROTO] = {
2041 .help = "GRE protocol type",
2042 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2043 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2046 [ITEM_GRE_C_RSVD0_VER] = {
2047 .name = "c_rsvd0_ver",
2049 "checksum (1b), undefined (1b), key bit (1b),"
2050 " sequence number (1b), reserved 0 (9b),"
2052 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2053 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2056 [ITEM_GRE_C_BIT] = {
2058 .help = "checksum bit (C)",
2059 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2060 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2062 "\x80\x00\x00\x00")),
2064 [ITEM_GRE_S_BIT] = {
2066 .help = "sequence number bit (S)",
2067 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2068 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2070 "\x10\x00\x00\x00")),
2072 [ITEM_GRE_K_BIT] = {
2074 .help = "key bit (K)",
2075 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2076 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2078 "\x20\x00\x00\x00")),
2082 .help = "fuzzy pattern match, expect faster than default",
2083 .priv = PRIV_ITEM(FUZZY,
2084 sizeof(struct rte_flow_item_fuzzy)),
2085 .next = NEXT(item_fuzzy),
2088 [ITEM_FUZZY_THRESH] = {
2090 .help = "match accuracy threshold",
2091 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2092 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2097 .help = "match GTP header",
2098 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2099 .next = NEXT(item_gtp),
2104 .help = "tunnel endpoint identifier",
2105 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2106 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2110 .help = "match GTP header",
2111 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2112 .next = NEXT(item_gtp),
2117 .help = "match GTP header",
2118 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2119 .next = NEXT(item_gtp),
2124 .help = "match GENEVE header",
2125 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2126 .next = NEXT(item_geneve),
2129 [ITEM_GENEVE_VNI] = {
2131 .help = "virtual network identifier",
2132 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2133 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2135 [ITEM_GENEVE_PROTO] = {
2137 .help = "GENEVE protocol type",
2138 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2139 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2142 [ITEM_VXLAN_GPE] = {
2143 .name = "vxlan-gpe",
2144 .help = "match VXLAN-GPE header",
2145 .priv = PRIV_ITEM(VXLAN_GPE,
2146 sizeof(struct rte_flow_item_vxlan_gpe)),
2147 .next = NEXT(item_vxlan_gpe),
2150 [ITEM_VXLAN_GPE_VNI] = {
2152 .help = "VXLAN-GPE identifier",
2153 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2154 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2157 [ITEM_ARP_ETH_IPV4] = {
2158 .name = "arp_eth_ipv4",
2159 .help = "match ARP header for Ethernet/IPv4",
2160 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2161 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2162 .next = NEXT(item_arp_eth_ipv4),
2165 [ITEM_ARP_ETH_IPV4_SHA] = {
2167 .help = "sender hardware address",
2168 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2170 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2173 [ITEM_ARP_ETH_IPV4_SPA] = {
2175 .help = "sender IPv4 address",
2176 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2178 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2181 [ITEM_ARP_ETH_IPV4_THA] = {
2183 .help = "target hardware address",
2184 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2186 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2189 [ITEM_ARP_ETH_IPV4_TPA] = {
2191 .help = "target IPv4 address",
2192 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2194 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2199 .help = "match presence of any IPv6 extension header",
2200 .priv = PRIV_ITEM(IPV6_EXT,
2201 sizeof(struct rte_flow_item_ipv6_ext)),
2202 .next = NEXT(item_ipv6_ext),
2205 [ITEM_IPV6_EXT_NEXT_HDR] = {
2207 .help = "next header",
2208 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2209 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2214 .help = "match any ICMPv6 header",
2215 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2216 .next = NEXT(item_icmp6),
2219 [ITEM_ICMP6_TYPE] = {
2221 .help = "ICMPv6 type",
2222 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2223 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2226 [ITEM_ICMP6_CODE] = {
2228 .help = "ICMPv6 code",
2229 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2230 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2233 [ITEM_ICMP6_ND_NS] = {
2234 .name = "icmp6_nd_ns",
2235 .help = "match ICMPv6 neighbor discovery solicitation",
2236 .priv = PRIV_ITEM(ICMP6_ND_NS,
2237 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2238 .next = NEXT(item_icmp6_nd_ns),
2241 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2242 .name = "target_addr",
2243 .help = "target address",
2244 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2246 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2249 [ITEM_ICMP6_ND_NA] = {
2250 .name = "icmp6_nd_na",
2251 .help = "match ICMPv6 neighbor discovery advertisement",
2252 .priv = PRIV_ITEM(ICMP6_ND_NA,
2253 sizeof(struct rte_flow_item_icmp6_nd_na)),
2254 .next = NEXT(item_icmp6_nd_na),
2257 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2258 .name = "target_addr",
2259 .help = "target address",
2260 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2262 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2265 [ITEM_ICMP6_ND_OPT] = {
2266 .name = "icmp6_nd_opt",
2267 .help = "match presence of any ICMPv6 neighbor discovery"
2269 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2270 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2271 .next = NEXT(item_icmp6_nd_opt),
2274 [ITEM_ICMP6_ND_OPT_TYPE] = {
2276 .help = "ND option type",
2277 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2279 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2282 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2283 .name = "icmp6_nd_opt_sla_eth",
2284 .help = "match ICMPv6 neighbor discovery source Ethernet"
2285 " link-layer address option",
2287 (ICMP6_ND_OPT_SLA_ETH,
2288 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2289 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2292 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2294 .help = "source Ethernet LLA",
2295 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2297 .args = ARGS(ARGS_ENTRY_HTON
2298 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2300 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2301 .name = "icmp6_nd_opt_tla_eth",
2302 .help = "match ICMPv6 neighbor discovery target Ethernet"
2303 " link-layer address option",
2305 (ICMP6_ND_OPT_TLA_ETH,
2306 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2307 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2310 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2312 .help = "target Ethernet LLA",
2313 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2315 .args = ARGS(ARGS_ENTRY_HTON
2316 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2320 .help = "match metadata header",
2321 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2322 .next = NEXT(item_meta),
2325 [ITEM_META_DATA] = {
2327 .help = "metadata value",
2328 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2329 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2330 data, "\xff\xff\xff\xff")),
2334 .help = "match GRE key",
2335 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2336 .next = NEXT(item_gre_key),
2339 [ITEM_GRE_KEY_VALUE] = {
2341 .help = "key value",
2342 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2343 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2347 .help = "match GTP extension header with type 0x85",
2348 .priv = PRIV_ITEM(GTP_PSC,
2349 sizeof(struct rte_flow_item_gtp_psc)),
2350 .next = NEXT(item_gtp_psc),
2353 [ITEM_GTP_PSC_QFI] = {
2355 .help = "QoS flow identifier",
2356 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2357 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2360 [ITEM_GTP_PSC_PDU_T] = {
2363 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2364 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2368 /* Validate/create actions. */
2371 .help = "submit a list of associated actions",
2372 .next = NEXT(next_action),
2377 .help = "specify next action",
2378 .next = NEXT(next_action),
2382 .help = "end list of actions",
2383 .priv = PRIV_ACTION(END, 0),
2388 .help = "no-op action",
2389 .priv = PRIV_ACTION(VOID, 0),
2390 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2393 [ACTION_PASSTHRU] = {
2395 .help = "let subsequent rule process matched packets",
2396 .priv = PRIV_ACTION(PASSTHRU, 0),
2397 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2402 .help = "redirect traffic to a given group",
2403 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2404 .next = NEXT(action_jump),
2407 [ACTION_JUMP_GROUP] = {
2409 .help = "group to redirect traffic to",
2410 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2411 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2412 .call = parse_vc_conf,
2416 .help = "attach 32 bit value to packets",
2417 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2418 .next = NEXT(action_mark),
2421 [ACTION_MARK_ID] = {
2423 .help = "32 bit value to return with packets",
2424 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2425 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2426 .call = parse_vc_conf,
2430 .help = "flag packets",
2431 .priv = PRIV_ACTION(FLAG, 0),
2432 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2437 .help = "assign packets to a given queue index",
2438 .priv = PRIV_ACTION(QUEUE,
2439 sizeof(struct rte_flow_action_queue)),
2440 .next = NEXT(action_queue),
2443 [ACTION_QUEUE_INDEX] = {
2445 .help = "queue index to use",
2446 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2447 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2448 .call = parse_vc_conf,
2452 .help = "drop packets (note: passthru has priority)",
2453 .priv = PRIV_ACTION(DROP, 0),
2454 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2459 .help = "enable counters for this rule",
2460 .priv = PRIV_ACTION(COUNT,
2461 sizeof(struct rte_flow_action_count)),
2462 .next = NEXT(action_count),
2465 [ACTION_COUNT_ID] = {
2466 .name = "identifier",
2467 .help = "counter identifier to use",
2468 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2469 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2470 .call = parse_vc_conf,
2472 [ACTION_COUNT_SHARED] = {
2474 .help = "shared counter",
2475 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2476 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2478 .call = parse_vc_conf,
2482 .help = "spread packets among several queues",
2483 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2484 .next = NEXT(action_rss),
2485 .call = parse_vc_action_rss,
2487 [ACTION_RSS_FUNC] = {
2489 .help = "RSS hash function to apply",
2490 .next = NEXT(action_rss,
2491 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2492 ACTION_RSS_FUNC_TOEPLITZ,
2493 ACTION_RSS_FUNC_SIMPLE_XOR)),
2495 [ACTION_RSS_FUNC_DEFAULT] = {
2497 .help = "default hash function",
2498 .call = parse_vc_action_rss_func,
2500 [ACTION_RSS_FUNC_TOEPLITZ] = {
2502 .help = "Toeplitz hash function",
2503 .call = parse_vc_action_rss_func,
2505 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2506 .name = "simple_xor",
2507 .help = "simple XOR hash function",
2508 .call = parse_vc_action_rss_func,
2510 [ACTION_RSS_LEVEL] = {
2512 .help = "encapsulation level for \"types\"",
2513 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2514 .args = ARGS(ARGS_ENTRY_ARB
2515 (offsetof(struct action_rss_data, conf) +
2516 offsetof(struct rte_flow_action_rss, level),
2517 sizeof(((struct rte_flow_action_rss *)0)->
2520 [ACTION_RSS_TYPES] = {
2522 .help = "specific RSS hash types",
2523 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2525 [ACTION_RSS_TYPE] = {
2527 .help = "RSS hash type",
2528 .call = parse_vc_action_rss_type,
2529 .comp = comp_vc_action_rss_type,
2531 [ACTION_RSS_KEY] = {
2533 .help = "RSS hash key",
2534 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2535 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2537 (offsetof(struct action_rss_data, conf) +
2538 offsetof(struct rte_flow_action_rss, key_len),
2539 sizeof(((struct rte_flow_action_rss *)0)->
2541 ARGS_ENTRY(struct action_rss_data, key)),
2543 [ACTION_RSS_KEY_LEN] = {
2545 .help = "RSS hash key length in bytes",
2546 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2547 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2548 (offsetof(struct action_rss_data, conf) +
2549 offsetof(struct rte_flow_action_rss, key_len),
2550 sizeof(((struct rte_flow_action_rss *)0)->
2553 RSS_HASH_KEY_LENGTH)),
2555 [ACTION_RSS_QUEUES] = {
2557 .help = "queue indices to use",
2558 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2559 .call = parse_vc_conf,
2561 [ACTION_RSS_QUEUE] = {
2563 .help = "queue index",
2564 .call = parse_vc_action_rss_queue,
2565 .comp = comp_vc_action_rss_queue,
2569 .help = "direct traffic to physical function",
2570 .priv = PRIV_ACTION(PF, 0),
2571 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2576 .help = "direct traffic to a virtual function ID",
2577 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2578 .next = NEXT(action_vf),
2581 [ACTION_VF_ORIGINAL] = {
2583 .help = "use original VF ID if possible",
2584 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2585 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2587 .call = parse_vc_conf,
2592 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2593 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2594 .call = parse_vc_conf,
2596 [ACTION_PHY_PORT] = {
2598 .help = "direct packets to physical port index",
2599 .priv = PRIV_ACTION(PHY_PORT,
2600 sizeof(struct rte_flow_action_phy_port)),
2601 .next = NEXT(action_phy_port),
2604 [ACTION_PHY_PORT_ORIGINAL] = {
2606 .help = "use original port index if possible",
2607 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2608 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2610 .call = parse_vc_conf,
2612 [ACTION_PHY_PORT_INDEX] = {
2614 .help = "physical port index",
2615 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2616 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2618 .call = parse_vc_conf,
2620 [ACTION_PORT_ID] = {
2622 .help = "direct matching traffic to a given DPDK port ID",
2623 .priv = PRIV_ACTION(PORT_ID,
2624 sizeof(struct rte_flow_action_port_id)),
2625 .next = NEXT(action_port_id),
2628 [ACTION_PORT_ID_ORIGINAL] = {
2630 .help = "use original DPDK port ID if possible",
2631 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2632 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2634 .call = parse_vc_conf,
2636 [ACTION_PORT_ID_ID] = {
2638 .help = "DPDK port ID",
2639 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2640 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2641 .call = parse_vc_conf,
2645 .help = "meter the directed packets at given id",
2646 .priv = PRIV_ACTION(METER,
2647 sizeof(struct rte_flow_action_meter)),
2648 .next = NEXT(action_meter),
2651 [ACTION_METER_ID] = {
2653 .help = "meter id to use",
2654 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2655 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2656 .call = parse_vc_conf,
2658 [ACTION_OF_SET_MPLS_TTL] = {
2659 .name = "of_set_mpls_ttl",
2660 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2663 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2664 .next = NEXT(action_of_set_mpls_ttl),
2667 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2670 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2671 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2673 .call = parse_vc_conf,
2675 [ACTION_OF_DEC_MPLS_TTL] = {
2676 .name = "of_dec_mpls_ttl",
2677 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2678 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2679 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2682 [ACTION_OF_SET_NW_TTL] = {
2683 .name = "of_set_nw_ttl",
2684 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2687 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2688 .next = NEXT(action_of_set_nw_ttl),
2691 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2694 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2695 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2697 .call = parse_vc_conf,
2699 [ACTION_OF_DEC_NW_TTL] = {
2700 .name = "of_dec_nw_ttl",
2701 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2702 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2703 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2706 [ACTION_OF_COPY_TTL_OUT] = {
2707 .name = "of_copy_ttl_out",
2708 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2709 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2710 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2713 [ACTION_OF_COPY_TTL_IN] = {
2714 .name = "of_copy_ttl_in",
2715 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2716 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2717 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2720 [ACTION_OF_POP_VLAN] = {
2721 .name = "of_pop_vlan",
2722 .help = "OpenFlow's OFPAT_POP_VLAN",
2723 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2724 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2727 [ACTION_OF_PUSH_VLAN] = {
2728 .name = "of_push_vlan",
2729 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2732 sizeof(struct rte_flow_action_of_push_vlan)),
2733 .next = NEXT(action_of_push_vlan),
2736 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2737 .name = "ethertype",
2738 .help = "EtherType",
2739 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2740 .args = ARGS(ARGS_ENTRY_HTON
2741 (struct rte_flow_action_of_push_vlan,
2743 .call = parse_vc_conf,
2745 [ACTION_OF_SET_VLAN_VID] = {
2746 .name = "of_set_vlan_vid",
2747 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2750 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2751 .next = NEXT(action_of_set_vlan_vid),
2754 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2757 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2758 .args = ARGS(ARGS_ENTRY_HTON
2759 (struct rte_flow_action_of_set_vlan_vid,
2761 .call = parse_vc_conf,
2763 [ACTION_OF_SET_VLAN_PCP] = {
2764 .name = "of_set_vlan_pcp",
2765 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2768 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2769 .next = NEXT(action_of_set_vlan_pcp),
2772 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2774 .help = "VLAN priority",
2775 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2776 .args = ARGS(ARGS_ENTRY_HTON
2777 (struct rte_flow_action_of_set_vlan_pcp,
2779 .call = parse_vc_conf,
2781 [ACTION_OF_POP_MPLS] = {
2782 .name = "of_pop_mpls",
2783 .help = "OpenFlow's OFPAT_POP_MPLS",
2784 .priv = PRIV_ACTION(OF_POP_MPLS,
2785 sizeof(struct rte_flow_action_of_pop_mpls)),
2786 .next = NEXT(action_of_pop_mpls),
2789 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2790 .name = "ethertype",
2791 .help = "EtherType",
2792 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2793 .args = ARGS(ARGS_ENTRY_HTON
2794 (struct rte_flow_action_of_pop_mpls,
2796 .call = parse_vc_conf,
2798 [ACTION_OF_PUSH_MPLS] = {
2799 .name = "of_push_mpls",
2800 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2803 sizeof(struct rte_flow_action_of_push_mpls)),
2804 .next = NEXT(action_of_push_mpls),
2807 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2808 .name = "ethertype",
2809 .help = "EtherType",
2810 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2811 .args = ARGS(ARGS_ENTRY_HTON
2812 (struct rte_flow_action_of_push_mpls,
2814 .call = parse_vc_conf,
2816 [ACTION_VXLAN_ENCAP] = {
2817 .name = "vxlan_encap",
2818 .help = "VXLAN encapsulation, uses configuration set by \"set"
2820 .priv = PRIV_ACTION(VXLAN_ENCAP,
2821 sizeof(struct action_vxlan_encap_data)),
2822 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2823 .call = parse_vc_action_vxlan_encap,
2825 [ACTION_VXLAN_DECAP] = {
2826 .name = "vxlan_decap",
2827 .help = "Performs a decapsulation action by stripping all"
2828 " headers of the VXLAN tunnel network overlay from the"
2830 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2831 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2834 [ACTION_NVGRE_ENCAP] = {
2835 .name = "nvgre_encap",
2836 .help = "NVGRE encapsulation, uses configuration set by \"set"
2838 .priv = PRIV_ACTION(NVGRE_ENCAP,
2839 sizeof(struct action_nvgre_encap_data)),
2840 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2841 .call = parse_vc_action_nvgre_encap,
2843 [ACTION_NVGRE_DECAP] = {
2844 .name = "nvgre_decap",
2845 .help = "Performs a decapsulation action by stripping all"
2846 " headers of the NVGRE tunnel network overlay from the"
2848 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2849 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2852 [ACTION_L2_ENCAP] = {
2854 .help = "l2 encap, uses configuration set by"
2855 " \"set l2_encap\"",
2856 .priv = PRIV_ACTION(RAW_ENCAP,
2857 sizeof(struct action_raw_encap_data)),
2858 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2859 .call = parse_vc_action_l2_encap,
2861 [ACTION_L2_DECAP] = {
2863 .help = "l2 decap, uses configuration set by"
2864 " \"set l2_decap\"",
2865 .priv = PRIV_ACTION(RAW_DECAP,
2866 sizeof(struct action_raw_decap_data)),
2867 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2868 .call = parse_vc_action_l2_decap,
2870 [ACTION_MPLSOGRE_ENCAP] = {
2871 .name = "mplsogre_encap",
2872 .help = "mplsogre encapsulation, uses configuration set by"
2873 " \"set mplsogre_encap\"",
2874 .priv = PRIV_ACTION(RAW_ENCAP,
2875 sizeof(struct action_raw_encap_data)),
2876 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2877 .call = parse_vc_action_mplsogre_encap,
2879 [ACTION_MPLSOGRE_DECAP] = {
2880 .name = "mplsogre_decap",
2881 .help = "mplsogre decapsulation, uses configuration set by"
2882 " \"set mplsogre_decap\"",
2883 .priv = PRIV_ACTION(RAW_DECAP,
2884 sizeof(struct action_raw_decap_data)),
2885 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2886 .call = parse_vc_action_mplsogre_decap,
2888 [ACTION_MPLSOUDP_ENCAP] = {
2889 .name = "mplsoudp_encap",
2890 .help = "mplsoudp encapsulation, uses configuration set by"
2891 " \"set mplsoudp_encap\"",
2892 .priv = PRIV_ACTION(RAW_ENCAP,
2893 sizeof(struct action_raw_encap_data)),
2894 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2895 .call = parse_vc_action_mplsoudp_encap,
2897 [ACTION_MPLSOUDP_DECAP] = {
2898 .name = "mplsoudp_decap",
2899 .help = "mplsoudp decapsulation, uses configuration set by"
2900 " \"set mplsoudp_decap\"",
2901 .priv = PRIV_ACTION(RAW_DECAP,
2902 sizeof(struct action_raw_decap_data)),
2903 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2904 .call = parse_vc_action_mplsoudp_decap,
2906 [ACTION_SET_IPV4_SRC] = {
2907 .name = "set_ipv4_src",
2908 .help = "Set a new IPv4 source address in the outermost"
2910 .priv = PRIV_ACTION(SET_IPV4_SRC,
2911 sizeof(struct rte_flow_action_set_ipv4)),
2912 .next = NEXT(action_set_ipv4_src),
2915 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2916 .name = "ipv4_addr",
2917 .help = "new IPv4 source address to set",
2918 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2919 .args = ARGS(ARGS_ENTRY_HTON
2920 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2921 .call = parse_vc_conf,
2923 [ACTION_SET_IPV4_DST] = {
2924 .name = "set_ipv4_dst",
2925 .help = "Set a new IPv4 destination address in the outermost"
2927 .priv = PRIV_ACTION(SET_IPV4_DST,
2928 sizeof(struct rte_flow_action_set_ipv4)),
2929 .next = NEXT(action_set_ipv4_dst),
2932 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2933 .name = "ipv4_addr",
2934 .help = "new IPv4 destination address to set",
2935 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2936 .args = ARGS(ARGS_ENTRY_HTON
2937 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2938 .call = parse_vc_conf,
2940 [ACTION_SET_IPV6_SRC] = {
2941 .name = "set_ipv6_src",
2942 .help = "Set a new IPv6 source address in the outermost"
2944 .priv = PRIV_ACTION(SET_IPV6_SRC,
2945 sizeof(struct rte_flow_action_set_ipv6)),
2946 .next = NEXT(action_set_ipv6_src),
2949 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2950 .name = "ipv6_addr",
2951 .help = "new IPv6 source address to set",
2952 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2953 .args = ARGS(ARGS_ENTRY_HTON
2954 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2955 .call = parse_vc_conf,
2957 [ACTION_SET_IPV6_DST] = {
2958 .name = "set_ipv6_dst",
2959 .help = "Set a new IPv6 destination address in the outermost"
2961 .priv = PRIV_ACTION(SET_IPV6_DST,
2962 sizeof(struct rte_flow_action_set_ipv6)),
2963 .next = NEXT(action_set_ipv6_dst),
2966 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2967 .name = "ipv6_addr",
2968 .help = "new IPv6 destination address to set",
2969 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2970 .args = ARGS(ARGS_ENTRY_HTON
2971 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2972 .call = parse_vc_conf,
/* set_tp_src / set_tp_dst: rewrite the outermost L4 port numbers. */
2974 [ACTION_SET_TP_SRC] = {
2975 .name = "set_tp_src",
2976 .help = "set a new source port number in the outermost"
2978 .priv = PRIV_ACTION(SET_TP_SRC,
2979 sizeof(struct rte_flow_action_set_tp)),
2980 .next = NEXT(action_set_tp_src),
2983 [ACTION_SET_TP_SRC_TP_SRC] = {
2985 .help = "new source port number to set",
2986 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2987 .args = ARGS(ARGS_ENTRY_HTON
2988 (struct rte_flow_action_set_tp, port)),
2989 .call = parse_vc_conf,
2991 [ACTION_SET_TP_DST] = {
2992 .name = "set_tp_dst",
2993 .help = "set a new destination port number in the outermost"
2995 .priv = PRIV_ACTION(SET_TP_DST,
2996 sizeof(struct rte_flow_action_set_tp)),
2997 .next = NEXT(action_set_tp_dst),
3000 [ACTION_SET_TP_DST_TP_DST] = {
3002 .help = "new destination port number to set",
3003 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3004 .args = ARGS(ARGS_ENTRY_HTON
3005 (struct rte_flow_action_set_tp, port)),
3006 .call = parse_vc_conf,
/* Zero-config actions: no argument sub-tokens, size 0 private data. */
3008 [ACTION_MAC_SWAP] = {
3010 .help = "Swap the source and destination MAC addresses"
3011 " in the outermost Ethernet header",
3012 .priv = PRIV_ACTION(MAC_SWAP, 0),
3013 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3016 [ACTION_DEC_TTL] = {
3018 .help = "decrease network TTL if available",
3019 .priv = PRIV_ACTION(DEC_TTL, 0),
3020 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3023 [ACTION_SET_TTL] = {
3025 .help = "set ttl value",
3026 .priv = PRIV_ACTION(SET_TTL,
3027 sizeof(struct rte_flow_action_set_ttl)),
3028 .next = NEXT(action_set_ttl),
3031 [ACTION_SET_TTL_TTL] = {
3032 .name = "ttl_value",
3033 .help = "new ttl value to set",
3034 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3035 .args = ARGS(ARGS_ENTRY_HTON
3036 (struct rte_flow_action_set_ttl, ttl_value)),
3037 .call = parse_vc_conf,
/* set_mac_src / set_mac_dst: rewrite Ethernet addresses. */
3039 [ACTION_SET_MAC_SRC] = {
3040 .name = "set_mac_src",
3041 .help = "set source mac address",
3042 .priv = PRIV_ACTION(SET_MAC_SRC,
3043 sizeof(struct rte_flow_action_set_mac)),
3044 .next = NEXT(action_set_mac_src),
3047 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3049 .help = "new source mac address",
3050 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3051 .args = ARGS(ARGS_ENTRY_HTON
3052 (struct rte_flow_action_set_mac, mac_addr)),
3053 .call = parse_vc_conf,
3055 [ACTION_SET_MAC_DST] = {
3056 .name = "set_mac_dst",
3057 .help = "set destination mac address",
3058 .priv = PRIV_ACTION(SET_MAC_DST,
3059 sizeof(struct rte_flow_action_set_mac)),
3060 .next = NEXT(action_set_mac_dst),
3063 [ACTION_SET_MAC_DST_MAC_DST] = {
3065 .help = "new destination mac address to set",
3066 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3067 .args = ARGS(ARGS_ENTRY_HTON
3068 (struct rte_flow_action_set_mac, mac_addr)),
3069 .call = parse_vc_conf,
/*
 * TCP seq/ack modifiers: private data is a bare big-endian 32-bit
 * delta (ARG_ENTRY_HTON on rte_be32_t, no wrapper struct).
 */
3071 [ACTION_INC_TCP_SEQ] = {
3072 .name = "inc_tcp_seq",
3073 .help = "increase TCP sequence number",
3074 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3075 .next = NEXT(action_inc_tcp_seq),
3078 [ACTION_INC_TCP_SEQ_VALUE] = {
3080 .help = "the value to increase TCP sequence number by",
3081 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3082 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3083 .call = parse_vc_conf,
3085 [ACTION_DEC_TCP_SEQ] = {
3086 .name = "dec_tcp_seq",
3087 .help = "decrease TCP sequence number",
3088 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3089 .next = NEXT(action_dec_tcp_seq),
3092 [ACTION_DEC_TCP_SEQ_VALUE] = {
3094 .help = "the value to decrease TCP sequence number by",
3095 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3096 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3097 .call = parse_vc_conf,
3099 [ACTION_INC_TCP_ACK] = {
3100 .name = "inc_tcp_ack",
3101 .help = "increase TCP acknowledgment number",
3102 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3103 .next = NEXT(action_inc_tcp_ack),
3106 [ACTION_INC_TCP_ACK_VALUE] = {
3108 .help = "the value to increase TCP acknowledgment number by",
3109 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3110 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3111 .call = parse_vc_conf,
3113 [ACTION_DEC_TCP_ACK] = {
3114 .name = "dec_tcp_ack",
3115 .help = "decrease TCP acknowledgment number",
3116 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3117 .next = NEXT(action_dec_tcp_ack),
3120 [ACTION_DEC_TCP_ACK_VALUE] = {
3122 .help = "the value to decrease TCP acknowledgment number by",
3123 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3124 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3125 .call = parse_vc_conf,
/*
 * raw_encap / raw_decap: header data is not parsed inline, it comes
 * from the global buffers filled by the "set raw_encap/raw_decap"
 * commands below; dedicated .call handlers copy it in.
 */
3127 [ACTION_RAW_ENCAP] = {
3128 .name = "raw_encap",
3129 .help = "encapsulation data, defined by set raw_encap",
3130 .priv = PRIV_ACTION(RAW_ENCAP,
3131 sizeof(struct rte_flow_action_raw_encap)),
3132 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3133 .call = parse_vc_action_raw_encap,
3135 [ACTION_RAW_DECAP] = {
3136 .name = "raw_decap",
3137 .help = "decapsulation data, defined by set raw_encap",
3138 .priv = PRIV_ACTION(RAW_DECAP,
3139 sizeof(struct rte_flow_action_raw_decap)),
3140 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3141 .call = parse_vc_action_raw_decap,
3143 /* Top level command. */
3146 .help = "set raw encap/decap data",
3147 .type = "set raw_encap|raw_decap <pattern>",
3148 .next = NEXT(NEXT_ENTRY
3151 .call = parse_set_init,
3153 /* Sub-level commands. */
3155 .name = "raw_encap",
3156 .help = "set raw encap data",
3157 .next = NEXT(next_item),
3158 .call = parse_set_raw_encap_decap,
3161 .name = "raw_decap",
3162 .help = "set raw decap data",
3163 .next = NEXT(next_item),
3164 .call = parse_set_raw_encap_decap,
3168 /** Remove and return last entry from argument stack. */
3169 static const struct arg *
3170 pop_args(struct context *ctx)
3172 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3175 /** Add entry on top of the argument stack. */
3177 push_args(struct context *ctx, const struct arg *arg)
3179 if (ctx->args_num == CTX_STACK_SIZE)
3181 ctx->args[ctx->args_num++] = arg;
3185 /** Spread value into buffer according to bit-mask. */
/*
 * NOTE(review): several lines of this function (size/sub computation,
 * the big-endian branch, return value) are not visible in this view;
 * comments below cover only the visible little-endian path.
 */
3187 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3189 uint32_t i = arg->size;
3197 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3206 unsigned int shift = 0;
/* Walk bytes from the end of the field backwards on little-endian. */
3207 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Visit every bit position covered by this byte of the mask. */
3209 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip bit positions not selected by the mask. */
3210 if (!(arg->mask[i] & (1 << shift)))
/* Clear the destination bit, then copy the next bit of val into it. */
3215 *buf &= ~(1 << shift);
3216 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * Behaves like strcmp() limited to partial_len characters, except that
 * when the prefix matches and "full" is longer than the partial string,
 * the first unmatched character of "full" is returned (non-zero) so a
 * strict prefix is distinguished from an exact match.
 *
 * @return 0 on full match, non-zero otherwise.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int diff = strncmp(full, partial, partial_len);

	if (diff != 0)
		return diff;
	/* Prefix matched: exact match only if "full" ends here too. */
	return strlen(full) > partial_len ? full[partial_len] : 0;
}
3238 * Parse a prefix length and generate a bit-mask.
3240 * Last argument (ctx->args) is retrieved to determine mask size, storage
3241 * location and whether the result must use network byte ordering.
3244 parse_prefix(struct context *ctx, const struct token *token,
3245 const char *str, unsigned int len,
3246 void *buf, unsigned int size)
/* Consume the argument descriptor pushed by the previous token. */
3248 const struct arg *arg = pop_args(ctx);
/* conv[n] = byte with the top n bits set; used for the partial byte. */
3249 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3256 /* Argument is expected. */
/* Whole input must be a valid number (base auto-detected). */
3260 u = strtoumax(str, &end, 0);
3261 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments take a dedicated fill path. */
3266 extra = arg_entry_bf_fill(NULL, 0, arg);
3275 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3276 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefix lengths that do not fit the field. */
3283 if (bytes > size || bytes + !!extra > size)
3287 buf = (uint8_t *)ctx->object + arg->offset;
3288 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host: set high-order bytes at the end of the field. */
3290 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3291 memset(buf, 0x00, size - bytes);
3293 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Big-endian host: mask bytes are laid out front-to-back. */
3297 memset(buf, 0xff, bytes);
3298 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3300 ((uint8_t *)buf)[bytes] = conv[extra];
/* Object mask (when present) is fully significant for this field. */
3303 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On error paths the argument is restored for the caller. */
3306 push_args(ctx, arg);
3310 /** Default parsing function for token name matching. */
/*
 * Matches str (length len, possibly not NUL-terminated) against the
 * token's canonical name; buf/size are unused by this default handler.
 */
3312 parse_default(struct context *ctx, const struct token *token,
3313 const char *str, unsigned int len,
3314 void *buf, unsigned int size)
3319 if (strcmp_partial(token->name, str, len))
3324 /** Parse flow command, initialize output buffer for subsequent tokens. */
3326 parse_init(struct context *ctx, const struct token *token,
3327 const char *str, unsigned int len,
3328 void *buf, unsigned int size)
3330 struct buffer *out = buf;
3332 /* Token name must match. */
3333 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3335 /* Nothing else to do if there is no buffer. */
3338 /* Make sure buffer is large enough. */
3339 if (size < sizeof(*out))
3341 /* Initialize buffer. */
/* 0x22 poison past the struct helps catch stray reads of spare space. */
3342 memset(out, 0x00, sizeof(*out));
3343 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3346 ctx->objmask = NULL;
3350 /** Parse tokens for validate/create commands. */
/*
 * Shared handler for the "flow validate" / "flow create" token tree:
 * sets attribute bits, then appends pattern items and actions into the
 * output buffer. Items/actions grow upward from out+1 while their
 * spec/mask/conf payloads grow downward from out->args.vc.data, and the
 * two regions must never meet.
 */
3352 parse_vc(struct context *ctx, const struct token *token,
3353 const char *str, unsigned int len,
3354 void *buf, unsigned int size)
3356 struct buffer *out = buf;
3360 /* Token name must match. */
3361 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3363 /* Nothing else to do if there is no buffer. */
/* First call for this command: record it and anchor the data region. */
3366 if (!out->command) {
3367 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3369 if (sizeof(*out) > size)
3371 out->command = ctx->curr;
3374 ctx->objmask = NULL;
/* Payload region starts at the end of the buffer, grows downward. */
3375 out->args.vc.data = (uint8_t *)out + size;
3379 ctx->object = &out->args.vc.attr;
3380 ctx->objmask = NULL;
3381 switch (ctx->curr) {
/* Attribute tokens just flip bits in the flow attributes. */
3386 out->args.vc.attr.ingress = 1;
3389 out->args.vc.attr.egress = 1;
3392 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: start the item array right after the buffer. */
3395 out->args.vc.pattern =
3396 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3398 ctx->object = out->args.vc.pattern;
3399 ctx->objmask = NULL;
/* "actions" keyword: actions follow the last pattern item. */
3402 out->args.vc.actions =
3403 (void *)RTE_ALIGN_CEIL((uintptr_t)
3404 (out->args.vc.pattern +
3405 out->args.vc.pattern_n),
3407 ctx->object = out->args.vc.actions;
3408 ctx->objmask = NULL;
/* Still in the pattern section: append a new item. */
3415 if (!out->args.vc.actions) {
3416 const struct parse_item_priv *priv = token->priv;
3417 struct rte_flow_item *item =
3418 out->args.vc.pattern + out->args.vc.pattern_n;
3420 data_size = priv->size * 3; /* spec, last, mask */
3421 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3422 (out->args.vc.data - data_size),
/* Fail when the item array would collide with the data region. */
3424 if ((uint8_t *)item + sizeof(*item) > data)
3426 *item = (struct rte_flow_item){
3429 ++out->args.vc.pattern_n;
3431 ctx->objmask = NULL;
/* Otherwise append a new action with its configuration payload. */
3433 const struct parse_action_priv *priv = token->priv;
3434 struct rte_flow_action *action =
3435 out->args.vc.actions + out->args.vc.actions_n;
3437 data_size = priv->size; /* configuration */
3438 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3439 (out->args.vc.data - data_size),
3441 if ((uint8_t *)action + sizeof(*action) > data)
3443 *action = (struct rte_flow_action){
/* Zero-sized configurations get a NULL conf pointer. */
3445 .conf = data_size ? data : NULL,
3447 ++out->args.vc.actions_n;
3448 ctx->object = action;
3449 ctx->objmask = NULL;
3451 memset(data, 0, data_size);
3452 out->args.vc.data = data;
3453 ctx->objdata = data_size;
3457 /** Parse pattern item parameter type. */
/*
 * Selects which of spec/last/mask of the current pattern item the next
 * value tokens will write into (index 0/1/2 into the per-item data
 * area reserved by parse_vc()).
 */
3459 parse_vc_spec(struct context *ctx, const struct token *token,
3460 const char *str, unsigned int len,
3461 void *buf, unsigned int size)
3463 struct buffer *out = buf;
3464 struct rte_flow_item *item;
3470 /* Token name must match. */
3471 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3473 /* Parse parameter types. */
3474 switch (ctx->curr) {
3475 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3481 case ITEM_PARAM_SPEC:
3484 case ITEM_PARAM_LAST:
3487 case ITEM_PARAM_PREFIX:
3488 /* Modify next token to expect a prefix. */
3489 if (ctx->next_num < 2)
3491 ctx->next[ctx->next_num - 2] = prefix;
3493 case ITEM_PARAM_MASK:
3499 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach the parameter to. */
3502 if (!out->args.vc.pattern_n)
3504 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3505 data_size = ctx->objdata / 3; /* spec, last, mask */
3506 /* Point to selected object. */
3507 ctx->object = out->args.vc.data + (data_size * index);
3509 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3510 item->mask = ctx->objmask;
3512 ctx->objmask = NULL;
3513 /* Update relevant item pointer. */
3514 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3519 /** Parse action configuration field. */
/*
 * Generic handler for action argument tokens: points ctx->object at the
 * configuration payload of the most recent action so the following
 * value token writes directly into it. Actions have no mask.
 */
3521 parse_vc_conf(struct context *ctx, const struct token *token,
3522 const char *str, unsigned int len,
3523 void *buf, unsigned int size)
3525 struct buffer *out = buf;
3528 /* Token name must match. */
3529 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3531 /* Nothing else to do if there is no buffer. */
3534 /* Point to selected object. */
3535 ctx->object = out->args.vc.data;
3536 ctx->objmask = NULL;
3540 /** Parse RSS action. */
/*
 * Registers the action via parse_vc(), then fills its private
 * action_rss_data payload with a sensible default configuration
 * (default hash function, all configured Rx queues, testpmd's default
 * key) that later sub-tokens may override.
 */
3542 parse_vc_action_rss(struct context *ctx, const struct token *token,
3543 const char *str, unsigned int len,
3544 void *buf, unsigned int size)
3546 struct buffer *out = buf;
3547 struct rte_flow_action *action;
3548 struct action_rss_data *action_rss_data;
3552 ret = parse_vc(ctx, token, str, len, buf, size);
3555 /* Nothing else to do if there is no buffer. */
3558 if (!out->args.vc.actions_n)
3560 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3561 /* Point to selected object. */
3562 ctx->object = out->args.vc.data;
3563 ctx->objmask = NULL;
3564 /* Set up default configuration. */
3565 action_rss_data = ctx->object;
3566 *action_rss_data = (struct action_rss_data){
3567 .conf = (struct rte_flow_action_rss){
3568 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3571 .key_len = sizeof(action_rss_data->key),
3572 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3573 .key = action_rss_data->key,
3574 .queue = action_rss_data->queue,
3576 .key = "testpmd's default RSS hash key, "
3577 "override it for better balancing",
/* Default queue list: identity mapping 0..queue_num-1. */
3580 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3581 action_rss_data->queue[i] = i;
/* Clamp key length to what the device actually supports. */
3582 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3583 ctx->port != (portid_t)RTE_PORT_ALL) {
3584 struct rte_eth_dev_info info;
3587 ret2 = rte_eth_dev_info_get(ctx->port, &info);
3591 action_rss_data->conf.key_len =
3592 RTE_MIN(sizeof(action_rss_data->key),
3593 info.hash_key_size);
3595 action->conf = &action_rss_data->conf;
3600 * Parse func field for RSS action.
3602 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3603 * ACTION_RSS_FUNC_* index that called this function.
3606 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3607 const char *str, unsigned int len,
3608 void *buf, unsigned int size)
3610 struct action_rss_data *action_rss_data;
3611 enum rte_eth_hash_function func;
3615 /* Token name must match. */
3616 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the sub-token index to the corresponding hash function. */
3618 switch (ctx->curr) {
3619 case ACTION_RSS_FUNC_DEFAULT:
3620 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3622 case ACTION_RSS_FUNC_TOEPLITZ:
3623 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3625 case ACTION_RSS_FUNC_SIMPLE_XOR:
3626 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3633 action_rss_data = ctx->object;
3634 action_rss_data->conf.func = func;
3639 * Parse type field for RSS action.
3641 * Valid tokens are type field names and the "end" token.
3644 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3645 const char *str, unsigned int len,
3646 void *buf, unsigned int size)
3648 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3649 struct action_rss_data *action_rss_data;
3655 if (ctx->curr != ACTION_RSS_TYPE)
/* First "types" token: reset the accumulated type bits. */
/* ctx->objdata high 16 bits flag that parsing already started. */
3657 if (!(ctx->objdata >> 16) && ctx->object) {
3658 action_rss_data = ctx->object;
3659 action_rss_data->conf.types = 0;
/* "end" terminates the list; clear the started flag. */
3661 if (!strcmp_partial("end", str, len)) {
3662 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3665 for (i = 0; rss_type_table[i].str; ++i)
3666 if (!strcmp_partial(rss_type_table[i].str, str, len))
3668 if (!rss_type_table[i].str)
3670 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat entry: accept more type tokens after this one. */
3672 if (ctx->next_num == RTE_DIM(ctx->next))
3674 ctx->next[ctx->next_num++] = next;
3677 action_rss_data = ctx->object;
3678 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3683 * Parse queue field for RSS action.
3685 * Valid tokens are queue indices and the "end" token.
3688 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3689 const char *str, unsigned int len,
3690 void *buf, unsigned int size)
3692 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3693 struct action_rss_data *action_rss_data;
3694 const struct arg *arg;
3701 if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of ctx->objdata count queues parsed so far. */
3703 i = ctx->objdata >> 16;
3704 if (!strcmp_partial("end", str, len)) {
3705 ctx->objdata &= 0xffff;
3708 if (i >= ACTION_RSS_QUEUE_NUM)
/* Build an arbitrary-offset descriptor for queue slot i. */
3710 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3711 i * sizeof(action_rss_data->queue[i]),
3712 sizeof(action_rss_data->queue[i]));
3713 if (push_args(ctx, arg))
/* Delegate numeric parsing; it pops the descriptor on success. */
3715 ret = parse_int(ctx, token, str, len, NULL, 0);
3721 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Repeat entry: accept more queue indices after this one. */
3723 if (ctx->next_num == RTE_DIM(ctx->next))
3725 ctx->next[ctx->next_num++] = next;
3729 action_rss_data = ctx->object;
3730 action_rss_data->conf.queue_num = i;
/* Empty list: NULL queue pointer rather than a zero-length array. */
3731 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3735 /** Parse VXLAN encap action. */
/*
 * Builds the item list for RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP from the
 * global vxlan_encap_conf previously filled by "set vxlan": fixed
 * layout ETH / VLAN / IPv4-or-IPv6 / UDP / VXLAN / END, with unused
 * layers turned into VOID items.
 */
3737 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3738 const char *str, unsigned int len,
3739 void *buf, unsigned int size)
3741 struct buffer *out = buf;
3742 struct rte_flow_action *action;
3743 struct action_vxlan_encap_data *action_vxlan_encap_data;
3746 ret = parse_vc(ctx, token, str, len, buf, size);
3749 /* Nothing else to do if there is no buffer. */
3752 if (!out->args.vc.actions_n)
3754 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3755 /* Point to selected object. */
3756 ctx->object = out->args.vc.data;
3757 ctx->objmask = NULL;
3758 /* Set up default configuration. */
3759 action_vxlan_encap_data = ctx->object;
3760 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3761 .conf = (struct rte_flow_action_vxlan_encap){
3762 .definition = action_vxlan_encap_data->items,
3766 .type = RTE_FLOW_ITEM_TYPE_ETH,
3767 .spec = &action_vxlan_encap_data->item_eth,
3768 .mask = &rte_flow_item_eth_mask,
3771 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3772 .spec = &action_vxlan_encap_data->item_vlan,
3773 .mask = &rte_flow_item_vlan_mask,
3776 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3777 .spec = &action_vxlan_encap_data->item_ipv4,
3778 .mask = &rte_flow_item_ipv4_mask,
3781 .type = RTE_FLOW_ITEM_TYPE_UDP,
3782 .spec = &action_vxlan_encap_data->item_udp,
3783 .mask = &rte_flow_item_udp_mask,
3786 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3787 .spec = &action_vxlan_encap_data->item_vxlan,
3788 .mask = &rte_flow_item_vxlan_mask,
3791 .type = RTE_FLOW_ITEM_TYPE_END,
3796 .tci = vxlan_encap_conf.vlan_tci,
3800 .src_addr = vxlan_encap_conf.ipv4_src,
3801 .dst_addr = vxlan_encap_conf.ipv4_dst,
3804 .src_port = vxlan_encap_conf.udp_src,
3805 .dst_port = vxlan_encap_conf.udp_dst,
3807 .item_vxlan.flags = 0,
3809 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3810 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3811 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3812 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace item slot 2 (the IPv4 item) with IPv6. */
3813 if (!vxlan_encap_conf.select_ipv4) {
3814 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3815 &vxlan_encap_conf.ipv6_src,
3816 sizeof(vxlan_encap_conf.ipv6_src));
3817 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3818 &vxlan_encap_conf.ipv6_dst,
3819 sizeof(vxlan_encap_conf.ipv6_dst));
3820 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3821 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3822 .spec = &action_vxlan_encap_data->item_ipv6,
3823 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item slot 1. */
3826 if (!vxlan_encap_conf.select_vlan)
3827 action_vxlan_encap_data->items[1].type =
3828 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional explicit TOS/TTL: widen the L3 mask to cover them. */
3829 if (vxlan_encap_conf.select_tos_ttl) {
3830 if (vxlan_encap_conf.select_ipv4) {
/* Static: mask must outlive this call since items point at it. */
3831 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3833 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3834 sizeof(ipv4_mask_tos));
3835 ipv4_mask_tos.hdr.type_of_service = 0xff;
3836 ipv4_mask_tos.hdr.time_to_live = 0xff;
3837 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3838 vxlan_encap_conf.ip_tos;
3839 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3840 vxlan_encap_conf.ip_ttl;
3841 action_vxlan_encap_data->items[2].mask =
3844 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3846 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3847 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3848 ipv6_mask_tos.hdr.vtc_flow |=
3849 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3850 ipv6_mask_tos.hdr.hop_limits = 0xff;
3851 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3853 ((uint32_t)vxlan_encap_conf.ip_tos <<
3854 RTE_IPV6_HDR_TC_SHIFT);
3855 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3856 vxlan_encap_conf.ip_ttl;
3857 action_vxlan_encap_data->items[2].mask =
3861 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3862 RTE_DIM(vxlan_encap_conf.vni));
3863 action->conf = &action_vxlan_encap_data->conf;
3867 /** Parse NVGRE encap action. */
/*
 * Same approach as VXLAN encap: build ETH / VLAN / IPv4-or-IPv6 /
 * NVGRE / END items from the global nvgre_encap_conf ("set nvgre").
 */
3869 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3870 const char *str, unsigned int len,
3871 void *buf, unsigned int size)
3873 struct buffer *out = buf;
3874 struct rte_flow_action *action;
3875 struct action_nvgre_encap_data *action_nvgre_encap_data;
3878 ret = parse_vc(ctx, token, str, len, buf, size);
3881 /* Nothing else to do if there is no buffer. */
3884 if (!out->args.vc.actions_n)
3886 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3887 /* Point to selected object. */
3888 ctx->object = out->args.vc.data;
3889 ctx->objmask = NULL;
3890 /* Set up default configuration. */
3891 action_nvgre_encap_data = ctx->object;
3892 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3893 .conf = (struct rte_flow_action_nvgre_encap){
3894 .definition = action_nvgre_encap_data->items,
3898 .type = RTE_FLOW_ITEM_TYPE_ETH,
3899 .spec = &action_nvgre_encap_data->item_eth,
3900 .mask = &rte_flow_item_eth_mask,
3903 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3904 .spec = &action_nvgre_encap_data->item_vlan,
3905 .mask = &rte_flow_item_vlan_mask,
3908 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3909 .spec = &action_nvgre_encap_data->item_ipv4,
3910 .mask = &rte_flow_item_ipv4_mask,
3913 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3914 .spec = &action_nvgre_encap_data->item_nvgre,
3915 .mask = &rte_flow_item_nvgre_mask,
3918 .type = RTE_FLOW_ITEM_TYPE_END,
3923 .tci = nvgre_encap_conf.vlan_tci,
3927 .src_addr = nvgre_encap_conf.ipv4_src,
3928 .dst_addr = nvgre_encap_conf.ipv4_dst,
3930 .item_nvgre.flow_id = 0,
3932 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3933 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3934 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3935 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace item slot 2 (the IPv4 item) with IPv6. */
3936 if (!nvgre_encap_conf.select_ipv4) {
3937 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3938 &nvgre_encap_conf.ipv6_src,
3939 sizeof(nvgre_encap_conf.ipv6_src));
3940 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3941 &nvgre_encap_conf.ipv6_dst,
3942 sizeof(nvgre_encap_conf.ipv6_dst));
3943 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3944 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3945 .spec = &action_nvgre_encap_data->item_ipv6,
3946 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize item slot 1. */
3949 if (!nvgre_encap_conf.select_vlan)
3950 action_nvgre_encap_data->items[1].type =
3951 RTE_FLOW_ITEM_TYPE_VOID;
3952 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3953 RTE_DIM(nvgre_encap_conf.tni));
3954 action->conf = &action_nvgre_encap_data->conf;
3958 /** Parse l2 encap action. */
/*
 * Serializes an Ethernet (+ optional VLAN) header into the raw-encap
 * buffer from the global l2_encap_conf ("set l2_encap").
 *
 * Fix: "memcpy(header, ð, ...)" was a mis-encoded "&eth" (the HTML
 * entity "&eth;" rendered as "ð"); restored to take the address of the
 * local eth item.
 */
3960 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3961 const char *str, unsigned int len,
3962 void *buf, unsigned int size)
3964 struct buffer *out = buf;
3965 struct rte_flow_action *action;
3966 struct action_raw_encap_data *action_encap_data;
3967 struct rte_flow_item_eth eth = { .type = 0, };
3968 struct rte_flow_item_vlan vlan = {
/* NOTE(review): reads mplsoudp_encap_conf.vlan_tci rather than an
 * l2_encap-specific TCI — confirm this sharing is intentional. */
3969 .tci = mplsoudp_encap_conf.vlan_tci,
3975 ret = parse_vc(ctx, token, str, len, buf, size);
3978 /* Nothing else to do if there is no buffer. */
3981 if (!out->args.vc.actions_n)
3983 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3984 /* Point to selected object. */
3985 ctx->object = out->args.vc.data;
3986 ctx->objmask = NULL;
3987 /* Copy the headers to the buffer. */
3988 action_encap_data = ctx->object;
3989 *action_encap_data = (struct action_raw_encap_data) {
3990 .conf = (struct rte_flow_action_raw_encap){
3991 .data = action_encap_data->data,
3995 header = action_encap_data->data;
/* Ether type reflects the first encapsulated layer. */
3996 if (l2_encap_conf.select_vlan)
3997 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3998 else if (l2_encap_conf.select_ipv4)
3999 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4001 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4002 memcpy(eth.dst.addr_bytes,
4003 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4004 memcpy(eth.src.addr_bytes,
4005 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4006 memcpy(header, &eth, sizeof(eth));
4007 header += sizeof(eth);
4008 if (l2_encap_conf.select_vlan) {
4009 if (l2_encap_conf.select_ipv4)
4010 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4012 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4013 memcpy(header, &vlan, sizeof(vlan));
4014 header += sizeof(vlan);
/* Final size is however many header bytes were serialized. */
4016 action_encap_data->conf.size = header -
4017 action_encap_data->data;
4018 action->conf = &action_encap_data->conf;
4022 /** Parse l2 decap action. */
/*
 * Serializes the Ethernet (+ optional VLAN) header to strip into the
 * raw-decap buffer, driven by l2_decap_conf ("set l2_decap").
 *
 * Fix: "memcpy(header, ð, ...)" was a mis-encoded "&eth" (HTML entity
 * "&eth;" rendered as "ð"); restored.
 */
4024 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4025 const char *str, unsigned int len,
4026 void *buf, unsigned int size)
4028 struct buffer *out = buf;
4029 struct rte_flow_action *action;
4030 struct action_raw_decap_data *action_decap_data;
4031 struct rte_flow_item_eth eth = { .type = 0, };
4032 struct rte_flow_item_vlan vlan = {
/* NOTE(review): TCI comes from mplsoudp_encap_conf — confirm this
 * cross-command sharing is intentional. */
4033 .tci = mplsoudp_encap_conf.vlan_tci,
4039 ret = parse_vc(ctx, token, str, len, buf, size);
4042 /* Nothing else to do if there is no buffer. */
4045 if (!out->args.vc.actions_n)
4047 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4048 /* Point to selected object. */
4049 ctx->object = out->args.vc.data;
4050 ctx->objmask = NULL;
4051 /* Copy the headers to the buffer. */
4052 action_decap_data = ctx->object;
4053 *action_decap_data = (struct action_raw_decap_data) {
4054 .conf = (struct rte_flow_action_raw_decap){
4055 .data = action_decap_data->data,
4059 header = action_decap_data->data;
4060 if (l2_decap_conf.select_vlan)
4061 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4062 memcpy(header, &eth, sizeof(eth));
4063 header += sizeof(eth);
4064 if (l2_decap_conf.select_vlan) {
4065 memcpy(header, &vlan, sizeof(vlan));
4066 header += sizeof(vlan);
4068 action_decap_data->conf.size = header -
4069 action_decap_data->data;
4070 action->conf = &action_decap_data->conf;
/* MPLS unicast Ethertype (IANA 0x8847), used as the GRE/MPLS protocol. */
4074 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4076 /** Parse MPLSOGRE encap action. */
/*
 * Serializes ETH / [VLAN] / IPv4-or-IPv6 / GRE / MPLS headers into the
 * raw-encap buffer from mplsogre_encap_conf ("set mplsogre_encap").
 *
 * Fix: "memcpy(header, ð, ...)" was a mis-encoded "&eth" (HTML entity
 * "&eth;" rendered as "ð"); restored.
 */
4078 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4079 const char *str, unsigned int len,
4080 void *buf, unsigned int size)
4082 struct buffer *out = buf;
4083 struct rte_flow_action *action;
4084 struct action_raw_encap_data *action_encap_data;
4085 struct rte_flow_item_eth eth = { .type = 0, };
4086 struct rte_flow_item_vlan vlan = {
4087 .tci = mplsogre_encap_conf.vlan_tci,
4090 struct rte_flow_item_ipv4 ipv4 = {
4092 .src_addr = mplsogre_encap_conf.ipv4_src,
4093 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4094 .next_proto_id = IPPROTO_GRE,
4095 .version_ihl = RTE_IPV4_VHL_DEF,
4096 .time_to_live = IPDEFTTL,
4099 struct rte_flow_item_ipv6 ipv6 = {
4101 .proto = IPPROTO_GRE,
4102 .hop_limits = IPDEFTTL,
4105 struct rte_flow_item_gre gre = {
4106 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4108 struct rte_flow_item_mpls mpls;
4112 ret = parse_vc(ctx, token, str, len, buf, size);
4115 /* Nothing else to do if there is no buffer. */
4118 if (!out->args.vc.actions_n)
4120 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4121 /* Point to selected object. */
4122 ctx->object = out->args.vc.data;
4123 ctx->objmask = NULL;
4124 /* Copy the headers to the buffer. */
4125 action_encap_data = ctx->object;
4126 *action_encap_data = (struct action_raw_encap_data) {
4127 .conf = (struct rte_flow_action_raw_encap){
4128 .data = action_encap_data->data,
4133 header = action_encap_data->data;
4134 if (mplsogre_encap_conf.select_vlan)
4135 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4136 else if (mplsogre_encap_conf.select_ipv4)
4137 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4139 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4140 memcpy(eth.dst.addr_bytes,
4141 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4142 memcpy(eth.src.addr_bytes,
4143 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4144 memcpy(header, &eth, sizeof(eth));
4145 header += sizeof(eth);
4146 if (mplsogre_encap_conf.select_vlan) {
4147 if (mplsogre_encap_conf.select_ipv4)
4148 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4150 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4151 memcpy(header, &vlan, sizeof(vlan));
4152 header += sizeof(vlan);
4154 if (mplsogre_encap_conf.select_ipv4) {
4155 memcpy(header, &ipv4, sizeof(ipv4));
4156 header += sizeof(ipv4);
4158 memcpy(&ipv6.hdr.src_addr,
4159 &mplsogre_encap_conf.ipv6_src,
4160 sizeof(mplsogre_encap_conf.ipv6_src));
4161 memcpy(&ipv6.hdr.dst_addr,
4162 &mplsogre_encap_conf.ipv6_dst,
4163 sizeof(mplsogre_encap_conf.ipv6_dst));
4164 memcpy(header, &ipv6, sizeof(ipv6));
4165 header += sizeof(ipv6);
4167 memcpy(header, &gre, sizeof(gre));
4168 header += sizeof(gre);
4169 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4170 RTE_DIM(mplsogre_encap_conf.label));
/* Mark bottom-of-stack: this is the only MPLS label pushed. */
4171 mpls.label_tc_s[2] |= 0x1;
4172 memcpy(header, &mpls, sizeof(mpls));
4173 header += sizeof(mpls);
4174 action_encap_data->conf.size = header -
4175 action_encap_data->data;
4176 action->conf = &action_encap_data->conf;
4180 /** Parse MPLSOGRE decap action. */
/*
 * Serializes the header stack to strip (ETH / [VLAN] / IP / GRE /
 * zeroed MPLS) into the raw-decap buffer.
 *
 * Fix: "memcpy(header, ð, ...)" was a mis-encoded "&eth" (HTML entity
 * "&eth;" rendered as "ð"); restored.
 *
 * NOTE(review): apart from the initial select_vlan test, this decap
 * path reads mplsogre_encap_conf (addresses, IP version, VLAN) instead
 * of mplsogre_decap_conf — confirm this mirroring of the encap
 * configuration is intentional.
 */
4182 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4183 const char *str, unsigned int len,
4184 void *buf, unsigned int size)
4186 struct buffer *out = buf;
4187 struct rte_flow_action *action;
4188 struct action_raw_decap_data *action_decap_data;
4189 struct rte_flow_item_eth eth = { .type = 0, };
4190 struct rte_flow_item_vlan vlan = {.tci = 0};
4191 struct rte_flow_item_ipv4 ipv4 = {
4193 .next_proto_id = IPPROTO_GRE,
4196 struct rte_flow_item_ipv6 ipv6 = {
4198 .proto = IPPROTO_GRE,
4201 struct rte_flow_item_gre gre = {
4202 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4204 struct rte_flow_item_mpls mpls;
4208 ret = parse_vc(ctx, token, str, len, buf, size);
4211 /* Nothing else to do if there is no buffer. */
4214 if (!out->args.vc.actions_n)
4216 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4217 /* Point to selected object. */
4218 ctx->object = out->args.vc.data;
4219 ctx->objmask = NULL;
4220 /* Copy the headers to the buffer. */
4221 action_decap_data = ctx->object;
4222 *action_decap_data = (struct action_raw_decap_data) {
4223 .conf = (struct rte_flow_action_raw_decap){
4224 .data = action_decap_data->data,
4228 header = action_decap_data->data;
4229 if (mplsogre_decap_conf.select_vlan)
4230 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4231 else if (mplsogre_encap_conf.select_ipv4)
4232 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4234 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4235 memcpy(eth.dst.addr_bytes,
4236 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4237 memcpy(eth.src.addr_bytes,
4238 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4239 memcpy(header, &eth, sizeof(eth));
4240 header += sizeof(eth);
4241 if (mplsogre_encap_conf.select_vlan) {
4242 if (mplsogre_encap_conf.select_ipv4)
4243 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4245 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4246 memcpy(header, &vlan, sizeof(vlan));
4247 header += sizeof(vlan);
4249 if (mplsogre_encap_conf.select_ipv4) {
4250 memcpy(header, &ipv4, sizeof(ipv4));
4251 header += sizeof(ipv4);
4253 memcpy(header, &ipv6, sizeof(ipv6));
4254 header += sizeof(ipv6);
4256 memcpy(header, &gre, sizeof(gre));
4257 header += sizeof(gre);
/* Decap only matches structure: MPLS content is irrelevant, zero it. */
4258 memset(&mpls, 0, sizeof(mpls));
4259 memcpy(header, &mpls, sizeof(mpls));
4260 header += sizeof(mpls);
4261 action_decap_data->conf.size = header -
4262 action_decap_data->data;
4263 action->conf = &action_decap_data->conf;
4267 /** Parse MPLSOUDP encap action. */
/*
 * Serializes ETH / [VLAN] / IPv4-or-IPv6 / UDP / MPLS headers into the
 * raw-encap buffer from mplsoudp_encap_conf ("set mplsoudp_encap").
 *
 * Fix: "memcpy(header, ð, ...)" was a mis-encoded "&eth" (HTML entity
 * "&eth;" rendered as "ð"); restored.
 */
4269 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4270 const char *str, unsigned int len,
4271 void *buf, unsigned int size)
4273 struct buffer *out = buf;
4274 struct rte_flow_action *action;
4275 struct action_raw_encap_data *action_encap_data;
4276 struct rte_flow_item_eth eth = { .type = 0, };
4277 struct rte_flow_item_vlan vlan = {
4278 .tci = mplsoudp_encap_conf.vlan_tci,
4281 struct rte_flow_item_ipv4 ipv4 = {
4283 .src_addr = mplsoudp_encap_conf.ipv4_src,
4284 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4285 .next_proto_id = IPPROTO_UDP,
4286 .version_ihl = RTE_IPV4_VHL_DEF,
4287 .time_to_live = IPDEFTTL,
4290 struct rte_flow_item_ipv6 ipv6 = {
4292 .proto = IPPROTO_UDP,
4293 .hop_limits = IPDEFTTL,
4296 struct rte_flow_item_udp udp = {
4298 .src_port = mplsoudp_encap_conf.udp_src,
4299 .dst_port = mplsoudp_encap_conf.udp_dst,
4302 struct rte_flow_item_mpls mpls;
4306 ret = parse_vc(ctx, token, str, len, buf, size);
4309 /* Nothing else to do if there is no buffer. */
4312 if (!out->args.vc.actions_n)
4314 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4315 /* Point to selected object. */
4316 ctx->object = out->args.vc.data;
4317 ctx->objmask = NULL;
4318 /* Copy the headers to the buffer. */
4319 action_encap_data = ctx->object;
4320 *action_encap_data = (struct action_raw_encap_data) {
4321 .conf = (struct rte_flow_action_raw_encap){
4322 .data = action_encap_data->data,
4327 header = action_encap_data->data;
4328 if (mplsoudp_encap_conf.select_vlan)
4329 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4330 else if (mplsoudp_encap_conf.select_ipv4)
4331 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4333 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4334 memcpy(eth.dst.addr_bytes,
4335 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4336 memcpy(eth.src.addr_bytes,
4337 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4338 memcpy(header, &eth, sizeof(eth));
4339 header += sizeof(eth);
4340 if (mplsoudp_encap_conf.select_vlan) {
4341 if (mplsoudp_encap_conf.select_ipv4)
4342 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4344 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4345 memcpy(header, &vlan, sizeof(vlan));
4346 header += sizeof(vlan);
4348 if (mplsoudp_encap_conf.select_ipv4) {
4349 memcpy(header, &ipv4, sizeof(ipv4));
4350 header += sizeof(ipv4);
4352 memcpy(&ipv6.hdr.src_addr,
4353 &mplsoudp_encap_conf.ipv6_src,
4354 sizeof(mplsoudp_encap_conf.ipv6_src));
4355 memcpy(&ipv6.hdr.dst_addr,
4356 &mplsoudp_encap_conf.ipv6_dst,
4357 sizeof(mplsoudp_encap_conf.ipv6_dst));
4358 memcpy(header, &ipv6, sizeof(ipv6));
4359 header += sizeof(ipv6);
4361 memcpy(header, &udp, sizeof(udp));
4362 header += sizeof(udp);
4363 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4364 RTE_DIM(mplsoudp_encap_conf.label));
/* Mark bottom-of-stack: this is the only MPLS label pushed. */
4365 mpls.label_tc_s[2] |= 0x1;
4366 memcpy(header, &mpls, sizeof(mpls));
4367 header += sizeof(mpls);
4368 action_encap_data->conf.size = header -
4369 action_encap_data->data;
4370 action->conf = &action_encap_data->conf;
4374 /** Parse MPLSOUDP decap action. */
/*
 * Builds an RTE_FLOW_ACTION_TYPE_RAW_DECAP template describing the stack
 * of headers (Ethernet [+ VLAN] + IP + UDP + zeroed MPLS) to strip.
 * NOTE(review): header selection intentionally reads mplsoudp_encap_conf
 * (except select_vlan which reads mplsoudp_decap_conf) — decap mirrors
 * the encap configuration; confirm against "set mplsoudp_decap" docs.
 * NOTE(review): this listing is elided; else-branches, returns and braces
 * of the original are not all visible here.
 * Fix applied: "ð" below was HTML-entity mojibake for "&eth".
 */
4376 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4377 const char *str, unsigned int len,
4378 void *buf, unsigned int size)
4380 struct buffer *out = buf;
4381 struct rte_flow_action *action;
4382 struct action_raw_decap_data *action_decap_data;
4383 struct rte_flow_item_eth eth = { .type = 0, };
4384 struct rte_flow_item_vlan vlan = {.tci = 0};
4385 struct rte_flow_item_ipv4 ipv4 = {
4387 .next_proto_id = IPPROTO_UDP,
4390 struct rte_flow_item_ipv6 ipv6 = {
4392 .proto = IPPROTO_UDP,
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4395 struct rte_flow_item_udp udp = {
4397 .dst_port = rte_cpu_to_be_16(6635),
4400 struct rte_flow_item_mpls mpls;
4404 ret = parse_vc(ctx, token, str, len, buf, size);
4407 /* Nothing else to do if there is no buffer. */
4410 if (!out->args.vc.actions_n)
4412 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4413 /* Point to selected object. */
4414 ctx->object = out->args.vc.data;
4415 ctx->objmask = NULL;
4416 /* Copy the headers to the buffer. */
4417 action_decap_data = ctx->object;
4418 *action_decap_data = (struct action_raw_decap_data) {
4419 .conf = (struct rte_flow_action_raw_decap){
4420 .data = action_decap_data->data,
4424 header = action_decap_data->data;
4425 if (mplsoudp_decap_conf.select_vlan)
4426 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4427 else if (mplsoudp_encap_conf.select_ipv4)
4428 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4430 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4431 memcpy(eth.dst.addr_bytes,
4432 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4433 memcpy(eth.src.addr_bytes,
4434 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4435 memcpy(header, &eth, sizeof(eth));
4436 header += sizeof(eth);
4437 if (mplsoudp_encap_conf.select_vlan) {
4438 if (mplsoudp_encap_conf.select_ipv4)
4439 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4441 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4442 memcpy(header, &vlan, sizeof(vlan));
4443 header += sizeof(vlan);
4445 if (mplsoudp_encap_conf.select_ipv4) {
4446 memcpy(header, &ipv4, sizeof(ipv4));
4447 header += sizeof(ipv4);
4449 memcpy(header, &ipv6, sizeof(ipv6));
4450 header += sizeof(ipv6);
4452 memcpy(header, &udp, sizeof(udp));
4453 header += sizeof(udp);
/* MPLS content is irrelevant for decap: all-zero placeholder. */
4454 memset(&mpls, 0, sizeof(mpls));
4455 memcpy(header, &mpls, sizeof(mpls));
4456 header += sizeof(mpls);
4457 action_decap_data->conf.size = header -
4458 action_decap_data->data;
4459 action->conf = &action_decap_data->conf;
/*
 * Parse the raw_encap action: point the action at the pre-built header
 * bytes stored by "set raw_encap" in the global raw_encap_conf.
 * Data is stored from the tail of the fixed-size buffer, so the start
 * address is tail minus configured size.
 * NOTE(review): listing is elided; returns/braces not all visible.
 */
4464 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4465 const char *str, unsigned int len, void *buf,
4468 struct buffer *out = buf;
4469 struct rte_flow_action *action;
4470 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4471 uint8_t *data = NULL;
4474 ret = parse_vc(ctx, token, str, len, buf, size);
4477 /* Nothing else to do if there is no buffer. */
4480 if (!out->args.vc.actions_n)
4482 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4483 /* Point to selected object. */
4484 ctx->object = out->args.vc.data;
4485 ctx->objmask = NULL;
4486 /* Copy the headers to the buffer. */
4487 action_raw_encap_conf = ctx->object;
4488 /* data stored from tail of data buffer */
4489 data = (uint8_t *)&(raw_encap_conf.data) +
4490 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4491 action_raw_encap_conf->data = data;
4492 action_raw_encap_conf->preserve = NULL;
4493 action_raw_encap_conf->size = raw_encap_conf.size;
4494 action->conf = action_raw_encap_conf;
/*
 * Parse the raw_decap action: same scheme as raw_encap, reading the
 * header template from raw_decap_conf ("set raw_decap" command).
 * NOTE(review): listing is elided; returns/braces not all visible.
 */
4499 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4500 const char *str, unsigned int len, void *buf,
4503 struct buffer *out = buf;
4504 struct rte_flow_action *action;
4505 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4506 uint8_t *data = NULL;
4509 ret = parse_vc(ctx, token, str, len, buf, size);
4512 /* Nothing else to do if there is no buffer. */
4515 if (!out->args.vc.actions_n)
4517 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4518 /* Point to selected object. */
4519 ctx->object = out->args.vc.data;
4520 ctx->objmask = NULL;
4521 /* Copy the headers to the buffer. */
4522 action_raw_decap_conf = ctx->object;
4523 /* data stored from tail of data buffer */
4524 data = (uint8_t *)&(raw_decap_conf.data) +
4525 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4526 action_raw_decap_conf->data = data;
4527 action_raw_decap_conf->size = raw_decap_conf.size;
4528 action->conf = action_raw_decap_conf;
4532 /** Parse tokens for destroy command. */
/*
 * First invocation initializes out->command and the rule-ID array just
 * past the buffer header; subsequent invocations append one rule ID
 * each, bounds-checked against the caller-supplied size.
 * NOTE(review): listing is elided; returns/braces not all visible.
 */
4534 parse_destroy(struct context *ctx, const struct token *token,
4535 const char *str, unsigned int len,
4536 void *buf, unsigned int size)
4538 struct buffer *out = buf;
4540 /* Token name must match. */
4541 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4543 /* Nothing else to do if there is no buffer. */
4546 if (!out->command) {
4547 if (ctx->curr != DESTROY)
4549 if (sizeof(*out) > size)
4551 out->command = ctx->curr;
4554 ctx->objmask = NULL;
4555 out->args.destroy.rule =
4556 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next rule-ID slot would overrun the buffer. */
4560 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4561 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4564 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4565 ctx->objmask = NULL;
4569 /** Parse tokens for flush command. */
/*
 * Flush takes no per-rule arguments: only record the command on the
 * first matching token. NOTE(review): listing is elided.
 */
4571 parse_flush(struct context *ctx, const struct token *token,
4572 const char *str, unsigned int len,
4573 void *buf, unsigned int size)
4575 struct buffer *out = buf;
4577 /* Token name must match. */
4578 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4580 /* Nothing else to do if there is no buffer. */
4583 if (!out->command) {
4584 if (ctx->curr != FLUSH)
4586 if (sizeof(*out) > size)
4588 out->command = ctx->curr;
4591 ctx->objmask = NULL;
4596 /** Parse tokens for query command. */
/*
 * Record the QUERY command on the first matching token; the rule ID and
 * action name are filled by later tokens (not visible in this listing).
 */
4598 parse_query(struct context *ctx, const struct token *token,
4599 const char *str, unsigned int len,
4600 void *buf, unsigned int size)
4602 struct buffer *out = buf;
4604 /* Token name must match. */
4605 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4607 /* Nothing else to do if there is no buffer. */
4610 if (!out->command) {
4611 if (ctx->curr != QUERY)
4613 if (sizeof(*out) > size)
4615 out->command = ctx->curr;
4618 ctx->objmask = NULL;
4623 /** Parse action names. */
/*
 * Match str against the names of all tokens reachable from next_action[]
 * and store the matching action's private data at arg->offset in the
 * current object. On failure the popped argument is pushed back so the
 * stack is left unchanged. NOTE(review): listing is elided.
 */
4625 parse_action(struct context *ctx, const struct token *token,
4626 const char *str, unsigned int len,
4627 void *buf, unsigned int size)
4629 struct buffer *out = buf;
4630 const struct arg *arg = pop_args(ctx);
4634 /* Argument is expected. */
4637 /* Parse action name. */
4638 for (i = 0; next_action[i]; ++i) {
4639 const struct parse_action_priv *priv;
4641 token = &token_list[next_action[i]];
4642 if (strcmp_partial(token->name, str, len))
4648 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on the mismatch path. */
4654 push_args(ctx, arg);
4658 /** Parse tokens for list command. */
/*
 * Mirrors parse_destroy(): first call records the command and anchors a
 * group-ID array after the buffer header; later calls append one group
 * ID each with an overflow check. NOTE(review): listing is elided.
 */
4660 parse_list(struct context *ctx, const struct token *token,
4661 const char *str, unsigned int len,
4662 void *buf, unsigned int size)
4664 struct buffer *out = buf;
4666 /* Token name must match. */
4667 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4669 /* Nothing else to do if there is no buffer. */
4672 if (!out->command) {
4673 if (ctx->curr != LIST)
4675 if (sizeof(*out) > size)
4677 out->command = ctx->curr;
4680 ctx->objmask = NULL;
4681 out->args.list.group =
4682 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
4686 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4687 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4690 ctx->object = out->args.list.group + out->args.list.group_n++;
4691 ctx->objmask = NULL;
4695 /** Parse tokens for isolate command. */
/*
 * Record the ISOLATE command; the boolean enable/disable argument is
 * handled by a later token (not visible in this listing).
 */
4697 parse_isolate(struct context *ctx, const struct token *token,
4698 const char *str, unsigned int len,
4699 void *buf, unsigned int size)
4701 struct buffer *out = buf;
4703 /* Token name must match. */
4704 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4706 /* Nothing else to do if there is no buffer. */
4709 if (!out->command) {
4710 if (ctx->curr != ISOLATE)
4712 if (sizeof(*out) > size)
4714 out->command = ctx->curr;
4717 ctx->objmask = NULL;
4723 * Parse signed/unsigned integers 8 to 64-bit long.
4725 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts str with strtoimax()/strtoumax() (base auto-detected),
 * range-checks against arg->min/max, then stores the value at
 * arg->offset with optional host-to-big-endian conversion (arg->hton).
 * Bit-field arguments go through arg_entry_bf_fill() instead.
 * NOTE(review): listing is elided; error paths/breaks not all visible.
 */
4729 parse_int(struct context *ctx, const struct token *token,
4730 const char *str, unsigned int len,
4731 void *buf, unsigned int size)
4733 const struct arg *arg = pop_args(ctx);
4738 /* Argument is expected. */
4743 (uintmax_t)strtoimax(str, &end, 0) :
4744 strtoumax(str, &end, 0);
/* Whole token must have been consumed and no conversion error raised. */
4745 if (errno || (size_t)(end - str) != len)
4748 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4749 (intmax_t)u > (intmax_t)arg->max)) ||
4750 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field destination: value into object, all-ones into objmask. */
4755 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4756 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4760 buf = (uint8_t *)ctx->object + arg->offset;
4762 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
/* Dispatch on destination width. */
4766 case sizeof(uint8_t):
4767 *(uint8_t *)buf = u;
4769 case sizeof(uint16_t):
4770 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI): byte order depends on arg->hton. */
4772 case sizeof(uint8_t [3]):
4773 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4775 ((uint8_t *)buf)[0] = u;
4776 ((uint8_t *)buf)[1] = u >> 8;
4777 ((uint8_t *)buf)[2] = u >> 16;
4781 ((uint8_t *)buf)[0] = u >> 16;
4782 ((uint8_t *)buf)[1] = u >> 8;
4783 ((uint8_t *)buf)[2] = u;
4785 case sizeof(uint32_t):
4786 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4788 case sizeof(uint64_t):
4789 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into objmask when one is set. */
4794 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4796 buf = (uint8_t *)ctx->objmask + arg->offset;
4801 push_args(ctx, arg);
4808 * Three arguments (ctx->args) are retrieved from the stack to store data,
4809 * its actual length and address (in that order).
/*
 * Copies a bounded, not-necessarily-NUL-terminated string into the
 * current object, writes its length via parse_int(), zero-pads the
 * remainder and optionally records the data address. On any failure
 * the popped arguments are pushed back in reverse order.
 * NOTE(review): listing is elided; error paths not all visible.
 */
4812 parse_string(struct context *ctx, const struct token *token,
4813 const char *str, unsigned int len,
4814 void *buf, unsigned int size)
4816 const struct arg *arg_data = pop_args(ctx);
4817 const struct arg *arg_len = pop_args(ctx);
4818 const struct arg *arg_addr = pop_args(ctx);
4819 char tmp[16]; /* Ought to be enough. */
4822 /* Arguments are expected. */
4826 push_args(ctx, arg_data);
4830 push_args(ctx, arg_len);
4831 push_args(ctx, arg_data);
4834 size = arg_data->size;
4835 /* Bit-mask fill is not supported. */
4836 if (arg_data->mask || size < len)
4840 /* Let parse_int() fill length information first. */
4841 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4844 push_args(ctx, arg_len);
4845 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4850 buf = (uint8_t *)ctx->object + arg_data->offset;
4851 /* Output buffer is not necessarily NUL-terminated. */
4852 memcpy(buf, str, len);
4853 memset((uint8_t *)buf + len, 0x00, size - len);
4855 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4856 /* Save address if requested. */
4857 if (arg_addr->size) {
4858 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4860 (uint8_t *)ctx->object + arg_data->offset
4864 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4866 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore the three popped arguments. */
4872 push_args(ctx, arg_addr);
4873 push_args(ctx, arg_len);
4874 push_args(ctx, arg_data);
/*
 * Convert an even-length hex string into bytes: two characters at a
 * time via strtoul(base 16). *size is the input length on entry;
 * presumably updated to the byte count on success — elided here.
 */
4879 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4885 /* Check input parameters */
4886 if ((src == NULL) ||
4892 /* Convert chars to bytes */
4893 for (i = 0, len = 0; i < *size; i += 2) {
4894 snprintf(tmp, 3, "%s", src + i);
4895 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hexadecimal byte-string token ("0x..." prefix) into the
 * current object: decode with parse_hex_string() into a 256-byte
 * scratch buffer, let parse_int() record the decoded length, then copy
 * and zero-pad like parse_string(). Pops data/len/addr arguments and
 * restores them on failure. NOTE(review): listing is elided.
 */
4910 parse_hex(struct context *ctx, const struct token *token,
4911 const char *str, unsigned int len,
4912 void *buf, unsigned int size)
4914 const struct arg *arg_data = pop_args(ctx);
4915 const struct arg *arg_len = pop_args(ctx);
4916 const struct arg *arg_addr = pop_args(ctx);
4917 char tmp[16]; /* Ought to be enough. */
4919 unsigned int hexlen = len;
4920 unsigned int length = 256;
4921 uint8_t hex_tmp[length];
4923 /* Arguments are expected. */
4927 push_args(ctx, arg_data);
4931 push_args(ctx, arg_len);
4932 push_args(ctx, arg_data);
4935 size = arg_data->size;
4936 /* Bit-mask fill is not supported. */
4942 /* translate bytes string to array. */
4943 if (str[0] == '0' && ((str[1] == 'x') ||
4948 if (hexlen > length)
4950 ret = parse_hex_string(str, hex_tmp, &hexlen);
4953 /* Let parse_int() fill length information first. */
4954 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4957 push_args(ctx, arg_len);
4958 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4963 buf = (uint8_t *)ctx->object + arg_data->offset;
4964 /* Output buffer is not necessarily NUL-terminated. */
4965 memcpy(buf, hex_tmp, hexlen);
4966 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
4968 memset((uint8_t *)ctx->objmask + arg_data->offset,
4970 /* Save address if requested. */
4971 if (arg_addr->size) {
4972 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4974 (uint8_t *)ctx->object + arg_data->offset
4978 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4980 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore the three popped arguments. */
4986 push_args(ctx, arg_addr);
4987 push_args(ctx, arg_len);
4988 push_args(ctx, arg_data);
4994 * Parse a MAC address.
4996 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Delegates to cmdline_parse_etheraddr(); destination must be exactly
 * sizeof(struct rte_ether_addr) and is stored in network byte order.
 * NOTE(review): listing is elided; error paths not all visible.
 */
5000 parse_mac_addr(struct context *ctx, const struct token *token,
5001 const char *str, unsigned int len,
5002 void *buf, unsigned int size)
5004 const struct arg *arg = pop_args(ctx);
5005 struct rte_ether_addr tmp;
5009 /* Argument is expected. */
5013 /* Bit-mask fill is not supported. */
5014 if (arg->mask || size != sizeof(tmp))
5016 /* Only network endian is supported. */
5019 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
5020 if (ret < 0 || (unsigned int)ret != len)
5024 buf = (uint8_t *)ctx->object + arg->offset;
5025 memcpy(buf, &tmp, size);
5027 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5030 push_args(ctx, arg);
5035 * Parse an IPv4 address.
5037 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) first; on failure falls back to parse_int()
 * so plain integers are also accepted. Stored in network byte order.
 * NOTE(review): listing is elided; error paths not all visible.
 */
5041 parse_ipv4_addr(struct context *ctx, const struct token *token,
5042 const char *str, unsigned int len,
5043 void *buf, unsigned int size)
5045 const struct arg *arg = pop_args(ctx);
5050 /* Argument is expected. */
5054 /* Bit-mask fill is not supported. */
5055 if (arg->mask || size != sizeof(tmp))
5057 /* Only network endian is supported. */
5060 memcpy(str2, str, len);
5062 ret = inet_pton(AF_INET, str2, &tmp);
5064 /* Attempt integer parsing. */
5065 push_args(ctx, arg);
5066 return parse_int(ctx, token, str, len, buf, size);
5070 buf = (uint8_t *)ctx->object + arg->offset;
5071 memcpy(buf, &tmp, size);
5073 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5076 push_args(ctx, arg);
5081 * Parse an IPv6 address.
5083 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Uses inet_pton(AF_INET6) on a NUL-terminated copy of the token;
 * unlike parse_ipv4_addr() there is no integer fallback.
 * NOTE(review): listing is elided; error paths not all visible.
 */
5087 parse_ipv6_addr(struct context *ctx, const struct token *token,
5088 const char *str, unsigned int len,
5089 void *buf, unsigned int size)
5091 const struct arg *arg = pop_args(ctx);
5093 struct in6_addr tmp;
5097 /* Argument is expected. */
5101 /* Bit-mask fill is not supported. */
5102 if (arg->mask || size != sizeof(tmp))
5104 /* Only network endian is supported. */
5107 memcpy(str2, str, len);
5109 ret = inet_pton(AF_INET6, str2, &tmp);
5114 buf = (uint8_t *)ctx->object + arg->offset;
5115 memcpy(buf, &tmp, size);
5117 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5120 push_args(ctx, arg);
5124 /** Boolean values (even indices stand for false). */
/* NOTE(review): the entries of this table are elided in this listing. */
5125 static const char *const boolean_name[] = {
5135 * Parse a boolean value.
5137 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Maps a name found in boolean_name[] to "0"/"1" (even index = false,
 * per the table's comment), then reuses parse_int() for storage.
 * Unknown tokens fall through to parse_int() unchanged.
 */
5141 parse_boolean(struct context *ctx, const struct token *token,
5142 const char *str, unsigned int len,
5143 void *buf, unsigned int size)
5145 const struct arg *arg = pop_args(ctx);
5149 /* Argument is expected. */
5152 for (i = 0; boolean_name[i]; ++i)
5153 if (!strcmp_partial(boolean_name[i], str, len))
5155 /* Process token as integer. */
5156 if (boolean_name[i])
5157 str = i & 1 ? "1" : "0";
5158 push_args(ctx, arg);
5159 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not that of the substitute. */
5160 return ret > 0 ? (int)len : ret;
5163 /** Parse port and update context. */
/*
 * Parses the port ID into a stack-local buffer (compound literal) via
 * parse_int() and mirrors the result into ctx->port.
 * NOTE(review): listing is elided; setup/return lines not all visible.
 */
5165 parse_port(struct context *ctx, const struct token *token,
5166 const char *str, unsigned int len,
5167 void *buf, unsigned int size)
5169 struct buffer *out = &(struct buffer){ .port = 0 };
5177 ctx->objmask = NULL;
5178 size = sizeof(*out);
5180 ret = parse_int(ctx, token, str, len, out, size);
5182 ctx->port = out->port;
5188 /** Parse set command, initialize output buffer for subsequent tokens. */
/*
 * Records the current sub-command (raw_encap/raw_decap) into the output
 * buffer after the usual name-match and size checks.
 * NOTE(review): listing is elided; returns not all visible.
 */
5190 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5191 const char *str, unsigned int len,
5192 void *buf, unsigned int size)
5194 struct buffer *out = buf;
5196 /* Token name must match. */
5197 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5199 /* Nothing else to do if there is no buffer. */
5202 /* Make sure buffer is large enough. */
5203 if (size < sizeof(*out))
5206 ctx->objmask = NULL;
5209 out->command = ctx->curr;
5214 * Parse set raw_encap/raw_decap command,
5215 * initialize output buffer for subsequent tokens.
/*
 * Zeroes the buffer header, poisons the spare space with 0x22 (helps
 * catch reads of uninitialized bytes), and anchors the pattern array
 * just past the header. Only the pattern is needed for "set" commands.
 * NOTE(review): listing is elided; returns/braces not all visible.
 */
5218 parse_set_init(struct context *ctx, const struct token *token,
5219 const char *str, unsigned int len,
5220 void *buf, unsigned int size)
5222 struct buffer *out = buf;
5224 /* Token name must match. */
5225 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5227 /* Nothing else to do if there is no buffer. */
5230 /* Make sure buffer is large enough. */
5231 if (size < sizeof(*out))
5233 /* Initialize buffer. */
5234 memset(out, 0x00, sizeof(*out));
5235 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5238 ctx->objmask = NULL;
5239 if (!out->command) {
5240 if (ctx->curr != SET)
5242 if (sizeof(*out) > size)
5244 out->command = ctx->curr;
/* Spec data grows downward from the end of the buffer. */
5245 out->args.vc.data = (uint8_t *)out + size;
5246 /* All we need is pattern */
5247 out->args.vc.pattern =
5248 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5250 ctx->object = out->args.vc.pattern;
5255 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
5257 comp_none(struct context *ctx, const struct token *token,
5258 unsigned int ent, char *buf, unsigned int size)
5268 /** Complete boolean values. */
/* Copy the ent-th boolean alias into buf; count entries when buf==NULL. */
5270 comp_boolean(struct context *ctx, const struct token *token,
5271 unsigned int ent, char *buf, unsigned int size)
5277 for (i = 0; boolean_name[i]; ++i)
5278 if (buf && i == ent)
5279 return strlcpy(buf, boolean_name[i], size);
5285 /** Complete action names. */
/* Copy the ent-th action-token name into buf; count when buf==NULL. */
5287 comp_action(struct context *ctx, const struct token *token,
5288 unsigned int ent, char *buf, unsigned int size)
5294 for (i = 0; next_action[i]; ++i)
5295 if (buf && i == ent)
5296 return strlcpy(buf, token_list[next_action[i]].name,
5303 /** Complete available ports. */
/* Enumerate active ethdev port IDs; emit the ent-th as a decimal string. */
5305 comp_port(struct context *ctx, const struct token *token,
5306 unsigned int ent, char *buf, unsigned int size)
5313 RTE_ETH_FOREACH_DEV(p) {
5314 if (buf && i == ent)
5315 return snprintf(buf, size, "%u", p);
5323 /** Complete available rule IDs. */
/*
 * Walk the flow list of the port currently selected in the context and
 * emit the ent-th rule ID. Bails out silently for invalid or "all"
 * port selections (DISABLED_WARN suppresses the warning).
 */
5325 comp_rule_id(struct context *ctx, const struct token *token,
5326 unsigned int ent, char *buf, unsigned int size)
5329 struct rte_port *port;
5330 struct port_flow *pf;
5333 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5334 ctx->port == (portid_t)RTE_PORT_ALL)
5336 port = &ports[ctx->port];
5337 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5338 if (buf && i == ent)
5339 return snprintf(buf, size, "%u", pf->id);
5347 /** Complete type field for RSS action. */
/* Suggest RSS hash-type names from rss_type_table[], then "end". */
5349 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5350 unsigned int ent, char *buf, unsigned int size)
5356 for (i = 0; rss_type_table[i].str; ++i)
5361 return strlcpy(buf, rss_type_table[ent].str, size);
5363 return snprintf(buf, size, "end");
5367 /** Complete queue field for RSS action. */
/* Suggest queue indices (0..n) followed by the "end" terminator. */
5369 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5370 unsigned int ent, char *buf, unsigned int size)
5377 return snprintf(buf, size, "%u", ent);
5379 return snprintf(buf, size, "end");
5383 /** Internal context. */
/* Single shared parser state: the cmdline API gives callbacks no cookie. */
5384 static struct context cmd_flow_context;
5386 /** Global parser instance (cmdline API). */
/* Forward declarations; definitions appear later in this file. */
5387 cmdline_parse_inst_t cmd_flow;
5388 cmdline_parse_inst_t cmd_set_raw;
5390 /** Initialize context. */
/* Reset per-command parser state (field resets elided in this listing). */
5392 cmd_flow_context_init(struct context *ctx)
5394 /* A full memset() is not necessary. */
5404 ctx->objmask = NULL;
5407 /** Parse a token (cmdline API). */
/*
 * Tokenize src up to whitespace/'#', try each candidate token from the
 * top of the ctx->next stack, then push the accepted token's follow-up
 * token lists and arguments. NOTE(review): listing is elided; several
 * control-flow lines (returns, EOL handling) are not visible.
 */
5409 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5412 struct context *ctx = &cmd_flow_context;
5413 const struct token *token;
5414 const enum index *list;
5419 token = &token_list[ctx->curr];
5420 /* Check argument length. */
5423 for (len = 0; src[len]; ++len)
5424 if (src[len] == '#' || isspace(src[len]))
5428 /* Last argument and EOL detection. */
5429 for (i = len; src[i]; ++i)
5430 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5432 else if (!isspace(src[i])) {
5437 if (src[i] == '\r' || src[i] == '\n') {
5441 /* Initialize context if necessary. */
5442 if (!ctx->next_num) {
5445 ctx->next[ctx->next_num++] = token->next[0];
5447 /* Process argument through candidates. */
5448 ctx->prev = ctx->curr;
5449 list = ctx->next[ctx->next_num - 1];
5450 for (i = 0; list[i]; ++i) {
5451 const struct token *next = &token_list[list[i]];
5454 ctx->curr = list[i];
/* Tokens without a dedicated callback use the default matcher. */
5456 tmp = next->call(ctx, next, src, len, result, size);
5458 tmp = parse_default(ctx, next, src, len, result, size);
5459 if (tmp == -1 || tmp != len)
/* Push subsequent tokens if any. */
5469 for (i = 0; token->next[i]; ++i) {
5470 if (ctx->next_num == RTE_DIM(ctx->next))
5472 ctx->next[ctx->next_num++] = token->next[i];
5474 /* Push arguments if any. */
5476 for (i = 0; token->args[i]; ++i) {
5477 if (ctx->args_num == RTE_DIM(ctx->args))
5479 ctx->args[ctx->args_num++] = token->args[i];
5484 /** Return number of completion entries (cmdline API). */
/*
 * If the current candidate list holds exactly one token with its own
 * completion callback, delegate the count to it; otherwise return the
 * number of candidate tokens. NOTE(review): listing is elided.
 */
5486 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5488 struct context *ctx = &cmd_flow_context;
5489 const struct token *token = &token_list[ctx->curr];
5490 const enum index *list;
5494 /* Count number of tokens in current list. */
5496 list = ctx->next[ctx->next_num - 1];
5498 list = token->next[0];
5499 for (i = 0; list[i]; ++i)
5504 * If there is a single token, use its completion callback, otherwise
5505 * return the number of entries.
5507 token = &token_list[list[0]];
5508 if (i == 1 && token->comp) {
5509 /* Save index for cmd_flow_get_help(). */
5510 ctx->prev = list[0];
5511 return token->comp(ctx, token, 0, NULL, 0);
5516 /** Return a completion entry (cmdline API). */
/*
 * Companion to cmd_flow_complete_get_nb(): fetch the index-th entry,
 * either from the single token's completion callback or from the
 * candidate token names. NOTE(review): listing is elided.
 */
5518 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5519 char *dst, unsigned int size)
5521 struct context *ctx = &cmd_flow_context;
5522 const struct token *token = &token_list[ctx->curr];
5523 const enum index *list;
5527 /* Count number of tokens in current list. */
5529 list = ctx->next[ctx->next_num - 1];
5531 list = token->next[0];
5532 for (i = 0; list[i]; ++i)
5536 /* If there is a single token, use its completion callback. */
5537 token = &token_list[list[0]];
5538 if (i == 1 && token->comp) {
5539 /* Save index for cmd_flow_get_help(). */
5540 ctx->prev = list[0];
5541 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5543 /* Otherwise make sure the index is valid and use defaults. */
5546 token = &token_list[list[index]];
5547 strlcpy(dst, token->name, size);
5548 /* Save index for cmd_flow_get_help(). */
5549 ctx->prev = list[index];
5553 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) as the help target. */
5555 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5557 struct context *ctx = &cmd_flow_context;
5558 const struct token *token = &token_list[ctx->prev];
5563 /* Set token type and update global help with details. */
5564 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
/* Fall back to the token name when no dedicated help text exists. */
5566 cmd_flow.help_str = token->help;
5568 cmd_flow.help_str = token->name;
5572 /** Token definition template (cmdline API). */
/* Single shared token header: all dynamic tokens use these callbacks. */
5573 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5574 .ops = &(struct cmdline_token_ops){
5575 .parse = cmd_flow_parse,
5576 .complete_get_nb = cmd_flow_complete_get_nb,
5577 .complete_get_elt = cmd_flow_complete_get_elt,
5578 .get_help = cmd_flow_get_help,
5583 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library for each token slot; resets the context
 * on the first slot and hands back the shared token header while more
 * tokens are expected. NOTE(review): listing is elided; the EOL/end
 * decision logic is only partially visible.
 */
5585 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5586 cmdline_parse_token_hdr_t **hdr_inst)
5588 struct context *ctx = &cmd_flow_context;
5590 /* Always reinitialize context before requesting the first token. */
5591 if (!(hdr_inst - cmd_flow.tokens))
5592 cmd_flow_context_init(ctx);
5593 /* Return NULL when no more tokens are expected. */
5594 if (!ctx->next_num && ctx->curr) {
5598 /* Determine if command should end here. */
5599 if (ctx->eol && ctx->last && ctx->next_num) {
5600 const enum index *list = ctx->next[ctx->next_num - 1];
5603 for (i = 0; list[i]; ++i) {
5610 *hdr = &cmd_flow_token_hdr;
5613 /** Dispatch parsed buffer to function calls. */
/* Route the completed command buffer to the matching port_flow_* API. */
5615 cmd_flow_parsed(const struct buffer *in)
5617 switch (in->command) {
5619 port_flow_validate(in->port, &in->args.vc.attr,
5620 in->args.vc.pattern, in->args.vc.actions);
5623 port_flow_create(in->port, &in->args.vc.attr,
5624 in->args.vc.pattern, in->args.vc.actions);
5627 port_flow_destroy(in->port, in->args.destroy.rule_n,
5628 in->args.destroy.rule);
5631 port_flow_flush(in->port);
5634 port_flow_query(in->port, in->args.query.rule,
5635 &in->args.query.action);
5638 port_flow_list(in->port, in->args.list.group_n,
5639 in->args.list.group);
5642 port_flow_isolate(in->port, in->args.isolate.set);
5649 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator and final dispatcher depending on cl. */
5651 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5654 cmd_flow_tok(arg0, arg2);
5656 cmd_flow_parsed(arg0);
5659 /** Global parser instance (cmdline API). */
/* Registered "flow" command; help_str is filled lazily at runtime. */
5660 cmdline_parse_inst_t cmd_flow = {
5662 .data = NULL, /**< Unused. */
5663 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5666 }, /**< Tokens are returned by cmd_flow_tok(). */
5669 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Patch protocol fields in a raw header buffer that the cmdline cannot
 * express directly: ethertype/next-proto chaining, IPv4 version/IHL,
 * IPv6 version bits, VXLAN valid flag, NVGRE protocol and flags.
 * buf points at the item's bytes inside the raw encap/decap buffer.
 */
5672 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5674 struct rte_flow_item_ipv4 *ipv4;
5675 struct rte_flow_item_eth *eth;
5676 struct rte_flow_item_ipv6 *ipv6;
5677 struct rte_flow_item_vxlan *vxlan;
5678 struct rte_flow_item_vxlan_gpe *gpe;
5679 struct rte_flow_item_nvgre *nvgre;
5680 uint32_t ipv6_vtc_flow;
5682 switch (item->type) {
5683 case RTE_FLOW_ITEM_TYPE_ETH:
5684 eth = (struct rte_flow_item_eth *)buf;
5686 eth->type = rte_cpu_to_be_16(next_proto);
5688 case RTE_FLOW_ITEM_TYPE_IPV4:
5689 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45 = IPv4, 5*4-byte header words (no options). */
5690 ipv4->hdr.version_ihl = 0x45;
5691 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
5693 case RTE_FLOW_ITEM_TYPE_IPV6:
5694 ipv6 = (struct rte_flow_item_ipv6 *)buf;
5695 ipv6->hdr.proto = (uint8_t)next_proto;
5696 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
5697 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
5698 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
5699 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
5701 case RTE_FLOW_ITEM_TYPE_VXLAN:
5702 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08 = "VNI valid" flag (I bit) per RFC 7348. */
5703 vxlan->flags = 0x08;
5705 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5706 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
5709 case RTE_FLOW_ITEM_TYPE_NVGRE:
5710 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558 = Transparent Ethernet Bridging; 0x2000 sets the K bit. */
5711 nvgre->protocol = rte_cpu_to_be_16(0x6558);
5712 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
5719 /** Helper of get item's default mask. */
/*
 * Map an item type to the rte_flow-provided default mask object, used
 * as the spec when the user supplied none. Returns NULL (per the
 * initializer) for types without a default. GRE_KEY uses a local
 * static all-ones 32-bit mask since rte_flow defines none for it.
 */
5721 flow_item_default_mask(const struct rte_flow_item *item)
5723 const void *mask = NULL;
5724 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5726 switch (item->type) {
5727 case RTE_FLOW_ITEM_TYPE_ANY:
5728 mask = &rte_flow_item_any_mask;
5730 case RTE_FLOW_ITEM_TYPE_VF:
5731 mask = &rte_flow_item_vf_mask;
5733 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5734 mask = &rte_flow_item_port_id_mask;
5736 case RTE_FLOW_ITEM_TYPE_RAW:
5737 mask = &rte_flow_item_raw_mask;
5739 case RTE_FLOW_ITEM_TYPE_ETH:
5740 mask = &rte_flow_item_eth_mask;
5742 case RTE_FLOW_ITEM_TYPE_VLAN:
5743 mask = &rte_flow_item_vlan_mask;
5745 case RTE_FLOW_ITEM_TYPE_IPV4:
5746 mask = &rte_flow_item_ipv4_mask;
5748 case RTE_FLOW_ITEM_TYPE_IPV6:
5749 mask = &rte_flow_item_ipv6_mask;
5751 case RTE_FLOW_ITEM_TYPE_ICMP:
5752 mask = &rte_flow_item_icmp_mask;
5754 case RTE_FLOW_ITEM_TYPE_UDP:
5755 mask = &rte_flow_item_udp_mask;
5757 case RTE_FLOW_ITEM_TYPE_TCP:
5758 mask = &rte_flow_item_tcp_mask;
5760 case RTE_FLOW_ITEM_TYPE_SCTP:
5761 mask = &rte_flow_item_sctp_mask;
5763 case RTE_FLOW_ITEM_TYPE_VXLAN:
5764 mask = &rte_flow_item_vxlan_mask;
5766 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5767 mask = &rte_flow_item_vxlan_gpe_mask;
5769 case RTE_FLOW_ITEM_TYPE_E_TAG:
5770 mask = &rte_flow_item_e_tag_mask;
5772 case RTE_FLOW_ITEM_TYPE_NVGRE:
5773 mask = &rte_flow_item_nvgre_mask;
5775 case RTE_FLOW_ITEM_TYPE_MPLS:
5776 mask = &rte_flow_item_mpls_mask;
5778 case RTE_FLOW_ITEM_TYPE_GRE:
5779 mask = &rte_flow_item_gre_mask;
5781 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5782 mask = &gre_key_default_mask;
5784 case RTE_FLOW_ITEM_TYPE_META:
5785 mask = &rte_flow_item_meta_mask;
5787 case RTE_FLOW_ITEM_TYPE_FUZZY:
5788 mask = &rte_flow_item_fuzzy_mask;
5790 case RTE_FLOW_ITEM_TYPE_GTP:
5791 mask = &rte_flow_item_gtp_mask;
5793 case RTE_FLOW_ITEM_TYPE_ESP:
5794 mask = &rte_flow_item_esp_mask;
5796 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5797 mask = &rte_flow_item_gtp_psc_mask;
5807 /** Dispatch parsed buffer to function calls. */
/*
 * Execute "set raw_encap" / "set raw_decap": serialize the parsed
 * pattern items into the matching global raw_*_conf buffer, from the
 * innermost header backwards (data grows down from the buffer tail),
 * chaining each layer's next-protocol field via update_fields().
 * NOTE(review): listing is elided; proto assignments for some layers
 * and the default-case handling are only partially visible.
 */
5809 cmd_set_raw_parsed(const struct buffer *in)
5811 uint32_t n = in->args.vc.pattern_n;
5813 struct rte_flow_item *item = NULL;
5815 uint8_t *data = NULL;
5816 uint8_t *data_tail = NULL;
5817 size_t *total_size = NULL;
5818 uint16_t upper_layer = 0;
5821 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
5822 in->command == SET_RAW_DECAP);
5823 if (in->command == SET_RAW_ENCAP) {
5824 total_size = &raw_encap_conf.size;
5825 data = (uint8_t *)&raw_encap_conf.data;
5827 total_size = &raw_decap_conf.size;
5828 data = (uint8_t *)&raw_decap_conf.data;
5831 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5832 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
5833 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
5834 for (i = n - 1 ; i >= 0; --i) {
5835 item = in->args.vc.pattern + i;
/* Items without an explicit spec fall back to the default mask. */
5836 if (item->spec == NULL)
5837 item->spec = flow_item_default_mask(item);
5838 switch (item->type) {
5839 case RTE_FLOW_ITEM_TYPE_ETH:
5840 size = sizeof(struct rte_flow_item_eth);
5842 case RTE_FLOW_ITEM_TYPE_VLAN:
5843 size = sizeof(struct rte_flow_item_vlan);
5844 proto = RTE_ETHER_TYPE_VLAN;
5846 case RTE_FLOW_ITEM_TYPE_IPV4:
5847 size = sizeof(struct rte_flow_item_ipv4);
5848 proto = RTE_ETHER_TYPE_IPV4;
5850 case RTE_FLOW_ITEM_TYPE_IPV6:
5851 size = sizeof(struct rte_flow_item_ipv6);
5852 proto = RTE_ETHER_TYPE_IPV6;
5854 case RTE_FLOW_ITEM_TYPE_UDP:
5855 size = sizeof(struct rte_flow_item_udp);
5858 case RTE_FLOW_ITEM_TYPE_TCP:
5859 size = sizeof(struct rte_flow_item_tcp);
5862 case RTE_FLOW_ITEM_TYPE_VXLAN:
5863 size = sizeof(struct rte_flow_item_vxlan);
5865 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5866 size = sizeof(struct rte_flow_item_vxlan_gpe);
5868 case RTE_FLOW_ITEM_TYPE_GRE:
5869 size = sizeof(struct rte_flow_item_gre);
5872 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5873 size = sizeof(rte_be32_t);
5875 case RTE_FLOW_ITEM_TYPE_MPLS:
5876 size = sizeof(struct rte_flow_item_mpls);
5878 case RTE_FLOW_ITEM_TYPE_NVGRE:
5879 size = sizeof(struct rte_flow_item_nvgre);
/* Unsupported item: report and wipe the partially built buffer. */
5883 printf("Error - Not supported item\n");
5885 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5888 *total_size += size;
5889 rte_memcpy(data_tail - (*total_size), item->spec, size);
5890 /* update some fields which cannot be set by cmdline */
5891 update_fields((data_tail - (*total_size)), item,
5893 upper_layer = proto;
5895 if (verbose_level & 0x1)
5896 printf("total data size is %zu\n", (*total_size));
5897 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
5900 /** Populate help strings for current token (cmdline API). */
5902 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
/* Help is derived from the token reached by the previous parse step. */
5905 struct context *ctx = &cmd_flow_context;
5906 const struct token *token = &token_list[ctx->prev];
5911 /* Set token type and update global help with details. */
5912 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the token's detailed help text; fall back to its name. */
5914 cmd_set_raw.help_str = token->help;
5916 cmd_set_raw.help_str = token->name;
5920 /** Token definition template (cmdline API). */
5921 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
/*
 * Reuse the generic flow-command parser/completion callbacks;
 * only the help callback is specific to "set raw_*".
 */
5922 .ops = &(struct cmdline_token_ops){
5923 .parse = cmd_flow_parse,
5924 .complete_get_nb = cmd_flow_complete_get_nb,
5925 .complete_get_elt = cmd_flow_complete_get_elt,
5926 .get_help = cmd_set_raw_get_help,
5931 /** Populate the next dynamic token. */
5933 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
5934 cmdline_parse_token_hdr_t **hdr_inst)
5936 struct context *ctx = &cmd_flow_context;
5938 /* Always reinitialize context before requesting the first token. */
5939 if (!(hdr_inst - cmd_set_raw.tokens)) {
5940 cmd_flow_context_init(ctx);
/* Parsing starts from the "set" command entry point. */
5941 ctx->curr = START_SET;
5943 /* Return NULL when no more tokens are expected. */
5944 if (!ctx->next_num && (ctx->curr != START_SET)) {
5948 /* Determine if command should end here. */
5949 if (ctx->eol && ctx->last && ctx->next_num) {
/* Inspect the most recent list of expected next tokens. */
5950 const enum index *list = ctx->next[ctx->next_num - 1];
5953 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share the same static header template. */
5960 *hdr = &cmd_set_raw_token_hdr;
5963 /** Token generator and output processing callback (cmdline API). */
5965 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
/*
 * arg0 is interpreted by the callee: token storage for generation,
 * the parsed buffer once the command line has been accepted.
 */
5968 cmd_set_raw_tok(arg0, arg2);
5970 cmd_set_raw_parsed(arg0);
5973 /** Global parser instance (cmdline API). */
5974 cmdline_parse_inst_t cmd_set_raw = {
5975 .f = cmd_set_raw_cb,
5976 .data = NULL, /**< Unused. */
5977 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5980 }, /**< Tokens are returned by cmd_flow_tok(). */