1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
49 /* Top-level command. */
51 /* Sub-level commands. */
55 /* Top-level command. */
57 /* Sub-level commands. */
66 /* Destroy arguments. */
69 /* Query arguments. */
75 /* Validate/create arguments. */
82 /* Validate/create pattern. */
119 ITEM_VLAN_INNER_TYPE,
151 ITEM_E_TAG_GRP_ECID_B,
160 ITEM_GRE_C_RSVD0_VER,
176 ITEM_ARP_ETH_IPV4_SHA,
177 ITEM_ARP_ETH_IPV4_SPA,
178 ITEM_ARP_ETH_IPV4_THA,
179 ITEM_ARP_ETH_IPV4_TPA,
181 ITEM_IPV6_EXT_NEXT_HDR,
186 ITEM_ICMP6_ND_NS_TARGET_ADDR,
188 ITEM_ICMP6_ND_NA_TARGET_ADDR,
190 ITEM_ICMP6_ND_OPT_TYPE,
191 ITEM_ICMP6_ND_OPT_SLA_ETH,
192 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
193 ITEM_ICMP6_ND_OPT_TLA_ETH,
194 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
207 ITEM_HIGIG2_CLASSIFICATION,
210 /* Validate/create actions. */
230 ACTION_RSS_FUNC_DEFAULT,
231 ACTION_RSS_FUNC_TOEPLITZ,
232 ACTION_RSS_FUNC_SIMPLE_XOR,
233 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ,
245 ACTION_PHY_PORT_ORIGINAL,
246 ACTION_PHY_PORT_INDEX,
248 ACTION_PORT_ID_ORIGINAL,
252 ACTION_OF_SET_MPLS_TTL,
253 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
254 ACTION_OF_DEC_MPLS_TTL,
255 ACTION_OF_SET_NW_TTL,
256 ACTION_OF_SET_NW_TTL_NW_TTL,
257 ACTION_OF_DEC_NW_TTL,
258 ACTION_OF_COPY_TTL_OUT,
259 ACTION_OF_COPY_TTL_IN,
262 ACTION_OF_PUSH_VLAN_ETHERTYPE,
263 ACTION_OF_SET_VLAN_VID,
264 ACTION_OF_SET_VLAN_VID_VLAN_VID,
265 ACTION_OF_SET_VLAN_PCP,
266 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
268 ACTION_OF_POP_MPLS_ETHERTYPE,
270 ACTION_OF_PUSH_MPLS_ETHERTYPE,
277 ACTION_MPLSOGRE_ENCAP,
278 ACTION_MPLSOGRE_DECAP,
279 ACTION_MPLSOUDP_ENCAP,
280 ACTION_MPLSOUDP_DECAP,
282 ACTION_SET_IPV4_SRC_IPV4_SRC,
284 ACTION_SET_IPV4_DST_IPV4_DST,
286 ACTION_SET_IPV6_SRC_IPV6_SRC,
288 ACTION_SET_IPV6_DST_IPV6_DST,
290 ACTION_SET_TP_SRC_TP_SRC,
292 ACTION_SET_TP_DST_TP_DST,
298 ACTION_SET_MAC_SRC_MAC_SRC,
300 ACTION_SET_MAC_DST_MAC_DST,
302 ACTION_INC_TCP_SEQ_VALUE,
304 ACTION_DEC_TCP_SEQ_VALUE,
306 ACTION_INC_TCP_ACK_VALUE,
308 ACTION_DEC_TCP_ACK_VALUE,
313 /** Maximum size for pattern in struct rte_flow_item_raw. */
314 #define ITEM_RAW_PATTERN_SIZE 40
316 /** Storage size for struct rte_flow_item_raw including pattern. */
317 #define ITEM_RAW_SIZE \
318 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
320 /** Maximum number of queue indices in struct rte_flow_action_rss. */
321 #define ACTION_RSS_QUEUE_NUM 128
323 /** Storage for struct rte_flow_action_rss including external data. */
324 struct action_rss_data {
325 struct rte_flow_action_rss conf;
326 uint8_t key[RSS_HASH_KEY_LENGTH];
327 uint16_t queue[ACTION_RSS_QUEUE_NUM];
330 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
331 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
333 #define ACTION_RAW_ENCAP_MAX_DATA 128
335 /** Storage for struct rte_flow_action_raw_encap. */
336 struct raw_encap_conf {
337 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
338 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
342 struct raw_encap_conf raw_encap_conf = {.size = 0};
344 /** Storage for struct rte_flow_action_raw_decap. */
345 struct raw_decap_conf {
346 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
350 struct raw_decap_conf raw_decap_conf = {.size = 0};
352 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
353 struct action_vxlan_encap_data {
354 struct rte_flow_action_vxlan_encap conf;
355 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
356 struct rte_flow_item_eth item_eth;
357 struct rte_flow_item_vlan item_vlan;
359 struct rte_flow_item_ipv4 item_ipv4;
360 struct rte_flow_item_ipv6 item_ipv6;
362 struct rte_flow_item_udp item_udp;
363 struct rte_flow_item_vxlan item_vxlan;
366 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
367 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
369 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
370 struct action_nvgre_encap_data {
371 struct rte_flow_action_nvgre_encap conf;
372 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
373 struct rte_flow_item_eth item_eth;
374 struct rte_flow_item_vlan item_vlan;
376 struct rte_flow_item_ipv4 item_ipv4;
377 struct rte_flow_item_ipv6 item_ipv6;
379 struct rte_flow_item_nvgre item_nvgre;
382 /** Maximum data size in struct rte_flow_action_raw_encap. */
383 #define ACTION_RAW_ENCAP_MAX_DATA 128
385 /** Storage for struct rte_flow_action_raw_encap including external data. */
386 struct action_raw_encap_data {
387 struct rte_flow_action_raw_encap conf;
388 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
389 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
392 /** Storage for struct rte_flow_action_raw_decap including external data. */
393 struct action_raw_decap_data {
394 struct rte_flow_action_raw_decap conf;
395 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
398 /** Maximum number of subsequent tokens and arguments on the stack. */
399 #define CTX_STACK_SIZE 16
401 /** Parser context. */
403 /** Stack of subsequent token lists to process. */
404 const enum index *next[CTX_STACK_SIZE];
405 /** Arguments for stacked tokens. */
406 const void *args[CTX_STACK_SIZE];
407 enum index curr; /**< Current token index. */
408 enum index prev; /**< Index of the last token seen. */
409 int next_num; /**< Number of entries in next[]. */
410 int args_num; /**< Number of entries in args[]. */
411 uint32_t eol:1; /**< EOL has been detected. */
412 uint32_t last:1; /**< No more arguments. */
413 portid_t port; /**< Current port ID (for completions). */
414 uint32_t objdata; /**< Object-specific data. */
415 void *object; /**< Address of current object for relative offsets. */
416 void *objmask; /**< Object a full mask must be written to. */
419 /** Token argument. */
421 uint32_t hton:1; /**< Use network byte ordering. */
422 uint32_t sign:1; /**< Value is signed. */
423 uint32_t bounded:1; /**< Value is bounded. */
424 uintmax_t min; /**< Minimum value if bounded. */
425 uintmax_t max; /**< Maximum value if bounded. */
426 uint32_t offset; /**< Relative offset from ctx->object. */
427 uint32_t size; /**< Field size. */
428 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
431 /** Parser token definition. */
433 /** Type displayed during completion (defaults to "TOKEN"). */
435 /** Help displayed during completion (defaults to token name). */
437 /** Private data used by parser functions. */
440 * Lists of subsequent tokens to push on the stack. Each call to the
441 * parser consumes the last entry of that stack.
443 const enum index *const *next;
444 /** Arguments stack for subsequent tokens that need them. */
445 const struct arg *const *args;
447 * Token-processing callback, returns -1 in case of error, the
448 * length of the matched string otherwise. If NULL, attempts to
449 * match the token name.
451 * If buf is not NULL, the result should be stored in it according
452 * to context. An error is returned if not large enough.
454 int (*call)(struct context *ctx, const struct token *token,
455 const char *str, unsigned int len,
456 void *buf, unsigned int size);
458 * Callback that provides possible values for this token, used for
459 * completion. Returns -1 in case of error, the number of possible
460 * values otherwise. If NULL, the token name is used.
462 * If buf is not NULL, entry index ent is written to buf and the
463 * full length of the entry is returned (same behavior as
466 int (*comp)(struct context *ctx, const struct token *token,
467 unsigned int ent, char *buf, unsigned int size);
468 /** Mandatory token name, no default value. */
472 /** Static initializer for the next field. */
473 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
475 /** Static initializer for a NEXT() entry. */
476 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
478 /** Static initializer for the args field. */
479 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
481 /** Static initializer for ARGS() to target a field. */
482 #define ARGS_ENTRY(s, f) \
483 (&(const struct arg){ \
484 .offset = offsetof(s, f), \
485 .size = sizeof(((s *)0)->f), \
488 /** Static initializer for ARGS() to target a bit-field. */
489 #define ARGS_ENTRY_BF(s, f, b) \
490 (&(const struct arg){ \
492 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
495 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
496 #define ARGS_ENTRY_MASK(s, f, m) \
497 (&(const struct arg){ \
498 .offset = offsetof(s, f), \
499 .size = sizeof(((s *)0)->f), \
500 .mask = (const void *)(m), \
503 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
504 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
505 (&(const struct arg){ \
507 .offset = offsetof(s, f), \
508 .size = sizeof(((s *)0)->f), \
509 .mask = (const void *)(m), \
512 /** Static initializer for ARGS() to target a pointer. */
513 #define ARGS_ENTRY_PTR(s, f) \
514 (&(const struct arg){ \
515 .size = sizeof(*((s *)0)->f), \
518 /** Static initializer for ARGS() with arbitrary offset and size. */
519 #define ARGS_ENTRY_ARB(o, s) \
520 (&(const struct arg){ \
525 /** Same as ARGS_ENTRY_ARB() with bounded values. */
526 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
527 (&(const struct arg){ \
535 /** Same as ARGS_ENTRY() using network byte ordering. */
536 #define ARGS_ENTRY_HTON(s, f) \
537 (&(const struct arg){ \
539 .offset = offsetof(s, f), \
540 .size = sizeof(((s *)0)->f), \
543 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
544 #define ARG_ENTRY_HTON(s) \
545 (&(const struct arg){ \
551 /** Parser output buffer layout expected by cmd_flow_parsed(). */
553 enum index command; /**< Flow command. */
554 portid_t port; /**< Affected port ID. */
557 struct rte_flow_attr attr;
558 struct rte_flow_item *pattern;
559 struct rte_flow_action *actions;
563 } vc; /**< Validate/create arguments. */
567 } destroy; /**< Destroy arguments. */
570 struct rte_flow_action action;
571 } query; /**< Query arguments. */
575 } list; /**< List arguments. */
578 } isolate; /**< Isolated mode arguments. */
579 } args; /**< Command arguments. */
582 /** Private data for pattern items. */
583 struct parse_item_priv {
584 enum rte_flow_item_type type; /**< Item type. */
585 uint32_t size; /**< Size of item specification structure. */
588 #define PRIV_ITEM(t, s) \
589 (&(const struct parse_item_priv){ \
590 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
594 /** Private data for actions. */
595 struct parse_action_priv {
596 enum rte_flow_action_type type; /**< Action type. */
597 uint32_t size; /**< Size of action configuration structure. */
600 #define PRIV_ACTION(t, s) \
601 (&(const struct parse_action_priv){ \
602 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
606 static const enum index next_vc_attr[] = {
616 static const enum index next_destroy_attr[] = {
622 static const enum index next_list_attr[] = {
628 static const enum index item_param[] = {
637 static const enum index next_item[] = {
673 ITEM_ICMP6_ND_OPT_SLA_ETH,
674 ITEM_ICMP6_ND_OPT_TLA_ETH,
686 static const enum index item_fuzzy[] = {
692 static const enum index item_any[] = {
698 static const enum index item_vf[] = {
704 static const enum index item_phy_port[] = {
710 static const enum index item_port_id[] = {
716 static const enum index item_mark[] = {
722 static const enum index item_raw[] = {
732 static const enum index item_eth[] = {
740 static const enum index item_vlan[] = {
745 ITEM_VLAN_INNER_TYPE,
750 static const enum index item_ipv4[] = {
760 static const enum index item_ipv6[] = {
771 static const enum index item_icmp[] = {
778 static const enum index item_udp[] = {
785 static const enum index item_tcp[] = {
793 static const enum index item_sctp[] = {
802 static const enum index item_vxlan[] = {
808 static const enum index item_e_tag[] = {
809 ITEM_E_TAG_GRP_ECID_B,
814 static const enum index item_nvgre[] = {
820 static const enum index item_mpls[] = {
828 static const enum index item_gre[] = {
830 ITEM_GRE_C_RSVD0_VER,
838 static const enum index item_gre_key[] = {
844 static const enum index item_gtp[] = {
850 static const enum index item_geneve[] = {
857 static const enum index item_vxlan_gpe[] = {
863 static const enum index item_arp_eth_ipv4[] = {
864 ITEM_ARP_ETH_IPV4_SHA,
865 ITEM_ARP_ETH_IPV4_SPA,
866 ITEM_ARP_ETH_IPV4_THA,
867 ITEM_ARP_ETH_IPV4_TPA,
872 static const enum index item_ipv6_ext[] = {
873 ITEM_IPV6_EXT_NEXT_HDR,
878 static const enum index item_icmp6[] = {
885 static const enum index item_icmp6_nd_ns[] = {
886 ITEM_ICMP6_ND_NS_TARGET_ADDR,
891 static const enum index item_icmp6_nd_na[] = {
892 ITEM_ICMP6_ND_NA_TARGET_ADDR,
897 static const enum index item_icmp6_nd_opt[] = {
898 ITEM_ICMP6_ND_OPT_TYPE,
903 static const enum index item_icmp6_nd_opt_sla_eth[] = {
904 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
909 static const enum index item_icmp6_nd_opt_tla_eth[] = {
910 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
915 static const enum index item_meta[] = {
921 static const enum index item_gtp_psc[] = {
928 static const enum index item_pppoed[] = {
934 static const enum index item_pppoes[] = {
940 static const enum index item_pppoe_proto_id[] = {
946 static const enum index item_higig2[] = {
947 ITEM_HIGIG2_CLASSIFICATION,
953 static const enum index next_action[] = {
969 ACTION_OF_SET_MPLS_TTL,
970 ACTION_OF_DEC_MPLS_TTL,
971 ACTION_OF_SET_NW_TTL,
972 ACTION_OF_DEC_NW_TTL,
973 ACTION_OF_COPY_TTL_OUT,
974 ACTION_OF_COPY_TTL_IN,
977 ACTION_OF_SET_VLAN_VID,
978 ACTION_OF_SET_VLAN_PCP,
987 ACTION_MPLSOGRE_ENCAP,
988 ACTION_MPLSOGRE_DECAP,
989 ACTION_MPLSOUDP_ENCAP,
990 ACTION_MPLSOUDP_DECAP,
1011 static const enum index action_mark[] = {
1017 static const enum index action_queue[] = {
1023 static const enum index action_count[] = {
1025 ACTION_COUNT_SHARED,
1030 static const enum index action_rss[] = {
1041 static const enum index action_vf[] = {
1048 static const enum index action_phy_port[] = {
1049 ACTION_PHY_PORT_ORIGINAL,
1050 ACTION_PHY_PORT_INDEX,
1055 static const enum index action_port_id[] = {
1056 ACTION_PORT_ID_ORIGINAL,
1062 static const enum index action_meter[] = {
1068 static const enum index action_of_set_mpls_ttl[] = {
1069 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1074 static const enum index action_of_set_nw_ttl[] = {
1075 ACTION_OF_SET_NW_TTL_NW_TTL,
1080 static const enum index action_of_push_vlan[] = {
1081 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1086 static const enum index action_of_set_vlan_vid[] = {
1087 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1092 static const enum index action_of_set_vlan_pcp[] = {
1093 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1098 static const enum index action_of_pop_mpls[] = {
1099 ACTION_OF_POP_MPLS_ETHERTYPE,
1104 static const enum index action_of_push_mpls[] = {
1105 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1110 static const enum index action_set_ipv4_src[] = {
1111 ACTION_SET_IPV4_SRC_IPV4_SRC,
1116 static const enum index action_set_mac_src[] = {
1117 ACTION_SET_MAC_SRC_MAC_SRC,
1122 static const enum index action_set_ipv4_dst[] = {
1123 ACTION_SET_IPV4_DST_IPV4_DST,
1128 static const enum index action_set_ipv6_src[] = {
1129 ACTION_SET_IPV6_SRC_IPV6_SRC,
1134 static const enum index action_set_ipv6_dst[] = {
1135 ACTION_SET_IPV6_DST_IPV6_DST,
1140 static const enum index action_set_tp_src[] = {
1141 ACTION_SET_TP_SRC_TP_SRC,
1146 static const enum index action_set_tp_dst[] = {
1147 ACTION_SET_TP_DST_TP_DST,
1152 static const enum index action_set_ttl[] = {
1158 static const enum index action_jump[] = {
1164 static const enum index action_set_mac_dst[] = {
1165 ACTION_SET_MAC_DST_MAC_DST,
1170 static const enum index action_inc_tcp_seq[] = {
1171 ACTION_INC_TCP_SEQ_VALUE,
1176 static const enum index action_dec_tcp_seq[] = {
1177 ACTION_DEC_TCP_SEQ_VALUE,
1182 static const enum index action_inc_tcp_ack[] = {
1183 ACTION_INC_TCP_ACK_VALUE,
1188 static const enum index action_dec_tcp_ack[] = {
1189 ACTION_DEC_TCP_ACK_VALUE,
1194 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1195 const char *, unsigned int,
1196 void *, unsigned int);
1197 static int parse_set_init(struct context *, const struct token *,
1198 const char *, unsigned int,
1199 void *, unsigned int);
1200 static int parse_init(struct context *, const struct token *,
1201 const char *, unsigned int,
1202 void *, unsigned int);
1203 static int parse_vc(struct context *, const struct token *,
1204 const char *, unsigned int,
1205 void *, unsigned int);
1206 static int parse_vc_spec(struct context *, const struct token *,
1207 const char *, unsigned int, void *, unsigned int);
1208 static int parse_vc_conf(struct context *, const struct token *,
1209 const char *, unsigned int, void *, unsigned int);
1210 static int parse_vc_action_rss(struct context *, const struct token *,
1211 const char *, unsigned int, void *,
1213 static int parse_vc_action_rss_func(struct context *, const struct token *,
1214 const char *, unsigned int, void *,
1216 static int parse_vc_action_rss_type(struct context *, const struct token *,
1217 const char *, unsigned int, void *,
1219 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1220 const char *, unsigned int, void *,
1222 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1223 const char *, unsigned int, void *,
1225 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1226 const char *, unsigned int, void *,
1228 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1229 const char *, unsigned int, void *,
1231 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1232 const char *, unsigned int, void *,
1234 static int parse_vc_action_mplsogre_encap(struct context *,
1235 const struct token *, const char *,
1236 unsigned int, void *, unsigned int);
1237 static int parse_vc_action_mplsogre_decap(struct context *,
1238 const struct token *, const char *,
1239 unsigned int, void *, unsigned int);
1240 static int parse_vc_action_mplsoudp_encap(struct context *,
1241 const struct token *, const char *,
1242 unsigned int, void *, unsigned int);
1243 static int parse_vc_action_mplsoudp_decap(struct context *,
1244 const struct token *, const char *,
1245 unsigned int, void *, unsigned int);
1246 static int parse_vc_action_raw_encap(struct context *,
1247 const struct token *, const char *,
1248 unsigned int, void *, unsigned int);
1249 static int parse_vc_action_raw_decap(struct context *,
1250 const struct token *, const char *,
1251 unsigned int, void *, unsigned int);
1252 static int parse_destroy(struct context *, const struct token *,
1253 const char *, unsigned int,
1254 void *, unsigned int);
1255 static int parse_flush(struct context *, const struct token *,
1256 const char *, unsigned int,
1257 void *, unsigned int);
1258 static int parse_query(struct context *, const struct token *,
1259 const char *, unsigned int,
1260 void *, unsigned int);
1261 static int parse_action(struct context *, const struct token *,
1262 const char *, unsigned int,
1263 void *, unsigned int);
1264 static int parse_list(struct context *, const struct token *,
1265 const char *, unsigned int,
1266 void *, unsigned int);
1267 static int parse_isolate(struct context *, const struct token *,
1268 const char *, unsigned int,
1269 void *, unsigned int);
1270 static int parse_int(struct context *, const struct token *,
1271 const char *, unsigned int,
1272 void *, unsigned int);
1273 static int parse_prefix(struct context *, const struct token *,
1274 const char *, unsigned int,
1275 void *, unsigned int);
1276 static int parse_boolean(struct context *, const struct token *,
1277 const char *, unsigned int,
1278 void *, unsigned int);
1279 static int parse_string(struct context *, const struct token *,
1280 const char *, unsigned int,
1281 void *, unsigned int);
1282 static int parse_hex(struct context *ctx, const struct token *token,
1283 const char *str, unsigned int len,
1284 void *buf, unsigned int size);
1285 static int parse_mac_addr(struct context *, const struct token *,
1286 const char *, unsigned int,
1287 void *, unsigned int);
1288 static int parse_ipv4_addr(struct context *, const struct token *,
1289 const char *, unsigned int,
1290 void *, unsigned int);
1291 static int parse_ipv6_addr(struct context *, const struct token *,
1292 const char *, unsigned int,
1293 void *, unsigned int);
1294 static int parse_port(struct context *, const struct token *,
1295 const char *, unsigned int,
1296 void *, unsigned int);
1297 static int comp_none(struct context *, const struct token *,
1298 unsigned int, char *, unsigned int);
1299 static int comp_boolean(struct context *, const struct token *,
1300 unsigned int, char *, unsigned int);
1301 static int comp_action(struct context *, const struct token *,
1302 unsigned int, char *, unsigned int);
1303 static int comp_port(struct context *, const struct token *,
1304 unsigned int, char *, unsigned int);
1305 static int comp_rule_id(struct context *, const struct token *,
1306 unsigned int, char *, unsigned int);
1307 static int comp_vc_action_rss_type(struct context *, const struct token *,
1308 unsigned int, char *, unsigned int);
1309 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1310 unsigned int, char *, unsigned int);
1312 /** Token definitions. */
1313 static const struct token token_list[] = {
1314 /* Special tokens. */
1317 .help = "null entry, abused as the entry point",
1318 .next = NEXT(NEXT_ENTRY(FLOW)),
1323 .help = "command may end here",
1326 .name = "START_SET",
1327 .help = "null entry, abused as the entry point for set",
1328 .next = NEXT(NEXT_ENTRY(SET)),
1333 .help = "set command may end here",
1335 /* Common tokens. */
1339 .help = "integer value",
1344 .name = "{unsigned}",
1346 .help = "unsigned integer value",
1353 .help = "prefix length for bit-mask",
1354 .call = parse_prefix,
1358 .name = "{boolean}",
1360 .help = "any boolean value",
1361 .call = parse_boolean,
1362 .comp = comp_boolean,
1367 .help = "fixed string",
1368 .call = parse_string,
1374 .help = "fixed string",
1379 .name = "{MAC address}",
1381 .help = "standard MAC address notation",
1382 .call = parse_mac_addr,
1386 .name = "{IPv4 address}",
1387 .type = "IPV4 ADDRESS",
1388 .help = "standard IPv4 address notation",
1389 .call = parse_ipv4_addr,
1393 .name = "{IPv6 address}",
1394 .type = "IPV6 ADDRESS",
1395 .help = "standard IPv6 address notation",
1396 .call = parse_ipv6_addr,
1400 .name = "{rule id}",
1402 .help = "rule identifier",
1404 .comp = comp_rule_id,
1407 .name = "{port_id}",
1409 .help = "port identifier",
1414 .name = "{group_id}",
1416 .help = "group identifier",
1420 [PRIORITY_LEVEL] = {
1423 .help = "priority level",
1427 /* Top-level command. */
1430 .type = "{command} {port_id} [{arg} [...]]",
1431 .help = "manage ingress/egress flow rules",
1432 .next = NEXT(NEXT_ENTRY
1442 /* Sub-level commands. */
1445 .help = "check whether a flow rule can be created",
1446 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1447 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1452 .help = "create a flow rule",
1453 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1454 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1459 .help = "destroy specific flow rules",
1460 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1461 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1462 .call = parse_destroy,
1466 .help = "destroy all flow rules",
1467 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1468 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1469 .call = parse_flush,
1473 .help = "query an existing flow rule",
1474 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1475 NEXT_ENTRY(RULE_ID),
1476 NEXT_ENTRY(PORT_ID)),
1477 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1478 ARGS_ENTRY(struct buffer, args.query.rule),
1479 ARGS_ENTRY(struct buffer, port)),
1480 .call = parse_query,
1484 .help = "list existing flow rules",
1485 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1486 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1491 .help = "restrict ingress traffic to the defined flow rules",
1492 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1493 NEXT_ENTRY(PORT_ID)),
1494 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1495 ARGS_ENTRY(struct buffer, port)),
1496 .call = parse_isolate,
1498 /* Destroy arguments. */
1501 .help = "specify a rule identifier",
1502 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1503 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1504 .call = parse_destroy,
1506 /* Query arguments. */
1510 .help = "action to query, must be part of the rule",
1511 .call = parse_action,
1512 .comp = comp_action,
1514 /* List arguments. */
1517 .help = "specify a group",
1518 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1519 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1522 /* Validate/create attributes. */
1525 .help = "specify a group",
1526 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1527 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1532 .help = "specify a priority level",
1533 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1534 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1539 .help = "affect rule to ingress",
1540 .next = NEXT(next_vc_attr),
1545 .help = "affect rule to egress",
1546 .next = NEXT(next_vc_attr),
1551 .help = "apply rule directly to endpoints found in pattern",
1552 .next = NEXT(next_vc_attr),
1555 /* Validate/create pattern. */
1558 .help = "submit a list of pattern items",
1559 .next = NEXT(next_item),
1564 .help = "match value perfectly (with full bit-mask)",
1565 .call = parse_vc_spec,
1567 [ITEM_PARAM_SPEC] = {
1569 .help = "match value according to configured bit-mask",
1570 .call = parse_vc_spec,
1572 [ITEM_PARAM_LAST] = {
1574 .help = "specify upper bound to establish a range",
1575 .call = parse_vc_spec,
1577 [ITEM_PARAM_MASK] = {
1579 .help = "specify bit-mask with relevant bits set to one",
1580 .call = parse_vc_spec,
1582 [ITEM_PARAM_PREFIX] = {
1584 .help = "generate bit-mask from a prefix length",
1585 .call = parse_vc_spec,
1589 .help = "specify next pattern item",
1590 .next = NEXT(next_item),
1594 .help = "end list of pattern items",
1595 .priv = PRIV_ITEM(END, 0),
1596 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1601 .help = "no-op pattern item",
1602 .priv = PRIV_ITEM(VOID, 0),
1603 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1608 .help = "perform actions when pattern does not match",
1609 .priv = PRIV_ITEM(INVERT, 0),
1610 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1615 .help = "match any protocol for the current layer",
1616 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1617 .next = NEXT(item_any),
1622 .help = "number of layers covered",
1623 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1624 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1628 .help = "match traffic from/to the physical function",
1629 .priv = PRIV_ITEM(PF, 0),
1630 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1635 .help = "match traffic from/to a virtual function ID",
1636 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1637 .next = NEXT(item_vf),
1643 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1644 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1648 .help = "match traffic from/to a specific physical port",
1649 .priv = PRIV_ITEM(PHY_PORT,
1650 sizeof(struct rte_flow_item_phy_port)),
1651 .next = NEXT(item_phy_port),
1654 [ITEM_PHY_PORT_INDEX] = {
1656 .help = "physical port index",
1657 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1658 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1662 .help = "match traffic from/to a given DPDK port ID",
1663 .priv = PRIV_ITEM(PORT_ID,
1664 sizeof(struct rte_flow_item_port_id)),
1665 .next = NEXT(item_port_id),
1668 [ITEM_PORT_ID_ID] = {
1670 .help = "DPDK port ID",
1671 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1672 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1676 .help = "match traffic against value set in previously matched rule",
1677 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1678 .next = NEXT(item_mark),
1683 .help = "Integer value to match against",
1684 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1685 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1689 .help = "match an arbitrary byte string",
1690 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1691 .next = NEXT(item_raw),
1694 [ITEM_RAW_RELATIVE] = {
1696 .help = "look for pattern after the previous item",
1697 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1698 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1701 [ITEM_RAW_SEARCH] = {
1703 .help = "search pattern from offset (see also limit)",
1704 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1705 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1708 [ITEM_RAW_OFFSET] = {
1710 .help = "absolute or relative offset for pattern",
1711 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1712 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1714 [ITEM_RAW_LIMIT] = {
1716 .help = "search area limit for start of pattern",
1717 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1718 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1720 [ITEM_RAW_PATTERN] = {
1722 .help = "byte string to look for",
1723 .next = NEXT(item_raw,
1725 NEXT_ENTRY(ITEM_PARAM_IS,
1728 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1729 ARGS_ENTRY(struct rte_flow_item_raw, length),
1730 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1731 ITEM_RAW_PATTERN_SIZE)),
1735 .help = "match Ethernet header",
1736 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1737 .next = NEXT(item_eth),
1742 .help = "destination MAC",
1743 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1744 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1748 .help = "source MAC",
1749 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1750 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1754 .help = "EtherType",
1755 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1756 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1760 .help = "match 802.1Q/ad VLAN tag",
1761 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1762 .next = NEXT(item_vlan),
1767 .help = "tag control information",
1768 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1769 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1773 .help = "priority code point",
1774 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1775 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1780 .help = "drop eligible indicator",
1781 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1782 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1787 .help = "VLAN identifier",
1788 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1789 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1792 [ITEM_VLAN_INNER_TYPE] = {
1793 .name = "inner_type",
1794 .help = "inner EtherType",
1795 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1796 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1801 .help = "match IPv4 header",
1802 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1803 .next = NEXT(item_ipv4),
1808 .help = "type of service",
1809 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1810 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1811 hdr.type_of_service)),
1815 .help = "time to live",
1816 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1817 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1820 [ITEM_IPV4_PROTO] = {
1822 .help = "next protocol ID",
1823 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1824 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1825 hdr.next_proto_id)),
1829 .help = "source address",
1830 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1831 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1836 .help = "destination address",
1837 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1838 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1843 .help = "match IPv6 header",
1844 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1845 .next = NEXT(item_ipv6),
1850 .help = "traffic class",
1851 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1852 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1854 "\x0f\xf0\x00\x00")),
1856 [ITEM_IPV6_FLOW] = {
1858 .help = "flow label",
1859 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1860 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1862 "\x00\x0f\xff\xff")),
1864 [ITEM_IPV6_PROTO] = {
1866 .help = "protocol (next header)",
1867 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1868 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1873 .help = "hop limit",
1874 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1875 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1880 .help = "source address",
1881 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1882 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1887 .help = "destination address",
1888 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1889 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1894 .help = "match ICMP header",
1895 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1896 .next = NEXT(item_icmp),
1899 [ITEM_ICMP_TYPE] = {
1901 .help = "ICMP packet type",
1902 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1903 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1906 [ITEM_ICMP_CODE] = {
1908 .help = "ICMP packet code",
1909 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1910 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1915 .help = "match UDP header",
1916 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1917 .next = NEXT(item_udp),
1922 .help = "UDP source port",
1923 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1924 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1929 .help = "UDP destination port",
1930 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1931 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1936 .help = "match TCP header",
1937 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1938 .next = NEXT(item_tcp),
1943 .help = "TCP source port",
1944 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1945 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1950 .help = "TCP destination port",
1951 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1952 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1955 [ITEM_TCP_FLAGS] = {
1957 .help = "TCP flags",
1958 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1959 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1964 .help = "match SCTP header",
1965 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1966 .next = NEXT(item_sctp),
1971 .help = "SCTP source port",
1972 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1973 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1978 .help = "SCTP destination port",
1979 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1980 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1985 .help = "validation tag",
1986 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1987 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1990 [ITEM_SCTP_CKSUM] = {
1993 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1994 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1999 .help = "match VXLAN header",
2000 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
2001 .next = NEXT(item_vxlan),
2004 [ITEM_VXLAN_VNI] = {
2006 .help = "VXLAN identifier",
2007 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
2008 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2012 .help = "match E-Tag header",
2013 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2014 .next = NEXT(item_e_tag),
2017 [ITEM_E_TAG_GRP_ECID_B] = {
2018 .name = "grp_ecid_b",
2019 .help = "GRP and E-CID base",
2020 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2021 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2027 .help = "match NVGRE header",
2028 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2029 .next = NEXT(item_nvgre),
2032 [ITEM_NVGRE_TNI] = {
2034 .help = "virtual subnet ID",
2035 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2036 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2040 .help = "match MPLS header",
2041 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2042 .next = NEXT(item_mpls),
2045 [ITEM_MPLS_LABEL] = {
2047 .help = "MPLS label",
2048 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2049 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2055 .help = "MPLS Traffic Class",
2056 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2057 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2063 .help = "MPLS Bottom-of-Stack",
2064 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2065 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2071 .help = "match GRE header",
2072 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2073 .next = NEXT(item_gre),
2076 [ITEM_GRE_PROTO] = {
2078 .help = "GRE protocol type",
2079 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2080 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2083 [ITEM_GRE_C_RSVD0_VER] = {
2084 .name = "c_rsvd0_ver",
2086 "checksum (1b), undefined (1b), key bit (1b),"
2087 " sequence number (1b), reserved 0 (9b),"
2089 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2090 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2093 [ITEM_GRE_C_BIT] = {
2095 .help = "checksum bit (C)",
2096 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2097 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2099 "\x80\x00\x00\x00")),
2101 [ITEM_GRE_S_BIT] = {
2103 .help = "sequence number bit (S)",
2104 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2105 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2107 "\x10\x00\x00\x00")),
2109 [ITEM_GRE_K_BIT] = {
2111 .help = "key bit (K)",
2112 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2113 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2115 "\x20\x00\x00\x00")),
2119 .help = "fuzzy pattern match, expect faster than default",
2120 .priv = PRIV_ITEM(FUZZY,
2121 sizeof(struct rte_flow_item_fuzzy)),
2122 .next = NEXT(item_fuzzy),
2125 [ITEM_FUZZY_THRESH] = {
2127 .help = "match accuracy threshold",
2128 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2129 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2134 .help = "match GTP header",
2135 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2136 .next = NEXT(item_gtp),
2141 .help = "tunnel endpoint identifier",
2142 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2143 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2147 .help = "match GTP header",
2148 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2149 .next = NEXT(item_gtp),
2154 .help = "match GTP header",
2155 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2156 .next = NEXT(item_gtp),
2161 .help = "match GENEVE header",
2162 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2163 .next = NEXT(item_geneve),
2166 [ITEM_GENEVE_VNI] = {
2168 .help = "virtual network identifier",
2169 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2170 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2172 [ITEM_GENEVE_PROTO] = {
2174 .help = "GENEVE protocol type",
2175 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2176 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2179 [ITEM_VXLAN_GPE] = {
2180 .name = "vxlan-gpe",
2181 .help = "match VXLAN-GPE header",
2182 .priv = PRIV_ITEM(VXLAN_GPE,
2183 sizeof(struct rte_flow_item_vxlan_gpe)),
2184 .next = NEXT(item_vxlan_gpe),
2187 [ITEM_VXLAN_GPE_VNI] = {
2189 .help = "VXLAN-GPE identifier",
2190 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2191 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2194 [ITEM_ARP_ETH_IPV4] = {
2195 .name = "arp_eth_ipv4",
2196 .help = "match ARP header for Ethernet/IPv4",
2197 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2198 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2199 .next = NEXT(item_arp_eth_ipv4),
2202 [ITEM_ARP_ETH_IPV4_SHA] = {
2204 .help = "sender hardware address",
2205 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2207 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2210 [ITEM_ARP_ETH_IPV4_SPA] = {
2212 .help = "sender IPv4 address",
2213 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2215 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2218 [ITEM_ARP_ETH_IPV4_THA] = {
2220 .help = "target hardware address",
2221 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2223 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2226 [ITEM_ARP_ETH_IPV4_TPA] = {
2228 .help = "target IPv4 address",
2229 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2231 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2236 .help = "match presence of any IPv6 extension header",
2237 .priv = PRIV_ITEM(IPV6_EXT,
2238 sizeof(struct rte_flow_item_ipv6_ext)),
2239 .next = NEXT(item_ipv6_ext),
2242 [ITEM_IPV6_EXT_NEXT_HDR] = {
2244 .help = "next header",
2245 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2246 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2251 .help = "match any ICMPv6 header",
2252 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2253 .next = NEXT(item_icmp6),
2256 [ITEM_ICMP6_TYPE] = {
2258 .help = "ICMPv6 type",
2259 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2260 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2263 [ITEM_ICMP6_CODE] = {
2265 .help = "ICMPv6 code",
2266 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2267 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2270 [ITEM_ICMP6_ND_NS] = {
2271 .name = "icmp6_nd_ns",
2272 .help = "match ICMPv6 neighbor discovery solicitation",
2273 .priv = PRIV_ITEM(ICMP6_ND_NS,
2274 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2275 .next = NEXT(item_icmp6_nd_ns),
2278 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2279 .name = "target_addr",
2280 .help = "target address",
2281 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2283 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2286 [ITEM_ICMP6_ND_NA] = {
2287 .name = "icmp6_nd_na",
2288 .help = "match ICMPv6 neighbor discovery advertisement",
2289 .priv = PRIV_ITEM(ICMP6_ND_NA,
2290 sizeof(struct rte_flow_item_icmp6_nd_na)),
2291 .next = NEXT(item_icmp6_nd_na),
2294 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2295 .name = "target_addr",
2296 .help = "target address",
2297 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2299 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2302 [ITEM_ICMP6_ND_OPT] = {
2303 .name = "icmp6_nd_opt",
2304 .help = "match presence of any ICMPv6 neighbor discovery"
2306 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2307 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2308 .next = NEXT(item_icmp6_nd_opt),
2311 [ITEM_ICMP6_ND_OPT_TYPE] = {
2313 .help = "ND option type",
2314 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2316 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2319 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2320 .name = "icmp6_nd_opt_sla_eth",
2321 .help = "match ICMPv6 neighbor discovery source Ethernet"
2322 " link-layer address option",
2324 (ICMP6_ND_OPT_SLA_ETH,
2325 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2326 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2329 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2331 .help = "source Ethernet LLA",
2332 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2334 .args = ARGS(ARGS_ENTRY_HTON
2335 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2337 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2338 .name = "icmp6_nd_opt_tla_eth",
2339 .help = "match ICMPv6 neighbor discovery target Ethernet"
2340 " link-layer address option",
2342 (ICMP6_ND_OPT_TLA_ETH,
2343 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2344 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2347 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2349 .help = "target Ethernet LLA",
2350 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2352 .args = ARGS(ARGS_ENTRY_HTON
2353 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2357 .help = "match metadata header",
2358 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2359 .next = NEXT(item_meta),
2362 [ITEM_META_DATA] = {
2364 .help = "metadata value",
2365 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2366 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2367 data, "\xff\xff\xff\xff")),
2371 .help = "match GRE key",
2372 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2373 .next = NEXT(item_gre_key),
2376 [ITEM_GRE_KEY_VALUE] = {
2378 .help = "key value",
2379 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2380 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2384 .help = "match GTP extension header with type 0x85",
2385 .priv = PRIV_ITEM(GTP_PSC,
2386 sizeof(struct rte_flow_item_gtp_psc)),
2387 .next = NEXT(item_gtp_psc),
2390 [ITEM_GTP_PSC_QFI] = {
2392 .help = "QoS flow identifier",
2393 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2394 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2397 [ITEM_GTP_PSC_PDU_T] = {
2400 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2401 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2406 .help = "match PPPoE session header",
2407 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2408 .next = NEXT(item_pppoes),
2413 .help = "match PPPoE discovery header",
2414 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2415 .next = NEXT(item_pppoed),
2418 [ITEM_PPPOE_SEID] = {
2420 .help = "session identifier",
2421 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2422 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2425 [ITEM_PPPOE_PROTO_ID] = {
2427 .help = "match PPPoE session protocol identifier",
2428 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2429 sizeof(struct rte_flow_item_pppoe_proto_id)),
2430 .next = NEXT(item_pppoe_proto_id),
2435 .help = "matches higig2 header",
2436 .priv = PRIV_ITEM(HIGIG2,
2437 sizeof(struct rte_flow_item_higig2_hdr)),
2438 .next = NEXT(item_higig2),
2441 [ITEM_HIGIG2_CLASSIFICATION] = {
2442 .name = "classification",
2443 .help = "matches classification of higig2 header",
2444 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2445 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2446 hdr.ppt1.classification)),
2448 [ITEM_HIGIG2_VID] = {
2450 .help = "matches vid of higig2 header",
2451 .next = NEXT(item_higig2, NEXT_ENTRY(UNSIGNED), item_param),
2452 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_higig2_hdr,
2455 /* Validate/create actions. */
2458 .help = "submit a list of associated actions",
2459 .next = NEXT(next_action),
2464 .help = "specify next action",
2465 .next = NEXT(next_action),
2469 .help = "end list of actions",
2470 .priv = PRIV_ACTION(END, 0),
2475 .help = "no-op action",
2476 .priv = PRIV_ACTION(VOID, 0),
2477 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2480 [ACTION_PASSTHRU] = {
2482 .help = "let subsequent rule process matched packets",
2483 .priv = PRIV_ACTION(PASSTHRU, 0),
2484 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2489 .help = "redirect traffic to a given group",
2490 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2491 .next = NEXT(action_jump),
2494 [ACTION_JUMP_GROUP] = {
2496 .help = "group to redirect traffic to",
2497 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2498 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2499 .call = parse_vc_conf,
2503 .help = "attach 32 bit value to packets",
2504 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2505 .next = NEXT(action_mark),
2508 [ACTION_MARK_ID] = {
2510 .help = "32 bit value to return with packets",
2511 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2512 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2513 .call = parse_vc_conf,
2517 .help = "flag packets",
2518 .priv = PRIV_ACTION(FLAG, 0),
2519 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2524 .help = "assign packets to a given queue index",
2525 .priv = PRIV_ACTION(QUEUE,
2526 sizeof(struct rte_flow_action_queue)),
2527 .next = NEXT(action_queue),
2530 [ACTION_QUEUE_INDEX] = {
2532 .help = "queue index to use",
2533 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2534 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2535 .call = parse_vc_conf,
2539 .help = "drop packets (note: passthru has priority)",
2540 .priv = PRIV_ACTION(DROP, 0),
2541 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2546 .help = "enable counters for this rule",
2547 .priv = PRIV_ACTION(COUNT,
2548 sizeof(struct rte_flow_action_count)),
2549 .next = NEXT(action_count),
2552 [ACTION_COUNT_ID] = {
2553 .name = "identifier",
2554 .help = "counter identifier to use",
2555 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2556 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2557 .call = parse_vc_conf,
2559 [ACTION_COUNT_SHARED] = {
2561 .help = "shared counter",
2562 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2563 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2565 .call = parse_vc_conf,
2569 .help = "spread packets among several queues",
2570 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2571 .next = NEXT(action_rss),
2572 .call = parse_vc_action_rss,
2574 [ACTION_RSS_FUNC] = {
2576 .help = "RSS hash function to apply",
2577 .next = NEXT(action_rss,
2578 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2579 ACTION_RSS_FUNC_TOEPLITZ,
2580 ACTION_RSS_FUNC_SIMPLE_XOR,
2581 ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ)),
2583 [ACTION_RSS_FUNC_DEFAULT] = {
2585 .help = "default hash function",
2586 .call = parse_vc_action_rss_func,
2588 [ACTION_RSS_FUNC_TOEPLITZ] = {
2590 .help = "Toeplitz hash function",
2591 .call = parse_vc_action_rss_func,
2593 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2594 .name = "simple_xor",
2595 .help = "simple XOR hash function",
2596 .call = parse_vc_action_rss_func,
2598 [ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ] = {
2599 .name = "symmetric_toeplitz",
2600 .help = "Symmetric Toeplitz hash function",
2601 .call = parse_vc_action_rss_func,
2603 [ACTION_RSS_LEVEL] = {
2605 .help = "encapsulation level for \"types\"",
2606 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2607 .args = ARGS(ARGS_ENTRY_ARB
2608 (offsetof(struct action_rss_data, conf) +
2609 offsetof(struct rte_flow_action_rss, level),
2610 sizeof(((struct rte_flow_action_rss *)0)->
2613 [ACTION_RSS_TYPES] = {
2615 .help = "specific RSS hash types",
2616 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2618 [ACTION_RSS_TYPE] = {
2620 .help = "RSS hash type",
2621 .call = parse_vc_action_rss_type,
2622 .comp = comp_vc_action_rss_type,
2624 [ACTION_RSS_KEY] = {
2626 .help = "RSS hash key",
2627 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2628 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2630 (offsetof(struct action_rss_data, conf) +
2631 offsetof(struct rte_flow_action_rss, key_len),
2632 sizeof(((struct rte_flow_action_rss *)0)->
2634 ARGS_ENTRY(struct action_rss_data, key)),
2636 [ACTION_RSS_KEY_LEN] = {
2638 .help = "RSS hash key length in bytes",
2639 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2640 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2641 (offsetof(struct action_rss_data, conf) +
2642 offsetof(struct rte_flow_action_rss, key_len),
2643 sizeof(((struct rte_flow_action_rss *)0)->
2646 RSS_HASH_KEY_LENGTH)),
2648 [ACTION_RSS_QUEUES] = {
2650 .help = "queue indices to use",
2651 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2652 .call = parse_vc_conf,
2654 [ACTION_RSS_QUEUE] = {
2656 .help = "queue index",
2657 .call = parse_vc_action_rss_queue,
2658 .comp = comp_vc_action_rss_queue,
2662 .help = "direct traffic to physical function",
2663 .priv = PRIV_ACTION(PF, 0),
2664 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2669 .help = "direct traffic to a virtual function ID",
2670 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2671 .next = NEXT(action_vf),
2674 [ACTION_VF_ORIGINAL] = {
2676 .help = "use original VF ID if possible",
2677 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2678 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2680 .call = parse_vc_conf,
2685 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2686 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2687 .call = parse_vc_conf,
2689 [ACTION_PHY_PORT] = {
2691 .help = "direct packets to physical port index",
2692 .priv = PRIV_ACTION(PHY_PORT,
2693 sizeof(struct rte_flow_action_phy_port)),
2694 .next = NEXT(action_phy_port),
2697 [ACTION_PHY_PORT_ORIGINAL] = {
2699 .help = "use original port index if possible",
2700 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2701 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2703 .call = parse_vc_conf,
2705 [ACTION_PHY_PORT_INDEX] = {
2707 .help = "physical port index",
2708 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2709 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2711 .call = parse_vc_conf,
2713 [ACTION_PORT_ID] = {
2715 .help = "direct matching traffic to a given DPDK port ID",
2716 .priv = PRIV_ACTION(PORT_ID,
2717 sizeof(struct rte_flow_action_port_id)),
2718 .next = NEXT(action_port_id),
2721 [ACTION_PORT_ID_ORIGINAL] = {
2723 .help = "use original DPDK port ID if possible",
2724 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2725 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2727 .call = parse_vc_conf,
2729 [ACTION_PORT_ID_ID] = {
2731 .help = "DPDK port ID",
2732 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2733 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2734 .call = parse_vc_conf,
2738 .help = "meter the directed packets at given id",
2739 .priv = PRIV_ACTION(METER,
2740 sizeof(struct rte_flow_action_meter)),
2741 .next = NEXT(action_meter),
2744 [ACTION_METER_ID] = {
2746 .help = "meter id to use",
2747 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2748 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2749 .call = parse_vc_conf,
2751 [ACTION_OF_SET_MPLS_TTL] = {
2752 .name = "of_set_mpls_ttl",
2753 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2756 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2757 .next = NEXT(action_of_set_mpls_ttl),
2760 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2763 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2764 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2766 .call = parse_vc_conf,
2768 [ACTION_OF_DEC_MPLS_TTL] = {
2769 .name = "of_dec_mpls_ttl",
2770 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2771 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2772 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2775 [ACTION_OF_SET_NW_TTL] = {
2776 .name = "of_set_nw_ttl",
2777 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2780 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2781 .next = NEXT(action_of_set_nw_ttl),
2784 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2787 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2788 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2790 .call = parse_vc_conf,
2792 [ACTION_OF_DEC_NW_TTL] = {
2793 .name = "of_dec_nw_ttl",
2794 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2795 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2796 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2799 [ACTION_OF_COPY_TTL_OUT] = {
2800 .name = "of_copy_ttl_out",
2801 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2802 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2803 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2806 [ACTION_OF_COPY_TTL_IN] = {
2807 .name = "of_copy_ttl_in",
2808 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2809 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2810 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2813 [ACTION_OF_POP_VLAN] = {
2814 .name = "of_pop_vlan",
2815 .help = "OpenFlow's OFPAT_POP_VLAN",
2816 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2817 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2820 [ACTION_OF_PUSH_VLAN] = {
2821 .name = "of_push_vlan",
2822 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2825 sizeof(struct rte_flow_action_of_push_vlan)),
2826 .next = NEXT(action_of_push_vlan),
2829 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2830 .name = "ethertype",
2831 .help = "EtherType",
2832 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2833 .args = ARGS(ARGS_ENTRY_HTON
2834 (struct rte_flow_action_of_push_vlan,
2836 .call = parse_vc_conf,
2838 [ACTION_OF_SET_VLAN_VID] = {
2839 .name = "of_set_vlan_vid",
2840 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2843 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2844 .next = NEXT(action_of_set_vlan_vid),
2847 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2850 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2851 .args = ARGS(ARGS_ENTRY_HTON
2852 (struct rte_flow_action_of_set_vlan_vid,
2854 .call = parse_vc_conf,
2856 [ACTION_OF_SET_VLAN_PCP] = {
2857 .name = "of_set_vlan_pcp",
2858 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2861 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2862 .next = NEXT(action_of_set_vlan_pcp),
2865 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2867 .help = "VLAN priority",
2868 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2869 .args = ARGS(ARGS_ENTRY_HTON
2870 (struct rte_flow_action_of_set_vlan_pcp,
2872 .call = parse_vc_conf,
2874 [ACTION_OF_POP_MPLS] = {
2875 .name = "of_pop_mpls",
2876 .help = "OpenFlow's OFPAT_POP_MPLS",
2877 .priv = PRIV_ACTION(OF_POP_MPLS,
2878 sizeof(struct rte_flow_action_of_pop_mpls)),
2879 .next = NEXT(action_of_pop_mpls),
2882 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2883 .name = "ethertype",
2884 .help = "EtherType",
2885 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2886 .args = ARGS(ARGS_ENTRY_HTON
2887 (struct rte_flow_action_of_pop_mpls,
2889 .call = parse_vc_conf,
2891 [ACTION_OF_PUSH_MPLS] = {
2892 .name = "of_push_mpls",
2893 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2896 sizeof(struct rte_flow_action_of_push_mpls)),
2897 .next = NEXT(action_of_push_mpls),
2900 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2901 .name = "ethertype",
2902 .help = "EtherType",
2903 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2904 .args = ARGS(ARGS_ENTRY_HTON
2905 (struct rte_flow_action_of_push_mpls,
2907 .call = parse_vc_conf,
2909 [ACTION_VXLAN_ENCAP] = {
2910 .name = "vxlan_encap",
2911 .help = "VXLAN encapsulation, uses configuration set by \"set"
2913 .priv = PRIV_ACTION(VXLAN_ENCAP,
2914 sizeof(struct action_vxlan_encap_data)),
2915 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2916 .call = parse_vc_action_vxlan_encap,
2918 [ACTION_VXLAN_DECAP] = {
2919 .name = "vxlan_decap",
2920 .help = "Performs a decapsulation action by stripping all"
2921 " headers of the VXLAN tunnel network overlay from the"
2923 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2924 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2927 [ACTION_NVGRE_ENCAP] = {
2928 .name = "nvgre_encap",
2929 .help = "NVGRE encapsulation, uses configuration set by \"set"
2931 .priv = PRIV_ACTION(NVGRE_ENCAP,
2932 sizeof(struct action_nvgre_encap_data)),
2933 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2934 .call = parse_vc_action_nvgre_encap,
2936 [ACTION_NVGRE_DECAP] = {
2937 .name = "nvgre_decap",
2938 .help = "Performs a decapsulation action by stripping all"
2939 " headers of the NVGRE tunnel network overlay from the"
2941 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2942 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2945 [ACTION_L2_ENCAP] = {
2947 .help = "l2 encap, uses configuration set by"
2948 " \"set l2_encap\"",
2949 .priv = PRIV_ACTION(RAW_ENCAP,
2950 sizeof(struct action_raw_encap_data)),
2951 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2952 .call = parse_vc_action_l2_encap,
2954 [ACTION_L2_DECAP] = {
2956 .help = "l2 decap, uses configuration set by"
2957 " \"set l2_decap\"",
2958 .priv = PRIV_ACTION(RAW_DECAP,
2959 sizeof(struct action_raw_decap_data)),
2960 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2961 .call = parse_vc_action_l2_decap,
2963 [ACTION_MPLSOGRE_ENCAP] = {
2964 .name = "mplsogre_encap",
2965 .help = "mplsogre encapsulation, uses configuration set by"
2966 " \"set mplsogre_encap\"",
2967 .priv = PRIV_ACTION(RAW_ENCAP,
2968 sizeof(struct action_raw_encap_data)),
2969 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2970 .call = parse_vc_action_mplsogre_encap,
2972 [ACTION_MPLSOGRE_DECAP] = {
2973 .name = "mplsogre_decap",
2974 .help = "mplsogre decapsulation, uses configuration set by"
2975 " \"set mplsogre_decap\"",
2976 .priv = PRIV_ACTION(RAW_DECAP,
2977 sizeof(struct action_raw_decap_data)),
2978 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2979 .call = parse_vc_action_mplsogre_decap,
2981 [ACTION_MPLSOUDP_ENCAP] = {
2982 .name = "mplsoudp_encap",
2983 .help = "mplsoudp encapsulation, uses configuration set by"
2984 " \"set mplsoudp_encap\"",
2985 .priv = PRIV_ACTION(RAW_ENCAP,
2986 sizeof(struct action_raw_encap_data)),
2987 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2988 .call = parse_vc_action_mplsoudp_encap,
2990 [ACTION_MPLSOUDP_DECAP] = {
2991 .name = "mplsoudp_decap",
2992 .help = "mplsoudp decapsulation, uses configuration set by"
2993 " \"set mplsoudp_decap\"",
2994 .priv = PRIV_ACTION(RAW_DECAP,
2995 sizeof(struct action_raw_decap_data)),
2996 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2997 .call = parse_vc_action_mplsoudp_decap,
2999 [ACTION_SET_IPV4_SRC] = {
3000 .name = "set_ipv4_src",
3001 .help = "Set a new IPv4 source address in the outermost"
3003 .priv = PRIV_ACTION(SET_IPV4_SRC,
3004 sizeof(struct rte_flow_action_set_ipv4)),
3005 .next = NEXT(action_set_ipv4_src),
3008 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
3009 .name = "ipv4_addr",
3010 .help = "new IPv4 source address to set",
3011 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
3012 .args = ARGS(ARGS_ENTRY_HTON
3013 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3014 .call = parse_vc_conf,
3016 [ACTION_SET_IPV4_DST] = {
3017 .name = "set_ipv4_dst",
3018 .help = "Set a new IPv4 destination address in the outermost"
3020 .priv = PRIV_ACTION(SET_IPV4_DST,
3021 sizeof(struct rte_flow_action_set_ipv4)),
3022 .next = NEXT(action_set_ipv4_dst),
3025 [ACTION_SET_IPV4_DST_IPV4_DST] = {
3026 .name = "ipv4_addr",
3027 .help = "new IPv4 destination address to set",
3028 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
3029 .args = ARGS(ARGS_ENTRY_HTON
3030 (struct rte_flow_action_set_ipv4, ipv4_addr)),
3031 .call = parse_vc_conf,
3033 [ACTION_SET_IPV6_SRC] = {
3034 .name = "set_ipv6_src",
3035 .help = "Set a new IPv6 source address in the outermost"
3037 .priv = PRIV_ACTION(SET_IPV6_SRC,
3038 sizeof(struct rte_flow_action_set_ipv6)),
3039 .next = NEXT(action_set_ipv6_src),
3042 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3043 .name = "ipv6_addr",
3044 .help = "new IPv6 source address to set",
3045 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3046 .args = ARGS(ARGS_ENTRY_HTON
3047 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3048 .call = parse_vc_conf,
3050 [ACTION_SET_IPV6_DST] = {
3051 .name = "set_ipv6_dst",
3052 .help = "Set a new IPv6 destination address in the outermost"
3054 .priv = PRIV_ACTION(SET_IPV6_DST,
3055 sizeof(struct rte_flow_action_set_ipv6)),
3056 .next = NEXT(action_set_ipv6_dst),
3059 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3060 .name = "ipv6_addr",
3061 .help = "new IPv6 destination address to set",
3062 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3063 .args = ARGS(ARGS_ENTRY_HTON
3064 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3065 .call = parse_vc_conf,
3067 [ACTION_SET_TP_SRC] = {
3068 .name = "set_tp_src",
3069 .help = "set a new source port number in the outermost"
3071 .priv = PRIV_ACTION(SET_TP_SRC,
3072 sizeof(struct rte_flow_action_set_tp)),
3073 .next = NEXT(action_set_tp_src),
3076 [ACTION_SET_TP_SRC_TP_SRC] = {
3078 .help = "new source port number to set",
3079 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3080 .args = ARGS(ARGS_ENTRY_HTON
3081 (struct rte_flow_action_set_tp, port)),
3082 .call = parse_vc_conf,
3084 [ACTION_SET_TP_DST] = {
3085 .name = "set_tp_dst",
3086 .help = "set a new destination port number in the outermost"
3088 .priv = PRIV_ACTION(SET_TP_DST,
3089 sizeof(struct rte_flow_action_set_tp)),
3090 .next = NEXT(action_set_tp_dst),
3093 [ACTION_SET_TP_DST_TP_DST] = {
3095 .help = "new destination port number to set",
3096 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3097 .args = ARGS(ARGS_ENTRY_HTON
3098 (struct rte_flow_action_set_tp, port)),
3099 .call = parse_vc_conf,
3101 [ACTION_MAC_SWAP] = {
3103 .help = "Swap the source and destination MAC addresses"
3104 " in the outermost Ethernet header",
3105 .priv = PRIV_ACTION(MAC_SWAP, 0),
3106 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3109 [ACTION_DEC_TTL] = {
3111 .help = "decrease network TTL if available",
3112 .priv = PRIV_ACTION(DEC_TTL, 0),
3113 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3116 [ACTION_SET_TTL] = {
3118 .help = "set ttl value",
3119 .priv = PRIV_ACTION(SET_TTL,
3120 sizeof(struct rte_flow_action_set_ttl)),
3121 .next = NEXT(action_set_ttl),
3124 [ACTION_SET_TTL_TTL] = {
3125 .name = "ttl_value",
3126 .help = "new ttl value to set",
3127 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3128 .args = ARGS(ARGS_ENTRY_HTON
3129 (struct rte_flow_action_set_ttl, ttl_value)),
3130 .call = parse_vc_conf,
3132 [ACTION_SET_MAC_SRC] = {
3133 .name = "set_mac_src",
3134 .help = "set source mac address",
3135 .priv = PRIV_ACTION(SET_MAC_SRC,
3136 sizeof(struct rte_flow_action_set_mac)),
3137 .next = NEXT(action_set_mac_src),
3140 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3142 .help = "new source mac address",
3143 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3144 .args = ARGS(ARGS_ENTRY_HTON
3145 (struct rte_flow_action_set_mac, mac_addr)),
3146 .call = parse_vc_conf,
3148 [ACTION_SET_MAC_DST] = {
3149 .name = "set_mac_dst",
3150 .help = "set destination mac address",
3151 .priv = PRIV_ACTION(SET_MAC_DST,
3152 sizeof(struct rte_flow_action_set_mac)),
3153 .next = NEXT(action_set_mac_dst),
3156 [ACTION_SET_MAC_DST_MAC_DST] = {
3158 .help = "new destination mac address to set",
3159 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3160 .args = ARGS(ARGS_ENTRY_HTON
3161 (struct rte_flow_action_set_mac, mac_addr)),
3162 .call = parse_vc_conf,
3164 [ACTION_INC_TCP_SEQ] = {
3165 .name = "inc_tcp_seq",
3166 .help = "increase TCP sequence number",
3167 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3168 .next = NEXT(action_inc_tcp_seq),
3171 [ACTION_INC_TCP_SEQ_VALUE] = {
3173 .help = "the value to increase TCP sequence number by",
3174 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3175 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3176 .call = parse_vc_conf,
3178 [ACTION_DEC_TCP_SEQ] = {
3179 .name = "dec_tcp_seq",
3180 .help = "decrease TCP sequence number",
3181 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3182 .next = NEXT(action_dec_tcp_seq),
3185 [ACTION_DEC_TCP_SEQ_VALUE] = {
3187 .help = "the value to decrease TCP sequence number by",
3188 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3189 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3190 .call = parse_vc_conf,
3192 [ACTION_INC_TCP_ACK] = {
3193 .name = "inc_tcp_ack",
3194 .help = "increase TCP acknowledgment number",
3195 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3196 .next = NEXT(action_inc_tcp_ack),
3199 [ACTION_INC_TCP_ACK_VALUE] = {
3201 .help = "the value to increase TCP acknowledgment number by",
3202 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3203 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3204 .call = parse_vc_conf,
3206 [ACTION_DEC_TCP_ACK] = {
3207 .name = "dec_tcp_ack",
3208 .help = "decrease TCP acknowledgment number",
3209 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3210 .next = NEXT(action_dec_tcp_ack),
3213 [ACTION_DEC_TCP_ACK_VALUE] = {
3215 .help = "the value to decrease TCP acknowledgment number by",
3216 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3217 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3218 .call = parse_vc_conf,
3220 [ACTION_RAW_ENCAP] = {
3221 .name = "raw_encap",
3222 .help = "encapsulation data, defined by set raw_encap",
3223 .priv = PRIV_ACTION(RAW_ENCAP,
3224 sizeof(struct rte_flow_action_raw_encap)),
3225 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3226 .call = parse_vc_action_raw_encap,
3228 [ACTION_RAW_DECAP] = {
3229 .name = "raw_decap",
3230 .help = "decapsulation data, defined by set raw_encap",
3231 .priv = PRIV_ACTION(RAW_DECAP,
3232 sizeof(struct rte_flow_action_raw_decap)),
3233 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3234 .call = parse_vc_action_raw_decap,
3236 /* Top level command. */
3239 .help = "set raw encap/decap data",
3240 .type = "set raw_encap|raw_decap <pattern>",
3241 .next = NEXT(NEXT_ENTRY
3244 .call = parse_set_init,
3246 /* Sub-level commands. */
3248 .name = "raw_encap",
3249 .help = "set raw encap data",
3250 .next = NEXT(next_item),
3251 .call = parse_set_raw_encap_decap,
3254 .name = "raw_decap",
3255 .help = "set raw decap data",
3256 .next = NEXT(next_item),
3257 .call = parse_set_raw_encap_decap,
3261 /** Remove and return last entry from argument stack. */
3262 static const struct arg *
3263 pop_args(struct context *ctx)
3265 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3268 /** Add entry on top of the argument stack. */
3270 push_args(struct context *ctx, const struct arg *arg)
3272 if (ctx->args_num == CTX_STACK_SIZE)
3274 ctx->args[ctx->args_num++] = arg;
3278 /** Spread value into buffer according to bit-mask. */
3280 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3282 uint32_t i = arg->size;
3290 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3299 unsigned int shift = 0;
3300 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
3302 for (shift = 0; arg->mask[i] >> shift; ++shift) {
3303 if (!(arg->mask[i] & (1 << shift)))
3308 *buf &= ~(1 << shift);
3309 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * Behaves like strcmp() restricted to @p partial_len characters, except
 * that when @p partial is a strict prefix of @p full the result is the
 * first unmatched character of @p full (nonzero), so only an exact or
 * full-length match yields 0.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
3331 * Parse a prefix length and generate a bit-mask.
3333 * Last argument (ctx->args) is retrieved to determine mask size, storage
3334 * location and whether the result must use network byte ordering.
/* Returns the number of characters consumed on success, negative on error. */
3337 parse_prefix(struct context *ctx, const struct token *token,
3338 const char *str, unsigned int len,
3339 void *buf, unsigned int size)
/* The argument descriptor pushed by the caller drives where the mask goes. */
3341 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n highest bits set, for the partial byte. */
3342 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3349 /* Argument is expected. */
/* Parse the prefix length as an unsigned integer; the whole token must
 * be consumed or the input is rejected. */
3353 u = strtoumax(str, &end, 0);
3354 if (errno || (size_t)(end - str) != len)
/* Bit-field arguments: count available mask bits first (dst == NULL). */
3359 extra = arg_entry_bf_fill(NULL, 0, arg);
/* Spread the computed prefix value into object and an all-ones objmask. */
3368 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3369 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Plain byte arguments: reject prefixes longer than the field. */
3376 if (bytes > size || bytes + !!extra > size)
3380 buf = (uint8_t *)ctx->object + arg->offset;
3381 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Host-order layout on LE: ones occupy the high-address end. */
3383 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3384 memset(buf, 0x00, size - bytes);
3386 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network-order layout: ones start at the low-address end. */
3390 memset(buf, 0xff, bytes);
3391 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3393 ((uint8_t *)buf)[bytes] = conv[extra];
/* A prefix always implies an exact (all-ones) object mask. */
3396 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* On error the argument is pushed back so the token can be retried. */
3399 push_args(ctx, arg);
3403 /** Default parsing function for token name matching. */
3405 parse_default(struct context *ctx, const struct token *token,
3406 const char *str, unsigned int len,
3407 void *buf, unsigned int size)
3412 if (strcmp_partial(token->name, str, len))
3417 /** Parse flow command, initialize output buffer for subsequent tokens. */
3419 parse_init(struct context *ctx, const struct token *token,
3420 const char *str, unsigned int len,
3421 void *buf, unsigned int size)
3423 struct buffer *out = buf;
3425 /* Token name must match. */
3426 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3428 /* Nothing else to do if there is no buffer. */
3431 /* Make sure buffer is large enough. */
3432 if (size < sizeof(*out))
3434 /* Initialize buffer. */
3435 memset(out, 0x00, sizeof(*out));
3436 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3439 ctx->objmask = NULL;
3443 /** Parse tokens for validate/create commands. */
/* Central dispatcher for every token of a "flow validate|create" command.
 * It fills *out (a struct buffer) incrementally: attributes first, then
 * pattern items growing upward from the end of the fixed header, while
 * their spec/last/mask payloads grow downward from the end of the buffer. */
3445 parse_vc(struct context *ctx, const struct token *token,
3446 const char *str, unsigned int len,
3447 void *buf, unsigned int size)
3449 struct buffer *out = buf;
3453 /* Token name must match. */
3454 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3456 /* Nothing else to do if there is no buffer. */
/* First token of the command: record which command this is and anchor
 * the downward-growing data region at the end of the buffer. */
3459 if (!out->command) {
3460 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3462 if (sizeof(*out) > size)
3464 out->command = ctx->curr;
3467 ctx->objmask = NULL;
3468 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the rte_flow_attr. */
3472 ctx->object = &out->args.vc.attr;
3473 ctx->objmask = NULL;
3474 switch (ctx->curr) {
3479 out->args.vc.attr.ingress = 1;
3482 out->args.vc.attr.egress = 1;
3485 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: pattern array starts right after the header. */
3488 out->args.vc.pattern =
3489 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3491 ctx->object = out->args.vc.pattern;
3492 ctx->objmask = NULL;
/* "actions" keyword: action array starts after the last pattern item. */
3495 out->args.vc.actions =
3496 (void *)RTE_ALIGN_CEIL((uintptr_t)
3497 (out->args.vc.pattern +
3498 out->args.vc.pattern_n),
3500 ctx->object = out->args.vc.actions;
3501 ctx->objmask = NULL;
/* Still in the pattern section: append a new item and reserve room for
 * three copies of its payload (spec, last, mask) in the data region. */
3508 if (!out->args.vc.actions) {
3509 const struct parse_item_priv *priv = token->priv;
3510 struct rte_flow_item *item =
3511 out->args.vc.pattern + out->args.vc.pattern_n;
3513 data_size = priv->size * 3; /* spec, last, mask */
3514 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3515 (out->args.vc.data - data_size),
/* Fail when the upward item array would collide with the data region. */
3517 if ((uint8_t *)item + sizeof(*item) > data)
3519 *item = (struct rte_flow_item){
3522 ++out->args.vc.pattern_n;
3524 ctx->objmask = NULL;
/* Actions section: append a new action plus one configuration payload. */
3526 const struct parse_action_priv *priv = token->priv;
3527 struct rte_flow_action *action =
3528 out->args.vc.actions + out->args.vc.actions_n;
3530 data_size = priv->size; /* configuration */
3531 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3532 (out->args.vc.data - data_size),
3534 if ((uint8_t *)action + sizeof(*action) > data)
3536 *action = (struct rte_flow_action){
/* Actions with no configuration keep conf == NULL. */
3538 .conf = data_size ? data : NULL,
3540 ++out->args.vc.actions_n;
3541 ctx->object = action;
3542 ctx->objmask = NULL;
/* Zero the freshly reserved payload and move the data watermark down. */
3544 memset(data, 0, data_size);
3545 out->args.vc.data = data;
3546 ctx->objdata = data_size;
3550 /** Parse pattern item parameter type. */
/* Handles the is/spec/last/mask/prefix keywords that select which of the
 * three payload copies (spec, last, mask) subsequent field tokens write to. */
3552 parse_vc_spec(struct context *ctx, const struct token *token,
3553 const char *str, unsigned int len,
3554 void *buf, unsigned int size)
3556 struct buffer *out = buf;
3557 struct rte_flow_item *item;
3563 /* Token name must match. */
3564 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3566 /* Parse parameter types. */
3567 switch (ctx->curr) {
3568 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3574 case ITEM_PARAM_SPEC:
3577 case ITEM_PARAM_LAST:
3580 case ITEM_PARAM_PREFIX:
3581 /* Modify next token to expect a prefix. */
3582 if (ctx->next_num < 2)
3584 ctx->next[ctx->next_num - 2] = prefix;
3586 case ITEM_PARAM_MASK:
3592 /* Nothing else to do if there is no buffer. */
/* A parameter keyword is only valid after at least one pattern item. */
3595 if (!out->args.vc.pattern_n)
3597 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3598 data_size = ctx->objdata / 3; /* spec, last, mask */
3599 /* Point to selected object. */
/* index selects which third of the payload the next fields target. */
3600 ctx->object = out->args.vc.data + (data_size * index);
3602 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3603 item->mask = ctx->objmask;
3605 ctx->objmask = NULL;
3606 /* Update relevant item pointer. */
/* Bind the chosen payload copy to the matching rte_flow_item member. */
3607 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3612 /** Parse action configuration field. */
3614 parse_vc_conf(struct context *ctx, const struct token *token,
3615 const char *str, unsigned int len,
3616 void *buf, unsigned int size)
3618 struct buffer *out = buf;
3621 /* Token name must match. */
3622 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3624 /* Nothing else to do if there is no buffer. */
3627 /* Point to selected object. */
3628 ctx->object = out->args.vc.data;
3629 ctx->objmask = NULL;
3633 /** Parse RSS action. */
/* Appends an RSS action via parse_vc(), then fills its configuration with
 * testpmd defaults: default hash function, default key, and one entry per
 * configured Rx queue (capped at ACTION_RSS_QUEUE_NUM). */
3635 parse_vc_action_rss(struct context *ctx, const struct token *token,
3636 const char *str, unsigned int len,
3637 void *buf, unsigned int size)
3639 struct buffer *out = buf;
3640 struct rte_flow_action *action;
3641 struct action_rss_data *action_rss_data;
/* Let the generic action path allocate the action and its payload. */
3645 ret = parse_vc(ctx, token, str, len, buf, size);
3648 /* Nothing else to do if there is no buffer. */
3651 if (!out->args.vc.actions_n)
3653 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3654 /* Point to selected object. */
3655 ctx->object = out->args.vc.data;
3656 ctx->objmask = NULL;
3657 /* Set up default configuration. */
3658 action_rss_data = ctx->object;
3659 *action_rss_data = (struct action_rss_data){
3660 .conf = (struct rte_flow_action_rss){
3661 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3664 .key_len = sizeof(action_rss_data->key),
3665 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3666 .key = action_rss_data->key,
3667 .queue = action_rss_data->queue,
/* Placeholder key; users are expected to override it. */
3669 .key = "testpmd's default RSS hash key, "
3670 "override it for better balancing",
/* Default queue list is the identity mapping 0..queue_num-1. */
3673 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3674 action_rss_data->queue[i] = i;
/* When a concrete port is targeted, clamp key_len to what the device
 * actually supports. */
3675 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3676 ctx->port != (portid_t)RTE_PORT_ALL) {
3677 struct rte_eth_dev_info info;
3680 ret2 = rte_eth_dev_info_get(ctx->port, &info);
3684 action_rss_data->conf.key_len =
3685 RTE_MIN(sizeof(action_rss_data->key),
3686 info.hash_key_size);
3688 action->conf = &action_rss_data->conf;
3693 * Parse func field for RSS action.
3695 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3696 * ACTION_RSS_FUNC_* index that called this function.
3699 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3700 const char *str, unsigned int len,
3701 void *buf, unsigned int size)
3703 struct action_rss_data *action_rss_data;
3704 enum rte_eth_hash_function func;
3708 /* Token name must match. */
3709 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3711 switch (ctx->curr) {
3712 case ACTION_RSS_FUNC_DEFAULT:
3713 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3715 case ACTION_RSS_FUNC_TOEPLITZ:
3716 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3718 case ACTION_RSS_FUNC_SIMPLE_XOR:
3719 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
3721 case ACTION_RSS_FUNC_SYMMETRIC_TOEPLITZ:
3722 func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
3729 action_rss_data = ctx->object;
3730 action_rss_data->conf.func = func;
3735 * Parse type field for RSS action.
3737 * Valid tokens are type field names and the "end" token.
/* The high 16 bits of ctx->objdata act as a "list started" flag: the first
 * type token clears conf.types before OR-ing values in; "end" clears the
 * flag again and terminates the list. */
3740 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3741 const char *str, unsigned int len,
3742 void *buf, unsigned int size)
3744 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3745 struct action_rss_data *action_rss_data;
3751 if (ctx->curr != ACTION_RSS_TYPE)
/* First entry of the list: reset accumulated types. */
3753 if (!(ctx->objdata >> 16) && ctx->object) {
3754 action_rss_data = ctx->object;
3755 action_rss_data->conf.types = 0;
/* "end" terminates the variable-length type list. */
3757 if (!strcmp_partial("end", str, len)) {
3758 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3761 for (i = 0; rss_type_table[i].str; ++i)
3762 if (!strcmp_partial(rss_type_table[i].str, str, len))
3764 if (!rss_type_table[i].str)
/* Mark the list as started while preserving the low half of objdata. */
3766 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token so another type name can follow. */
3768 if (ctx->next_num == RTE_DIM(ctx->next))
3770 ctx->next[ctx->next_num++] = next;
3773 action_rss_data = ctx->object;
3774 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3779 * Parse queue field for RSS action.
3781 * Valid tokens are queue indices and the "end" token.
/* The high 16 bits of ctx->objdata count how many queue indices have been
 * parsed so far; each index is parsed by parse_int() into the appropriate
 * slot of action_rss_data->queue via a temporary arg descriptor. */
3784 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3785 const char *str, unsigned int len,
3786 void *buf, unsigned int size)
3788 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3789 struct action_rss_data *action_rss_data;
3790 const struct arg *arg;
3797 if (ctx->curr != ACTION_RSS_QUEUE)
/* i = number of queues accepted so far. */
3799 i = ctx->objdata >> 16;
3800 if (!strcmp_partial("end", str, len)) {
3801 ctx->objdata &= 0xffff;
/* Reject more entries than the fixed queue array can hold. */
3804 if (i >= ACTION_RSS_QUEUE_NUM)
/* Arbitrary-offset descriptor targeting queue[i]. */
3806 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3807 i * sizeof(action_rss_data->queue[i]),
3808 sizeof(action_rss_data->queue[i]));
3809 if (push_args(ctx, arg))
/* Delegate the actual integer parsing. */
3811 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Bump the element counter and expect another queue token next. */
3817 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3819 if (ctx->next_num == RTE_DIM(ctx->next))
3821 ctx->next[ctx->next_num++] = next;
3825 action_rss_data = ctx->object;
3826 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer rather than a zero-length array. */
3827 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3831 /** Parse VXLAN encap action. */
/* Builds the rte_flow_action_vxlan_encap configuration from the global
 * vxlan_encap_conf settings previously filled by "set vxlan" commands:
 * ETH / (VLAN) / IPv4-or-IPv6 / UDP / VXLAN item chain. */
3833 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3834 const char *str, unsigned int len,
3835 void *buf, unsigned int size)
3837 struct buffer *out = buf;
3838 struct rte_flow_action *action;
3839 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Generic action allocation first. */
3842 ret = parse_vc(ctx, token, str, len, buf, size);
3845 /* Nothing else to do if there is no buffer. */
3848 if (!out->args.vc.actions_n
3850 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3851 /* Point to selected object. */
3852 ctx->object = out->args.vc.data;
3853 ctx->objmask = NULL;
3854 /* Set up default configuration. */
3855 action_vxlan_encap_data = ctx->object;
/* Fixed item chain; slots may be overridden to IPv6/VOID below. */
3856 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3857 .conf = (struct rte_flow_action_vxlan_encap){
3858 .definition = action_vxlan_encap_data->items,
3862 .type = RTE_FLOW_ITEM_TYPE_ETH,
3863 .spec = &action_vxlan_encap_data->item_eth,
3864 .mask = &rte_flow_item_eth_mask,
3867 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3868 .spec = &action_vxlan_encap_data->item_vlan,
3869 .mask = &rte_flow_item_vlan_mask,
3872 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3873 .spec = &action_vxlan_encap_data->item_ipv4,
3874 .mask = &rte_flow_item_ipv4_mask,
3877 .type = RTE_FLOW_ITEM_TYPE_UDP,
3878 .spec = &action_vxlan_encap_data->item_udp,
3879 .mask = &rte_flow_item_udp_mask,
3882 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3883 .spec = &action_vxlan_encap_data->item_vxlan,
3884 .mask = &rte_flow_item_vxlan_mask,
3887 .type = RTE_FLOW_ITEM_TYPE_END,
3892 .tci = vxlan_encap_conf.vlan_tci,
3896 .src_addr = vxlan_encap_conf.ipv4_src,
3897 .dst_addr = vxlan_encap_conf.ipv4_dst,
3900 .src_port = vxlan_encap_conf.udp_src,
3901 .dst_port = vxlan_encap_conf.udp_dst,
3903 .item_vxlan.flags = 0,
3905 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3906 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3907 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3908 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 tunnel: replace the IPv4 slot (index 2) with an IPv6 item. */
3909 if (!vxlan_encap_conf.select_ipv4) {
3910 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3911 &vxlan_encap_conf.ipv6_src,
3912 sizeof(vxlan_encap_conf.ipv6_src));
3913 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3914 &vxlan_encap_conf.ipv6_dst,
3915 sizeof(vxlan_encap_conf.ipv6_dst));
3916 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3917 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3918 .spec = &action_vxlan_encap_data->item_ipv6,
3919 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize slot 1 instead of shifting the array. */
3922 if (!vxlan_encap_conf.select_vlan)
3923 action_vxlan_encap_data->items[1].type =
3924 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional ToS/TTL override needs a widened L3 mask (static so it
 * outlives this call, as the item keeps a pointer to it). */
3925 if (vxlan_encap_conf.select_tos_ttl) {
3926 if (vxlan_encap_conf.select_ipv4) {
3927 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3929 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3930 sizeof(ipv4_mask_tos));
3931 ipv4_mask_tos.hdr.type_of_service = 0xff;
3932 ipv4_mask_tos.hdr.time_to_live = 0xff;
3933 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3934 vxlan_encap_conf.ip_tos;
3935 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3936 vxlan_encap_conf.ip_ttl;
3937 action_vxlan_encap_data->items[2].mask =
3940 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3942 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3943 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3944 ipv6_mask_tos.hdr.vtc_flow |=
3945 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3946 ipv6_mask_tos.hdr.hop_limits = 0xff;
3947 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3949 ((uint32_t)vxlan_encap_conf.ip_tos <<
3950 RTE_IPV6_HDR_TC_SHIFT);
3951 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3952 vxlan_encap_conf.ip_ttl;
3953 action_vxlan_encap_data->items[2].mask =
3957 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3958 RTE_DIM(vxlan_encap_conf.vni));
3959 action->conf = &action_vxlan_encap_data->conf;
3963 /** Parse NVGRE encap action. */
/* Same pattern as the VXLAN variant, driven by nvgre_encap_conf:
 * ETH / (VLAN) / IPv4-or-IPv6 / NVGRE item chain. */
3965 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3966 const char *str, unsigned int len,
3967 void *buf, unsigned int size)
3969 struct buffer *out = buf;
3970 struct rte_flow_action *action;
3971 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Generic action allocation first. */
3974 ret = parse_vc(ctx, token, str, len, buf, size);
3977 /* Nothing else to do if there is no buffer. */
3980 if (!out->args.vc.actions_n)
3982 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3983 /* Point to selected object. */
3984 ctx->object = out->args.vc.data;
3985 ctx->objmask = NULL;
3986 /* Set up default configuration. */
3987 action_nvgre_encap_data = ctx->object;
3988 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3989 .conf = (struct rte_flow_action_nvgre_encap){
3990 .definition = action_nvgre_encap_data->items,
3994 .type = RTE_FLOW_ITEM_TYPE_ETH,
3995 .spec = &action_nvgre_encap_data->item_eth,
3996 .mask = &rte_flow_item_eth_mask,
3999 .type = RTE_FLOW_ITEM_TYPE_VLAN,
4000 .spec = &action_nvgre_encap_data->item_vlan,
4001 .mask = &rte_flow_item_vlan_mask,
4004 .type = RTE_FLOW_ITEM_TYPE_IPV4,
4005 .spec = &action_nvgre_encap_data->item_ipv4,
4006 .mask = &rte_flow_item_ipv4_mask,
4009 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
4010 .spec = &action_nvgre_encap_data->item_nvgre,
4011 .mask = &rte_flow_item_nvgre_mask,
4014 .type = RTE_FLOW_ITEM_TYPE_END,
4019 .tci = nvgre_encap_conf.vlan_tci,
4023 .src_addr = nvgre_encap_conf.ipv4_src,
4024 .dst_addr = nvgre_encap_conf.ipv4_dst,
4026 .item_nvgre.flow_id = 0,
4028 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
4029 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4030 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
4031 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 tunnel: replace the IPv4 slot (index 2) with an IPv6 item. */
4032 if (!nvgre_encap_conf.select_ipv4) {
4033 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
4034 &nvgre_encap_conf.ipv6_src,
4035 sizeof(nvgre_encap_conf.ipv6_src));
4036 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
4037 &nvgre_encap_conf.ipv6_dst,
4038 sizeof(nvgre_encap_conf.ipv6_dst));
4039 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
4040 .type = RTE_FLOW_ITEM_TYPE_IPV6,
4041 .spec = &action_nvgre_encap_data->item_ipv6,
4042 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize slot 1 with a VOID item. */
4045 if (!nvgre_encap_conf.select_vlan)
4046 action_nvgre_encap_data->items[1].type =
4047 RTE_FLOW_ITEM_TYPE_VOID;
4048 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4049 RTE_DIM(nvgre_encap_conf.tni));
4050 action->conf = &action_nvgre_encap_data->conf;
4054 /** Parse l2 encap action. */
/* Emits a RAW_ENCAP action whose payload is a serialized Ethernet
 * (+ optional VLAN) header built from the global l2_encap_conf. */
4056 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4057 const char *str, unsigned int len,
4058 void *buf, unsigned int size)
4060 struct buffer *out = buf;
4061 struct rte_flow_action *action;
4062 struct action_raw_encap_data *action_encap_data;
4063 struct rte_flow_item_eth eth = { .type = 0, };
/* NOTE(review): the TCI is taken from mplsoudp_encap_conf, presumably
 * because l2_encap_conf carries no TCI field of its own — confirm. */
4064 struct rte_flow_item_vlan vlan = {
4065 .tci = mplsoudp_encap_conf.vlan_tci,
/* Generic action allocation first. */
4071 ret = parse_vc(ctx, token, str, len, buf, size);
4074 /* Nothing else to do if there is no buffer. */
4077 if (!out->args.vc.actions_n)
4079 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4080 /* Point to selected object. */
4081 ctx->object = out->args.vc.data;
4082 ctx->objmask = NULL;
4083 /* Copy the headers to the buffer. */
4084 action_encap_data = ctx->object;
4085 *action_encap_data = (struct action_raw_encap_data) {
4086 .conf = (struct rte_flow_action_raw_encap){
4087 .data = action_encap_data->data,
/* Serialize headers one after another into the raw data buffer. */
4091 header = action_encap_data->data;
/* Ethertype depends on whether a VLAN tag and which L3 family follow. */
4092 if (l2_encap_conf.select_vlan)
4093 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4094 else if (l2_encap_conf.select_ipv4)
4095 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4097 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4098 memcpy(eth.dst.addr_bytes,
4099 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4100 memcpy(eth.src.addr_bytes,
4101 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4102 memcpy(header, &eth, sizeof(eth));
4103 header += sizeof(eth);
4104 if (l2_encap_conf.select_vlan) {
4105 if (l2_encap_conf.select_ipv4)
4106 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4108 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4109 memcpy(header, &vlan, sizeof(vlan));
4110 header += sizeof(vlan);
/* Total encap size = bytes actually serialized. */
4112 action_encap_data->conf.size = header -
4113 action_encap_data->data;
4114 action->conf = &action_encap_data->conf;
4118 /** Parse l2 decap action. */
/* Emits a RAW_DECAP action describing the Ethernet (+ optional VLAN)
 * header to strip, per the global l2_decap_conf. */
4120 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4121 const char *str, unsigned int len,
4122 void *buf, unsigned int size)
4124 struct buffer *out = buf;
4125 struct rte_flow_action *action;
4126 struct action_raw_decap_data *action_decap_data;
4127 struct rte_flow_item_eth eth = { .type = 0, };
/* NOTE(review): TCI borrowed from mplsoudp_encap_conf here as well —
 * only the header length matters for decap, but confirm. */
4128 struct rte_flow_item_vlan vlan = {
4129 .tci = mplsoudp_encap_conf.vlan_tci,
/* Generic action allocation first. */
4135 ret = parse_vc(ctx, token, str, len, buf, size);
4138 /* Nothing else to do if there is no buffer. */
4141 if (!out->args.vc.actions_n)
4143 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4144 /* Point to selected object. */
4145 ctx->object = out->args.vc.data;
4146 ctx->objmask = NULL;
4147 /* Copy the headers to the buffer. */
4148 action_decap_data = ctx->object;
4149 *action_decap_data = (struct action_raw_decap_data) {
4150 .conf = (struct rte_flow_action_raw_decap){
4151 .data = action_decap_data->data,
4155 header = action_decap_data->data;
4156 if (l2_decap_conf.select_vlan)
4157 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4158 memcpy(header, &eth, sizeof(eth));
4159 header += sizeof(eth);
4160 if (l2_decap_conf.select_vlan) {
4161 memcpy(header, &vlan, sizeof(vlan));
4162 header += sizeof(vlan);
/* Total decap size = bytes serialized into the template. */
4164 action_decap_data->conf.size = header -
4165 action_decap_data->data;
4166 action->conf = &action_decap_data->conf;
/* Ethertype for MPLS unicast carried in the GRE protocol field. */
4170 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4172 /** Parse MPLSOGRE encap action. */
/* Emits a RAW_ENCAP action serializing ETH / (VLAN) / IPv4-or-IPv6 /
 * GRE / MPLS headers from the global mplsogre_encap_conf. */
4174 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4175 const char *str, unsigned int len,
4176 void *buf, unsigned int size)
4178 struct buffer *out = buf;
4179 struct rte_flow_action *action;
4180 struct action_raw_encap_data *action_encap_data;
4181 struct rte_flow_item_eth eth = { .type = 0, };
4182 struct rte_flow_item_vlan vlan = {
4183 .tci = mplsogre_encap_conf.vlan_tci,
4186 struct rte_flow_item_ipv4 ipv4 = {
4188 .src_addr = mplsogre_encap_conf.ipv4_src,
4189 .dst_addr = mplsogre_encap_conf.ipv4_dst,
4190 .next_proto_id = IPPROTO_GRE,
4191 .version_ihl = RTE_IPV4_VHL_DEF,
4192 .time_to_live = IPDEFTTL,
4195 struct rte_flow_item_ipv6 ipv6 = {
4197 .proto = IPPROTO_GRE,
4198 .hop_limits = IPDEFTTL,
4201 struct rte_flow_item_gre gre = {
4202 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4204 struct rte_flow_item_mpls mpls;
/* Generic action allocation first. */
4208 ret = parse_vc(ctx, token, str, len, buf, size);
4211 /* Nothing else to do if there is no buffer. */
4214 if (!out->args.vc.actions_n)
4216 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4217 /* Point to selected object. */
4218 ctx->object = out->args.vc.data;
4219 ctx->objmask = NULL;
4220 /* Copy the headers to the buffer. */
4221 action_encap_data = ctx->object;
4222 *action_encap_data = (struct action_raw_encap_data) {
4223 .conf = (struct rte_flow_action_raw_encap){
4224 .data = action_encap_data->data,
4229 header = action_encap_data->data;
/* Outer ethertype follows the VLAN / L3 family selection. */
4230 if (mplsogre_encap_conf.select_vlan)
4231 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4232 else if (mplsogre_encap_conf.select_ipv4)
4233 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4235 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4236 memcpy(eth.dst.addr_bytes,
4237 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4238 memcpy(eth.src.addr_bytes,
4239 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4240 memcpy(header, &eth, sizeof(eth));
4241 header += sizeof(eth);
4242 if (mplsogre_encap_conf.select_vlan) {
4243 if (mplsogre_encap_conf.select_ipv4)
4244 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4246 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4247 memcpy(header, &vlan, sizeof(vlan));
4248 header += sizeof(vlan);
4250 if (mplsogre_encap_conf.select_ipv4) {
4251 memcpy(header, &ipv4, sizeof(ipv4));
4252 header += sizeof(ipv4);
4254 memcpy(&ipv6.hdr.src_addr,
4255 &mplsogre_encap_conf.ipv6_src,
4256 sizeof(mplsogre_encap_conf.ipv6_src));
4257 memcpy(&ipv6.hdr.dst_addr,
4258 &mplsogre_encap_conf.ipv6_dst,
4259 sizeof(mplsogre_encap_conf.ipv6_dst));
4260 memcpy(header, &ipv6, sizeof(ipv6));
4261 header += sizeof(ipv6);
4263 memcpy(header, &gre, sizeof(gre));
4264 header += sizeof(gre);
4265 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4266 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack bit on the single MPLS label. */
4267 mpls.label_tc_s[2] |= 0x1;
4268 memcpy(header, &mpls, sizeof(mpls));
4269 header += sizeof(mpls);
4270 action_encap_data->conf.size = header -
4271 action_encap_data->data;
4272 action->conf = &action_encap_data->conf;
4276 /** Parse MPLSOGRE decap action. */
/* Emits a RAW_DECAP action describing the ETH / (VLAN) / L3 / GRE / MPLS
 * stack to strip. NOTE(review): most selections below read
 * mplsogre_encap_conf (not the decap conf) — presumably intentional so
 * decap mirrors the encap layout, but verify against upstream testpmd. */
4278 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4279 const char *str, unsigned int len,
4280 void *buf, unsigned int size)
4282 struct buffer *out = buf;
4283 struct rte_flow_action *action;
4284 struct action_raw_decap_data *action_decap_data;
4285 struct rte_flow_item_eth eth = { .type = 0, };
4286 struct rte_flow_item_vlan vlan = {.tci = 0};
4287 struct rte_flow_item_ipv4 ipv4 = {
4289 .next_proto_id = IPPROTO_GRE,
4292 struct rte_flow_item_ipv6 ipv6 = {
4294 .proto = IPPROTO_GRE,
4297 struct rte_flow_item_gre gre = {
4298 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4300 struct rte_flow_item_mpls mpls;
/* Generic action allocation first. */
4304 ret = parse_vc(ctx, token, str, len, buf, size);
4307 /* Nothing else to do if there is no buffer. */
4310 if (!out->args.vc.actions_n)
4312 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4313 /* Point to selected object. */
4314 ctx->object = out->args.vc.data;
4315 ctx->objmask = NULL;
4316 /* Copy the headers to the buffer. */
4317 action_decap_data = ctx->object;
4318 *action_decap_data = (struct action_raw_decap_data) {
4319 .conf = (struct rte_flow_action_raw_decap){
4320 .data = action_decap_data->data,
4324 header = action_decap_data->data;
4325 if (mplsogre_decap_conf.select_vlan)
4326 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4327 else if (mplsogre_encap_conf.select_ipv4)
4328 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4330 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4331 memcpy(eth.dst.addr_bytes,
4332 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4333 memcpy(eth.src.addr_bytes,
4334 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4335 memcpy(header, &eth, sizeof(eth));
4336 header += sizeof(eth);
4337 if (mplsogre_encap_conf.select_vlan) {
4338 if (mplsogre_encap_conf.select_ipv4)
4339 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4341 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4342 memcpy(header, &vlan, sizeof(vlan));
4343 header += sizeof(vlan);
4345 if (mplsogre_encap_conf.select_ipv4) {
4346 memcpy(header, &ipv4, sizeof(ipv4));
4347 header += sizeof(ipv4);
4349 memcpy(header, &ipv6, sizeof(ipv6));
4350 header += sizeof(ipv6);
4352 memcpy(header, &gre, sizeof(gre));
4353 header += sizeof(gre);
/* Label content is irrelevant for decap; only its size matters. */
4354 memset(&mpls, 0, sizeof(mpls));
4355 memcpy(header, &mpls, sizeof(mpls));
4356 header += sizeof(mpls);
4357 action_decap_data->conf.size = header -
4358 action_decap_data->data;
4359 action->conf = &action_decap_data->conf;
4363 /** Parse MPLSOUDP encap action. */
/*
 * Emits a RAW_ENCAP action whose data buffer is built from
 * mplsoudp_encap_conf: ETH / [VLAN] / IPv4|IPv6 / UDP / MPLS.
 */
4365 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4366 const char *str, unsigned int len,
4367 void *buf, unsigned int size)
4369 struct buffer *out = buf;
4370 struct rte_flow_action *action;
4371 struct action_raw_encap_data *action_encap_data;
4372 struct rte_flow_item_eth eth = { .type = 0, };
4373 struct rte_flow_item_vlan vlan = {
4374 .tci = mplsoudp_encap_conf.vlan_tci,
4377 struct rte_flow_item_ipv4 ipv4 = {
4379 .src_addr = mplsoudp_encap_conf.ipv4_src,
4380 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4381 .next_proto_id = IPPROTO_UDP,
4382 .version_ihl = RTE_IPV4_VHL_DEF,
4383 .time_to_live = IPDEFTTL,
4386 struct rte_flow_item_ipv6 ipv6 = {
4388 .proto = IPPROTO_UDP,
4389 .hop_limits = IPDEFTTL,
4392 struct rte_flow_item_udp udp = {
4394 .src_port = mplsoudp_encap_conf.udp_src,
4395 .dst_port = mplsoudp_encap_conf.udp_dst,
4398 struct rte_flow_item_mpls mpls;
/* Generic action parsing first; it appends the action entry itself. */
4402 ret = parse_vc(ctx, token, str, len, buf, size);
4405 /* Nothing else to do if there is no buffer. */
4408 if (!out->args.vc.actions_n)
4410 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4411 /* Point to selected object. */
4412 ctx->object = out->args.vc.data;
4413 ctx->objmask = NULL;
4414 /* Copy the headers to the buffer. */
4415 action_encap_data = ctx->object;
4416 *action_encap_data = (struct action_raw_encap_data) {
4417 .conf = (struct rte_flow_action_raw_encap){
4418 .data = action_encap_data->data,
/* "header" walks the data buffer as each header template is appended. */
4423 header = action_encap_data->data;
4424 if (mplsoudp_encap_conf.select_vlan)
4425 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4426 else if (mplsoudp_encap_conf.select_ipv4)
4427 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4429 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4430 memcpy(eth.dst.addr_bytes,
4431 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4432 memcpy(eth.src.addr_bytes,
4433 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below looks like mojibake of "&eth" -- restore
 * before compiling. */
4434 memcpy(header, ð, sizeof(eth));
4435 header += sizeof(eth);
4436 if (mplsoudp_encap_conf.select_vlan) {
4437 if (mplsoudp_encap_conf.select_ipv4)
4438 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4440 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4441 memcpy(header, &vlan, sizeof(vlan));
4442 header += sizeof(vlan);
4444 if (mplsoudp_encap_conf.select_ipv4) {
4445 memcpy(header, &ipv4, sizeof(ipv4));
4446 header += sizeof(ipv4);
4448 memcpy(&ipv6.hdr.src_addr,
4449 &mplsoudp_encap_conf.ipv6_src,
4450 sizeof(mplsoudp_encap_conf.ipv6_src));
4451 memcpy(&ipv6.hdr.dst_addr,
4452 &mplsoudp_encap_conf.ipv6_dst,
4453 sizeof(mplsoudp_encap_conf.ipv6_dst));
4454 memcpy(header, &ipv6, sizeof(ipv6));
4455 header += sizeof(ipv6);
4457 memcpy(header, &udp, sizeof(udp));
4458 header += sizeof(udp);
4459 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4460 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack (S) bit in the third label byte. */
4461 mpls.label_tc_s[2] |= 0x1;
4462 memcpy(header, &mpls, sizeof(mpls));
4463 header += sizeof(mpls);
/* Final size is the number of bytes appended above. */
4464 action_encap_data->conf.size = header -
4465 action_encap_data->data;
4466 action->conf = &action_encap_data->conf;
4470 /** Parse MPLSOUDP decap action. */
/*
 * Emits a RAW_DECAP action whose data buffer describes the header stack to
 * strip: ETH / [VLAN] / IPv4|IPv6 / UDP / MPLS (MPLS left zeroed).
 * NOTE(review): select_vlan is read from mplsoudp_decap_conf while the
 * remaining selections and addresses come from mplsoudp_encap_conf --
 * confirm this asymmetry is intentional.
 */
4472 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4473 const char *str, unsigned int len,
4474 void *buf, unsigned int size)
4476 struct buffer *out = buf;
4477 struct rte_flow_action *action;
4478 struct action_raw_decap_data *action_decap_data;
4479 struct rte_flow_item_eth eth = { .type = 0, };
4480 struct rte_flow_item_vlan vlan = {.tci = 0};
4481 struct rte_flow_item_ipv4 ipv4 = {
4483 .next_proto_id = IPPROTO_UDP,
4486 struct rte_flow_item_ipv6 ipv6 = {
4488 .proto = IPPROTO_UDP,
4491 struct rte_flow_item_udp udp = {
/* 6635 is the IANA-assigned MPLS-in-UDP destination port (RFC 7510). */
4493 .dst_port = rte_cpu_to_be_16(6635),
4496 struct rte_flow_item_mpls mpls;
/* Generic action parsing first; it appends the action entry itself. */
4500 ret = parse_vc(ctx, token, str, len, buf, size);
4503 /* Nothing else to do if there is no buffer. */
4506 if (!out->args.vc.actions_n)
4508 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4509 /* Point to selected object. */
4510 ctx->object = out->args.vc.data;
4511 ctx->objmask = NULL;
4512 /* Copy the headers to the buffer. */
4513 action_decap_data = ctx->object;
4514 *action_decap_data = (struct action_raw_decap_data) {
4515 .conf = (struct rte_flow_action_raw_decap){
4516 .data = action_decap_data->data,
/* "header" walks the data buffer as each header template is appended. */
4520 header = action_decap_data->data;
4521 if (mplsoudp_decap_conf.select_vlan)
4522 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4523 else if (mplsoudp_encap_conf.select_ipv4)
4524 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4526 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4527 memcpy(eth.dst.addr_bytes,
4528 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4529 memcpy(eth.src.addr_bytes,
4530 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below looks like mojibake of "&eth" -- restore
 * before compiling. */
4531 memcpy(header, ð, sizeof(eth));
4532 header += sizeof(eth);
4533 if (mplsoudp_encap_conf.select_vlan) {
4534 if (mplsoudp_encap_conf.select_ipv4)
4535 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4537 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4538 memcpy(header, &vlan, sizeof(vlan));
4539 header += sizeof(vlan);
4541 if (mplsoudp_encap_conf.select_ipv4) {
4542 memcpy(header, &ipv4, sizeof(ipv4));
4543 header += sizeof(ipv4);
4545 memcpy(header, &ipv6, sizeof(ipv6));
4546 header += sizeof(ipv6);
4548 memcpy(header, &udp, sizeof(udp));
4549 header += sizeof(udp);
/* MPLS header content does not matter for decap; append zeros. */
4550 memset(&mpls, 0, sizeof(mpls));
4551 memcpy(header, &mpls, sizeof(mpls));
4552 header += sizeof(mpls);
/* Final size is the number of bytes appended above. */
4553 action_decap_data->conf.size = header -
4554 action_decap_data->data;
4555 action->conf = &action_decap_data->conf;
/*
 * Parse RAW_ENCAP action: point the action at the globally configured
 * raw encap buffer (raw_encap_conf), previously filled by "set raw_encap".
 */
4560 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4561 const char *str, unsigned int len, void *buf,
4564 struct buffer *out = buf;
4565 struct rte_flow_action *action;
4566 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4567 uint8_t *data = NULL;
/* Generic action parsing first; it appends the action entry itself. */
4570 ret = parse_vc(ctx, token, str, len, buf, size);
4573 /* Nothing else to do if there is no buffer. */
4576 if (!out->args.vc.actions_n)
4578 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4579 /* Point to selected object. */
4580 ctx->object = out->args.vc.data;
4581 ctx->objmask = NULL;
4582 /* Copy the headers to the buffer. */
4583 action_raw_encap_conf = ctx->object;
4584 /* data stored from tail of data buffer */
/* The configured bytes end exactly at the buffer end, so the first used
 * byte is at (end - size). */
4585 data = (uint8_t *)&(raw_encap_conf.data) +
4586 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4587 action_raw_encap_conf->data = data;
4588 action_raw_encap_conf->preserve = NULL;
4589 action_raw_encap_conf->size = raw_encap_conf.size;
4590 action->conf = action_raw_encap_conf;
/*
 * Parse RAW_DECAP action: point the action at the globally configured
 * raw decap buffer (raw_decap_conf), previously filled by "set raw_decap".
 */
4595 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4596 const char *str, unsigned int len, void *buf,
4599 struct buffer *out = buf;
4600 struct rte_flow_action *action;
4601 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4602 uint8_t *data = NULL;
/* Generic action parsing first; it appends the action entry itself. */
4605 ret = parse_vc(ctx, token, str, len, buf, size);
4608 /* Nothing else to do if there is no buffer. */
4611 if (!out->args.vc.actions_n)
4613 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4614 /* Point to selected object. */
4615 ctx->object = out->args.vc.data;
4616 ctx->objmask = NULL;
4617 /* Copy the headers to the buffer. */
4618 action_raw_decap_conf = ctx->object;
4619 /* data stored from tail of data buffer */
/* The configured bytes end exactly at the buffer end, so the first used
 * byte is at (end - size). */
4620 data = (uint8_t *)&(raw_decap_conf.data) +
4621 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4622 action_raw_decap_conf->data = data;
4623 action_raw_decap_conf->size = raw_decap_conf.size;
4624 action->conf = action_raw_decap_conf;
4628 /** Parse tokens for destroy command. */
/*
 * First invocation initializes the command; each further invocation
 * appends one rule ID into the area following *out, with a bounds check
 * against the caller-supplied buffer size.
 */
4630 parse_destroy(struct context *ctx, const struct token *token,
4631 const char *str, unsigned int len,
4632 void *buf, unsigned int size)
4634 struct buffer *out = buf;
4636 /* Token name must match. */
4637 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4639 /* Nothing else to do if there is no buffer. */
4642 if (!out->command) {
4643 if (ctx->curr != DESTROY)
4645 if (sizeof(*out) > size)
4647 out->command = ctx->curr;
4650 ctx->objmask = NULL;
/* Rule ID array lives right after *out, suitably aligned. */
4651 out->args.destroy.rule =
4652 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next rule ID slot would overflow the buffer. */
4656 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4657 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4660 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4661 ctx->objmask = NULL;
4665 /** Parse tokens for flush command. */
/* Flush takes no arguments beyond the port; only initialize *out. */
4667 parse_flush(struct context *ctx, const struct token *token,
4668 const char *str, unsigned int len,
4669 void *buf, unsigned int size)
4671 struct buffer *out = buf;
4673 /* Token name must match. */
4674 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4676 /* Nothing else to do if there is no buffer. */
4679 if (!out->command) {
4680 if (ctx->curr != FLUSH)
4682 if (sizeof(*out) > size)
4684 out->command = ctx->curr;
4687 ctx->objmask = NULL;
4692 /** Parse tokens for query command. */
/* Initialize *out for a query; rule ID and action follow as arguments. */
4694 parse_query(struct context *ctx, const struct token *token,
4695 const char *str, unsigned int len,
4696 void *buf, unsigned int size)
4698 struct buffer *out = buf;
4700 /* Token name must match. */
4701 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4703 /* Nothing else to do if there is no buffer. */
4706 if (!out->command) {
4707 if (ctx->curr != QUERY)
4709 if (sizeof(*out) > size)
4711 out->command = ctx->curr;
4714 ctx->objmask = NULL;
4719 /** Parse action names. */
/*
 * Linear scan over next_action[]; a (possibly partial) name match stores
 * the matched token's private data into the current object at the
 * argument's offset. On failure the popped argument is pushed back.
 */
4721 parse_action(struct context *ctx, const struct token *token,
4722 const char *str, unsigned int len,
4723 void *buf, unsigned int size)
4725 struct buffer *out = buf;
4726 const struct arg *arg = pop_args(ctx);
4730 /* Argument is expected. */
4733 /* Parse action name. */
4734 for (i = 0; next_action[i]; ++i) {
4735 const struct parse_action_priv *priv;
4737 token = &token_list[next_action[i]];
4738 if (strcmp_partial(token->name, str, len))
4744 memcpy((uint8_t *)ctx->object + arg->offset,
4750 push_args(ctx, arg);
4754 /** Parse tokens for list command. */
/*
 * Same pattern as parse_destroy(), but collecting group IDs instead of
 * rule IDs into the area following *out.
 */
4756 parse_list(struct context *ctx, const struct token *token,
4757 const char *str, unsigned int len,
4758 void *buf, unsigned int size)
4760 struct buffer *out = buf;
4762 /* Token name must match. */
4763 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4765 /* Nothing else to do if there is no buffer. */
4768 if (!out->command) {
4769 if (ctx->curr != LIST)
4771 if (sizeof(*out) > size)
4773 out->command = ctx->curr;
4776 ctx->objmask = NULL;
/* Group ID array lives right after *out, suitably aligned. */
4777 out->args.list.group =
4778 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject when the next group ID slot would overflow the buffer. */
4782 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4783 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4786 ctx->object = out->args.list.group + out->args.list.group_n++;
4787 ctx->objmask = NULL;
4791 /** Parse tokens for isolate command. */
/* Initialize *out for isolate; the boolean flag follows as an argument. */
4793 parse_isolate(struct context *ctx, const struct token *token,
4794 const char *str, unsigned int len,
4795 void *buf, unsigned int size)
4797 struct buffer *out = buf;
4799 /* Token name must match. */
4800 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4802 /* Nothing else to do if there is no buffer. */
4805 if (!out->command) {
4806 if (ctx->curr != ISOLATE)
4808 if (sizeof(*out) > size)
4810 out->command = ctx->curr;
4813 ctx->objmask = NULL;
4819 * Parse signed/unsigned integers 8 to 64-bit long.
4821 * Last argument (ctx->args) is retrieved to determine integer type and
4825 parse_int(struct context *ctx, const struct token *token,
4826 const char *str, unsigned int len,
4827 void *buf, unsigned int size)
4829 const struct arg *arg = pop_args(ctx);
4834 /* Argument is expected. */
/* Use strtoimax() for signed fields, strtoumax() otherwise; the whole
 * token must be consumed. */
4839 (uintmax_t)strtoimax(str, &end, 0) :
4840 strtoumax(str, &end, 0);
4841 if (errno || (size_t)(end - str) != len)
/* Range check honors the field's signedness. */
4844 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4845 (intmax_t)u > (intmax_t)arg->max)) ||
4846 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments are filled through a dedicated helper; the mask
 * gets all-ones for the same bits. */
4851 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4852 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4856 buf = (uint8_t *)ctx->object + arg->offset;
4858 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
/* Store according to field width, converting to network order when
 * arg->hton is set. */
4862 case sizeof(uint8_t):
4863 *(uint8_t *)buf = u;
4865 case sizeof(uint16_t):
4866 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are written byte by byte, order depending on
 * host endianness. */
4868 case sizeof(uint8_t [3]):
4869 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4871 ((uint8_t *)buf)[0] = u;
4872 ((uint8_t *)buf)[1] = u >> 8;
4873 ((uint8_t *)buf)[2] = u >> 16;
4877 ((uint8_t *)buf)[0] = u >> 16;
4878 ((uint8_t *)buf)[1] = u >> 8;
4879 ((uint8_t *)buf)[2] = u;
4881 case sizeof(uint32_t):
4882 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4884 case sizeof(uint64_t):
4885 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the object mask when one is active. */
4890 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4892 buf = (uint8_t *)ctx->objmask + arg->offset;
/* On error, return the argument to the stack before failing. */
4897 push_args(ctx, arg);
4904 * Three arguments (ctx->args) are retrieved from the stack to store data,
4905 * its actual length and address (in that order).
4908 parse_string(struct context *ctx, const struct token *token,
4909 const char *str, unsigned int len,
4910 void *buf, unsigned int size)
4912 const struct arg *arg_data = pop_args(ctx);
4913 const struct arg *arg_len = pop_args(ctx);
4914 const struct arg *arg_addr = pop_args(ctx);
4915 char tmp[16]; /* Ought to be enough. */
4918 /* Arguments are expected. */
/* Push back whatever was popped if a later pop failed. */
4922 push_args(ctx, arg_data);
4926 push_args(ctx, arg_len);
4927 push_args(ctx, arg_data);
4930 size = arg_data->size;
4931 /* Bit-mask fill is not supported. */
4932 if (arg_data->mask || size < len)
4936 /* Let parse_int() fill length information first. */
4937 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4940 push_args(ctx, arg_len);
4941 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
/* Copy string data and zero-pad the rest of the field. */
4946 buf = (uint8_t *)ctx->object + arg_data->offset;
4947 /* Output buffer is not necessarily NUL-terminated. */
4948 memcpy(buf, str, len);
4949 memset((uint8_t *)buf + len, 0x00, size - len);
4951 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4952 /* Save address if requested. */
4953 if (arg_addr->size) {
4954 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4956 (uint8_t *)ctx->object + arg_data->offset
4960 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4962 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments before failing. */
4968 push_args(ctx, arg_addr);
4969 push_args(ctx, arg_len);
4970 push_args(ctx, arg_data);
/*
 * Convert a hexadecimal string into bytes: two input characters per
 * output byte, consumed via a 2-char temporary and strtoul(base 16).
 * NOTE(review): the strtoul() end pointer is not validated in the lines
 * visible here -- confirm invalid hex digits are rejected elsewhere.
 */
4975 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4981 /* Check input parameters */
4982 if ((src == NULL) ||
4988 /* Convert chars to bytes */
4989 for (i = 0, len = 0; i < *size; i += 2) {
4990 snprintf(tmp, 3, "%s", src + i);
4991 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hexadecimal byte string ("0x..."-prefixed) into a field.
 * Mirrors parse_string(): pops data/length/address arguments, converts
 * via parse_hex_string(), lets parse_int() record the length, then
 * copies and zero-pads the bytes.
 */
5006 parse_hex(struct context *ctx, const struct token *token,
5007 const char *str, unsigned int len,
5008 void *buf, unsigned int size)
5010 const struct arg *arg_data = pop_args(ctx);
5011 const struct arg *arg_len = pop_args(ctx);
5012 const struct arg *arg_addr = pop_args(ctx);
5013 char tmp[16]; /* Ought to be enough. */
5015 unsigned int hexlen = len;
/* Scratch buffer for decoded bytes; inputs longer than this are
 * rejected below. */
5016 unsigned int length = 256;
5017 uint8_t hex_tmp[length];
5019 /* Arguments are expected. */
/* Push back whatever was popped if a later pop failed. */
5023 push_args(ctx, arg_data);
5027 push_args(ctx, arg_len);
5028 push_args(ctx, arg_data);
5031 size = arg_data->size;
5032 /* Bit-mask fill is not supported. */
5038 /* translate bytes string to array. */
5039 if (str[0] == '0' && ((str[1] == 'x') ||
5044 if (hexlen > length)
5046 ret = parse_hex_string(str, hex_tmp, &hexlen);
5049 /* Let parse_int() fill length information first. */
5050 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen)
5053 push_args(ctx, arg_len);
5054 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
/* Copy decoded bytes and zero-pad the rest of the field. */
5059 buf = (uint8_t *)ctx->object + arg_data->offset;
5060 /* Output buffer is not necessarily NUL-terminated. */
5061 memcpy(buf, hex_tmp, hexlen);
5062 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
5064 memset((uint8_t *)ctx->objmask + arg_data->offset,
5066 /* Save address if requested. */
5067 if (arg_addr->size) {
5068 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5070 (uint8_t *)ctx->object + arg_data->offset
5074 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5076 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three arguments before failing. */
5082 push_args(ctx, arg_addr);
5083 push_args(ctx, arg_len);
5084 push_args(ctx, arg_data);
5090 * Parse a MAC address.
5092 * Last argument (ctx->args) is retrieved to determine storage size and
5096 parse_mac_addr(struct context *ctx, const struct token *token,
5097 const char *str, unsigned int len,
5098 void *buf, unsigned int size)
5100 const struct arg *arg = pop_args(ctx);
5101 struct rte_ether_addr tmp;
5105 /* Argument is expected. */
5109 /* Bit-mask fill is not supported. */
5110 if (arg->mask || size != sizeof(tmp))
5112 /* Only network endian is supported. */
/* Delegate the textual format to the cmdline library; the whole token
 * must be consumed. */
5115 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
5116 if (ret < 0 || (unsigned int)ret != len)
5120 buf = (uint8_t *)ctx->object + arg->offset;
5121 memcpy(buf, &tmp, size);
5123 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: return the argument to the stack. */
5126 push_args(ctx, arg);
5131 * Parse an IPv4 address.
5133 * Last argument (ctx->args) is retrieved to determine storage size and
5137 parse_ipv4_addr(struct context *ctx, const struct token *token,
5138 const char *str, unsigned int len,
5139 void *buf, unsigned int size)
5141 const struct arg *arg = pop_args(ctx);
5146 /* Argument is expected. */
5150 /* Bit-mask fill is not supported. */
5151 if (arg->mask || size != sizeof(tmp))
5153 /* Only network endian is supported. */
/* str is not necessarily NUL-terminated; copy into a local buffer
 * before handing it to inet_pton(). */
5156 memcpy(str2, str, len);
5158 ret = inet_pton(AF_INET, str2, &tmp);
5160 /* Attempt integer parsing. */
/* Dotted-quad failed: fall back to plain integer notation. */
5161 push_args(ctx, arg);
5162 return parse_int(ctx, token, str, len, buf, size);
5166 buf = (uint8_t *)ctx->object + arg->offset;
5167 memcpy(buf, &tmp, size);
5169 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: return the argument to the stack. */
5172 push_args(ctx, arg);
5177 * Parse an IPv6 address.
5179 * Last argument (ctx->args) is retrieved to determine storage size and
5183 parse_ipv6_addr(struct context *ctx, const struct token *token,
5184 const char *str, unsigned int len,
5185 void *buf, unsigned int size)
5187 const struct arg *arg = pop_args(ctx);
5189 struct in6_addr tmp;
5193 /* Argument is expected. */
5197 /* Bit-mask fill is not supported. */
5198 if (arg->mask || size != sizeof(tmp))
5200 /* Only network endian is supported. */
/* str is not necessarily NUL-terminated; copy into a local buffer
 * before handing it to inet_pton(). */
5203 memcpy(str2, str, len);
5205 ret = inet_pton(AF_INET6, str2, &tmp);
5210 buf = (uint8_t *)ctx->object + arg->offset;
5211 memcpy(buf, &tmp, size);
5213 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: return the argument to the stack. */
5216 push_args(ctx, arg);
5220 /** Boolean values (even indices stand for false). */
/* NULL-terminated; parse_boolean() maps index parity to "0"/"1". */
5221 static const char *const boolean_name[] = {
5231 * Parse a boolean value.
5233 * Last argument (ctx->args) is retrieved to determine storage size and
5237 parse_boolean(struct context *ctx, const struct token *token,
5238 const char *str, unsigned int len,
5239 void *buf, unsigned int size)
5241 const struct arg *arg = pop_args(ctx);
5245 /* Argument is expected. */
/* Find the (possibly partial) name in boolean_name[]. */
5248 for (i = 0; boolean_name[i]; ++i)
5249 if (!strcmp_partial(boolean_name[i], str, len))
5251 /* Process token as integer. */
/* Even indices mean false ("0"), odd mean true ("1"); unknown names are
 * handed to parse_int() unchanged. */
5252 if (boolean_name[i])
5253 str = i & 1 ? "1" : "0";
5254 push_args(ctx, arg);
5255 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success. */
5256 return ret > 0 ? (int)len : ret;
5259 /** Parse port and update context. */
/*
 * Parses the port number into a throwaway stack buffer (a compound
 * literal) so that ctx->port can be captured even when the caller did
 * not supply an output buffer.
 */
5261 parse_port(struct context *ctx, const struct token *token,
5262 const char *str, unsigned int len,
5263 void *buf, unsigned int size)
5265 struct buffer *out = &(struct buffer){ .port = 0 };
5273 ctx->objmask = NULL;
5274 size = sizeof(*out);
5276 ret = parse_int(ctx, token, str, len, out, size);
5278 ctx->port = out->port;
5284 /** Parse set command, initialize output buffer for subsequent tokens. */
5286 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5287 const char *str, unsigned int len,
5288 void *buf, unsigned int size)
5290 struct buffer *out = buf;
5292 /* Token name must match. */
5293 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5295 /* Nothing else to do if there is no buffer. */
5298 /* Make sure buffer is large enough. */
5299 if (size < sizeof(*out))
5302 ctx->objmask = NULL;
/* Record which set sub-command (raw_encap/raw_decap) was selected. */
5305 out->command = ctx->curr;
5310 * Parse set raw_encap/raw_decap command,
5311 * initialize output buffer for subsequent tokens.
5314 parse_set_init(struct context *ctx, const struct token *token,
5315 const char *str, unsigned int len,
5316 void *buf, unsigned int size)
5318 struct buffer *out = buf;
5320 /* Token name must match. */
5321 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5323 /* Nothing else to do if there is no buffer. */
5326 /* Make sure buffer is large enough. */
5327 if (size < sizeof(*out))
5329 /* Initialize buffer. */
/* Fill the trailing space with a recognizable pattern (0x22),
 * presumably to make stale/uninitialized data visible -- TODO confirm. */
5330 memset(out, 0x00, sizeof(*out));
5331 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5334 ctx->objmask = NULL;
5335 if (!out->command) {
5336 if (ctx->curr != SET)
5338 if (sizeof(*out) > size)
5340 out->command = ctx->curr;
/* Variable-length data grows down from the end of the buffer while the
 * pattern array grows up right after *out. */
5341 out->args.vc.data = (uint8_t *)out + size;
5342 /* All we need is pattern */
5343 out->args.vc.pattern =
5344 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5346 ctx->object = out->args.vc.pattern;
5351 /** No completion. */
/* Completion callback stub for tokens with nothing to complete. */
5353 comp_none(struct context *ctx, const struct token *token,
5354 unsigned int ent, char *buf, unsigned int size)
5364 /** Complete boolean values. */
/* With buf, copy entry #ent of boolean_name[]; otherwise count entries. */
5366 comp_boolean(struct context *ctx, const struct token *token,
5367 unsigned int ent, char *buf, unsigned int size)
5373 for (i = 0; boolean_name[i]; ++i)
5374 if (buf && i == ent)
5375 return strlcpy(buf, boolean_name[i], size);
5381 /** Complete action names. */
/* With buf, copy the name of action #ent; otherwise count actions. */
5383 comp_action(struct context *ctx, const struct token *token,
5384 unsigned int ent, char *buf, unsigned int size)
5390 for (i = 0; next_action[i]; ++i)
5391 if (buf && i == ent)
5392 return strlcpy(buf, token_list[next_action[i]].name,
5399 /** Complete available ports. */
/* Enumerate live ethdev port IDs; copy entry #ent when buf is set. */
5401 comp_port(struct context *ctx, const struct token *token,
5402 unsigned int ent, char *buf, unsigned int size)
5409 RTE_ETH_FOREACH_DEV(p) {
5410 if (buf && i == ent)
5411 return snprintf(buf, size, "%u", p);
5419 /** Complete available rule IDs. */
/* Walk the current port's flow list; copy rule #ent when buf is set. */
5421 comp_rule_id(struct context *ctx, const struct token *token,
5422 unsigned int ent, char *buf, unsigned int size)
5425 struct rte_port *port;
5426 struct port_flow *pf;
/* Completion needs a valid, specific port in context. */
5429 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5430 ctx->port == (portid_t)RTE_PORT_ALL)
5432 port = &ports[ctx->port];
5433 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5434 if (buf && i == ent)
5435 return snprintf(buf, size, "%u", pf->id);
5443 /** Complete type field for RSS action. */
/* Offer every rss_type_table[] entry, then "end" as the terminator. */
5445 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5446 unsigned int ent, char *buf, unsigned int size)
5452 for (i = 0; rss_type_table[i].str; ++i)
5457 return strlcpy(buf, rss_type_table[ent].str, size);
5459 return snprintf(buf, size, "end");
5463 /** Complete queue field for RSS action. */
/* Offer queue indices, then "end" as the terminator. */
5465 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5466 unsigned int ent, char *buf, unsigned int size)
5473 return snprintf(buf, size, "%u", ent);
5475 return snprintf(buf, size, "end");
5479 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all
 * operate on this instance. */
5480 static struct context cmd_flow_context;
5482 /** Global parser instance (cmdline API). */
/* Forward declarations: definitions appear after the callbacks that
 * reference them. */
5483 cmdline_parse_inst_t cmd_flow;
5484 cmdline_parse_inst_t cmd_set_raw;
5486 /** Initialize context. */
/* Reset parser state before a new command line is tokenized. */
5488 cmd_flow_context_init(struct context *ctx)
5490 /* A full memset() is not necessary. */
5500 ctx->objmask = NULL;
5503 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by the cmdline library for each input token.
 * Measures the token (stopping at whitespace or a '#' comment), detects
 * end-of-line, then tries each candidate token from the current "next"
 * list until one parses the input; finally pushes the accepted token's
 * follow-up token lists and arguments for subsequent calls.
 */
5505 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5508 struct context *ctx = &cmd_flow_context;
5509 const struct token *token;
5510 const enum index *list;
5515 token = &token_list[ctx->curr];
5516 /* Check argument length. */
/* Token ends at whitespace or the start of a '#' comment. */
5519 for (len = 0; src[len]; ++len)
5520 if (src[len] == '#' || isspace(src[len]))
5524 /* Last argument and EOL detection. */
5525 for (i = len; src[i]; ++i)
5526 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5528 else if (!isspace(src[i])) {
5533 if (src[i] == '\r' || src[i] == '\n') {
5537 /* Initialize context if necessary. */
5538 if (!ctx->next_num) {
5541 ctx->next[ctx->next_num++] = token->next[0];
5543 /* Process argument through candidates. */
5544 ctx->prev = ctx->curr;
5545 list = ctx->next[ctx->next_num - 1];
/* First candidate whose callback (or parse_default) consumes exactly
 * "len" bytes wins. */
5546 for (i = 0; list[i]; ++i) {
5547 const struct token *next = &token_list[list[i]];
5550 ctx->curr = list[i];
5552 tmp = next->call(ctx, next, src, len, result, size);
5554 tmp = parse_default(ctx, next, src, len, result, size);
5555 if (tmp == -1 || tmp != len)
5563 /* Push subsequent tokens if any. */
5565 for (i = 0; token->next[i]; ++i) {
5566 if (ctx->next_num == RTE_DIM(ctx->next))
5568 ctx->next[ctx->next_num++] = token->next[i];
5570 /* Push arguments if any. */
5572 for (i = 0; token->args[i]; ++i) {
5573 if (ctx->args_num == RTE_DIM(ctx->args))
5575 ctx->args[ctx->args_num++] = token->args[i];
5580 /** Return number of completion entries (cmdline API). */
5582 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5584 struct context *ctx = &cmd_flow_context;
5585 const struct token *token = &token_list[ctx->curr];
5586 const enum index *list;
5590 /* Count number of tokens in current list. */
5592 list = ctx->next[ctx->next_num - 1];
5594 list = token->next[0];
5595 for (i = 0; list[i]; ++i)
5600 * If there is a single token, use its completion callback, otherwise
5601 * return the number of entries.
5603 token = &token_list[list[0]];
5604 if (i == 1 && token->comp) {
5605 /* Save index for cmd_flow_get_help(). */
5606 ctx->prev = list[0];
/* Delegate: comp callbacks count entries when called without a buffer. */
5607 return token->comp(ctx, token, 0, NULL, 0);
5612 /** Return a completion entry (cmdline API). */
/* Copy completion entry #index into dst; mirrors get_nb()'s counting. */
5614 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5615 char *dst, unsigned int size)
5617 struct context *ctx = &cmd_flow_context;
5618 const struct token *token = &token_list[ctx->curr];
5619 const enum index *list;
5623 /* Count number of tokens in current list. */
5625 list = ctx->next[ctx->next_num - 1];
5627 list = token->next[0];
5628 for (i = 0; list[i]; ++i)
5632 /* If there is a single token, use its completion callback. */
5633 token = &token_list[list[0]];
5634 if (i == 1 && token->comp) {
5635 /* Save index for cmd_flow_get_help(). */
5636 ctx->prev = list[0];
5637 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5639 /* Otherwise make sure the index is valid and use defaults. */
5642 token = &token_list[list[index]];
5643 strlcpy(dst, token->name, size);
5644 /* Save index for cmd_flow_get_help(). */
5645 ctx->prev = list[index];
5649 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) as the token whose
 * type/help should be displayed. */
5651 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5653 struct context *ctx = &cmd_flow_context;
5654 const struct token *token = &token_list[ctx->prev];
5659 /* Set token type and update global help with details. */
5660 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
/* Prefer the detailed help text; fall back to the token name. */
5662 cmd_flow.help_str = token->help;
5664 cmd_flow.help_str = token->name;
5668 /** Token definition template (cmdline API). */
/* Single shared header returned for every dynamic token; all four
 * cmdline callbacks route through the functions above. */
5669 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5670 .ops = &(struct cmdline_token_ops){
5671 .parse = cmd_flow_parse,
5672 .complete_get_nb = cmd_flow_complete_get_nb,
5673 .complete_get_elt = cmd_flow_complete_get_elt,
5674 .get_help = cmd_flow_get_help,
5679 /** Populate the next dynamic token. */
/*
 * Called by the cmdline library to fetch token headers one by one;
 * resets the context when asked for the first token and stops emitting
 * headers once no further tokens are expected.
 */
5681 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5682 cmdline_parse_token_hdr_t **hdr_inst)
5684 struct context *ctx = &cmd_flow_context;
5686 /* Always reinitialize context before requesting the first token. */
5687 if (!(hdr_inst - cmd_flow.tokens))
5688 cmd_flow_context_init(ctx);
5689 /* Return NULL when no more tokens are expected. */
5690 if (!ctx->next_num && ctx->curr) {
5694 /* Determine if command should end here. */
5695 if (ctx->eol && ctx->last && ctx->next_num) {
5696 const enum index *list = ctx->next[ctx->next_num - 1];
5699 for (i = 0; list[i]; ++i) {
5706 *hdr = &cmd_flow_token_hdr;
5709 /** Dispatch parsed buffer to function calls. */
/* Map each completed command buffer onto the matching port_flow_*()
 * implementation in config.c. */
5711 cmd_flow_parsed(const struct buffer *in)
5713 switch (in->command) {
5715 port_flow_validate(in->port, &in->args.vc.attr,
5716 in->args.vc.pattern, in->args.vc.actions);
5719 port_flow_create(in->port, &in->args.vc.attr,
5720 in->args.vc.pattern, in->args.vc.actions);
5723 port_flow_destroy(in->port, in->args.destroy.rule_n,
5724 in->args.destroy.rule);
5727 port_flow_flush(in->port);
5730 port_flow_query(in->port, in->args.query.rule,
5731 &in->args.query.action);
5734 port_flow_list(in->port, in->args.list.group_n,
5735 in->args.list.group);
5738 port_flow_isolate(in->port, in->args.isolate.set);
5745 /** Token generator and output processing callback (cmdline API). */
/* Doubles as token generator (arg2 set) and result dispatcher. */
5747 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5750 cmd_flow_tok(arg0, arg2);
5752 cmd_flow_parsed(arg0);
5755 /** Global parser instance (cmdline API). */
/* Definition of the instance forward-declared earlier; tokens are
 * generated dynamically by cmd_flow_tok(). */
5756 cmdline_parse_inst_t cmd_flow = {
5758 .data = NULL, /**< Unused. */
5759 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5762 }, /**< Tokens are returned by cmd_flow_tok(). */
5765 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Patch mandatory protocol fields of a raw header template so that the
 * resulting byte stream is well-formed: next-protocol/ethertype chaining
 * plus per-protocol version/flag constants.
 */
5768 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5770 struct rte_flow_item_ipv4 *ipv4;
5771 struct rte_flow_item_eth *eth;
5772 struct rte_flow_item_ipv6 *ipv6;
5773 struct rte_flow_item_vxlan *vxlan;
5774 struct rte_flow_item_vxlan_gpe *gpe;
5775 struct rte_flow_item_nvgre *nvgre;
5776 uint32_t ipv6_vtc_flow;
5778 switch (item->type) {
5779 case RTE_FLOW_ITEM_TYPE_ETH:
5780 eth = (struct rte_flow_item_eth *)buf;
5782 eth->type = rte_cpu_to_be_16(next_proto);
5784 case RTE_FLOW_ITEM_TYPE_IPV4:
5785 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45: IP version 4, header length 5 words. */
5786 ipv4->hdr.version_ihl = 0x45;
5787 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
5789 case RTE_FLOW_ITEM_TYPE_IPV6:
5790 ipv6 = (struct rte_flow_item_ipv6 *)buf;
5791 ipv6->hdr.proto = (uint8_t)next_proto;
5792 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
5793 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
5794 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
5795 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
5797 case RTE_FLOW_ITEM_TYPE_VXLAN:
5798 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08: VXLAN I flag (VNI valid). */
5799 vxlan->flags = 0x08;
5801 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5802 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
5805 case RTE_FLOW_ITEM_TYPE_NVGRE:
5806 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558: Transparent Ethernet Bridging; 0x2000 sets the GRE key
 * present (K) flag. */
5807 nvgre->protocol = rte_cpu_to_be_16(0x6558);
5808 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
5815 /** Helper of get item's default mask. */
/*
 * Map an rte_flow item type onto the library-provided default mask;
 * GRE_KEY has no library mask, so a local full 32-bit mask is used.
 * Returns NULL (initial value) for types without a default.
 */
5817 flow_item_default_mask(const struct rte_flow_item *item)
5819 const void *mask = NULL;
5820 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5822 switch (item->type) {
5823 case RTE_FLOW_ITEM_TYPE_ANY:
5824 mask = &rte_flow_item_any_mask;
5826 case RTE_FLOW_ITEM_TYPE_VF:
5827 mask = &rte_flow_item_vf_mask;
5829 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5830 mask = &rte_flow_item_port_id_mask;
5832 case RTE_FLOW_ITEM_TYPE_RAW:
5833 mask = &rte_flow_item_raw_mask;
5835 case RTE_FLOW_ITEM_TYPE_ETH:
5836 mask = &rte_flow_item_eth_mask;
5838 case RTE_FLOW_ITEM_TYPE_VLAN:
5839 mask = &rte_flow_item_vlan_mask;
5841 case RTE_FLOW_ITEM_TYPE_IPV4:
5842 mask = &rte_flow_item_ipv4_mask;
5844 case RTE_FLOW_ITEM_TYPE_IPV6:
5845 mask = &rte_flow_item_ipv6_mask;
5847 case RTE_FLOW_ITEM_TYPE_ICMP:
5848 mask = &rte_flow_item_icmp_mask;
5850 case RTE_FLOW_ITEM_TYPE_UDP:
5851 mask = &rte_flow_item_udp_mask;
5853 case RTE_FLOW_ITEM_TYPE_TCP:
5854 mask = &rte_flow_item_tcp_mask;
5856 case RTE_FLOW_ITEM_TYPE_SCTP:
5857 mask = &rte_flow_item_sctp_mask;
5859 case RTE_FLOW_ITEM_TYPE_VXLAN:
5860 mask = &rte_flow_item_vxlan_mask;
5862 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5863 mask = &rte_flow_item_vxlan_gpe_mask;
5865 case RTE_FLOW_ITEM_TYPE_E_TAG:
5866 mask = &rte_flow_item_e_tag_mask;
5868 case RTE_FLOW_ITEM_TYPE_NVGRE:
5869 mask = &rte_flow_item_nvgre_mask;
5871 case RTE_FLOW_ITEM_TYPE_MPLS:
5872 mask = &rte_flow_item_mpls_mask;
5874 case RTE_FLOW_ITEM_TYPE_GRE:
5875 mask = &rte_flow_item_gre_mask;
5877 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5878 mask = &gre_key_default_mask;
5880 case RTE_FLOW_ITEM_TYPE_META:
5881 mask = &rte_flow_item_meta_mask;
5883 case RTE_FLOW_ITEM_TYPE_FUZZY:
5884 mask = &rte_flow_item_fuzzy_mask;
5886 case RTE_FLOW_ITEM_TYPE_GTP:
5887 mask = &rte_flow_item_gtp_mask;
5889 case RTE_FLOW_ITEM_TYPE_ESP:
5890 mask = &rte_flow_item_esp_mask;
5892 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5893 mask = &rte_flow_item_gtp_psc_mask;
5895 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
5896 mask = &rte_flow_item_pppoe_proto_id_mask;
5905 /** Dispatch parsed buffer to function calls. */
5907 cmd_set_raw_parsed(const struct buffer *in)
5909 uint32_t n = in->args.vc.pattern_n;
5911 struct rte_flow_item *item = NULL;
5913 uint8_t *data = NULL;
5914 uint8_t *data_tail = NULL;
5915 size_t *total_size = NULL;
5916 uint16_t upper_layer = 0;
5919 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
5920 in->command == SET_RAW_DECAP);
5921 if (in->command == SET_RAW_ENCAP) {
5922 total_size = &raw_encap_conf.size;
5923 data = (uint8_t *)&raw_encap_conf.data;
5925 total_size = &raw_decap_conf.size;
5926 data = (uint8_t *)&raw_decap_conf.data;
5929 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5930 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
5931 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
5932 for (i = n - 1 ; i >= 0; --i) {
5933 item = in->args.vc.pattern + i;
5934 if (item->spec == NULL)
5935 item->spec = flow_item_default_mask(item);
5936 switch (item->type) {
5937 case RTE_FLOW_ITEM_TYPE_ETH:
5938 size = sizeof(struct rte_flow_item_eth);
5940 case RTE_FLOW_ITEM_TYPE_VLAN:
5941 size = sizeof(struct rte_flow_item_vlan);
5942 proto = RTE_ETHER_TYPE_VLAN;
5944 case RTE_FLOW_ITEM_TYPE_IPV4:
5945 size = sizeof(struct rte_flow_item_ipv4);
5946 proto = RTE_ETHER_TYPE_IPV4;
5948 case RTE_FLOW_ITEM_TYPE_IPV6:
5949 size = sizeof(struct rte_flow_item_ipv6);
5950 proto = RTE_ETHER_TYPE_IPV6;
5952 case RTE_FLOW_ITEM_TYPE_UDP:
5953 size = sizeof(struct rte_flow_item_udp);
5956 case RTE_FLOW_ITEM_TYPE_TCP:
5957 size = sizeof(struct rte_flow_item_tcp);
5960 case RTE_FLOW_ITEM_TYPE_VXLAN:
5961 size = sizeof(struct rte_flow_item_vxlan);
5963 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5964 size = sizeof(struct rte_flow_item_vxlan_gpe);
5966 case RTE_FLOW_ITEM_TYPE_GRE:
5967 size = sizeof(struct rte_flow_item_gre);
5970 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5971 size = sizeof(rte_be32_t);
5973 case RTE_FLOW_ITEM_TYPE_MPLS:
5974 size = sizeof(struct rte_flow_item_mpls);
5976 case RTE_FLOW_ITEM_TYPE_NVGRE:
5977 size = sizeof(struct rte_flow_item_nvgre);
5981 printf("Error - Not supported item\n");
5983 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5986 *total_size += size;
5987 rte_memcpy(data_tail - (*total_size), item->spec, size);
5988 /* update some fields which cannot be set by cmdline */
5989 update_fields((data_tail - (*total_size)), item,
5991 upper_layer = proto;
5993 if (verbose_level & 0x1)
5994 printf("total data size is %zu\n", (*total_size));
5995 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
5998 /** Populate help strings for current token (cmdline API). */
6000 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
6003 struct context *ctx = &cmd_flow_context;
6004 const struct token *token = &token_list[ctx->prev];
6009 /* Set token type and update global help with details. */
6010 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
6012 cmd_set_raw.help_str = token->help;
6014 cmd_set_raw.help_str = token->name;
6018 /** Token definition template (cmdline API). */
6019 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
6020 .ops = &(struct cmdline_token_ops){
6021 .parse = cmd_flow_parse,
6022 .complete_get_nb = cmd_flow_complete_get_nb,
6023 .complete_get_elt = cmd_flow_complete_get_elt,
6024 .get_help = cmd_set_raw_get_help,
6029 /** Populate the next dynamic token. */
6031 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
6032 cmdline_parse_token_hdr_t **hdr_inst)
6034 struct context *ctx = &cmd_flow_context;
6036 /* Always reinitialize context before requesting the first token. */
6037 if (!(hdr_inst - cmd_set_raw.tokens)) {
6038 cmd_flow_context_init(ctx);
6039 ctx->curr = START_SET;
6041 /* Return NULL when no more tokens are expected. */
6042 if (!ctx->next_num && (ctx->curr != START_SET)) {
6046 /* Determine if command should end here. */
6047 if (ctx->eol && ctx->last && ctx->next_num) {
6048 const enum index *list = ctx->next[ctx->next_num - 1];
6051 for (i = 0; list[i]; ++i) {
6058 *hdr = &cmd_set_raw_token_hdr;
6061 /** Token generator and output processing callback (cmdline API). */
6063 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
6066 cmd_set_raw_tok(arg0, arg2);
6068 cmd_set_raw_parsed(arg0);
6071 /** Global parser instance (cmdline API). */
6072 cmdline_parse_inst_t cmd_set_raw = {
6073 .f = cmd_set_raw_cb,
6074 .data = NULL, /**< Unused. */
6075 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6078 }, /**< Tokens are returned by cmd_flow_tok(). */