1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
49 /* Top-level command. */
51 /* Sub-level commands. */
55 /* Top-level command. */
57 /* Sub-level commands. */
66 /* Destroy arguments. */
69 /* Query arguments. */
75 /* Validate/create arguments. */
82 /* Validate/create pattern. */
119 ITEM_VLAN_INNER_TYPE,
151 ITEM_E_TAG_GRP_ECID_B,
160 ITEM_GRE_C_RSVD0_VER,
176 ITEM_ARP_ETH_IPV4_SHA,
177 ITEM_ARP_ETH_IPV4_SPA,
178 ITEM_ARP_ETH_IPV4_THA,
179 ITEM_ARP_ETH_IPV4_TPA,
181 ITEM_IPV6_EXT_NEXT_HDR,
186 ITEM_ICMP6_ND_NS_TARGET_ADDR,
188 ITEM_ICMP6_ND_NA_TARGET_ADDR,
190 ITEM_ICMP6_ND_OPT_TYPE,
191 ITEM_ICMP6_ND_OPT_SLA_ETH,
192 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
193 ITEM_ICMP6_ND_OPT_TLA_ETH,
194 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
207 /* Validate/create actions. */
227 ACTION_RSS_FUNC_DEFAULT,
228 ACTION_RSS_FUNC_TOEPLITZ,
229 ACTION_RSS_FUNC_SIMPLE_XOR,
241 ACTION_PHY_PORT_ORIGINAL,
242 ACTION_PHY_PORT_INDEX,
244 ACTION_PORT_ID_ORIGINAL,
248 ACTION_OF_SET_MPLS_TTL,
249 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
250 ACTION_OF_DEC_MPLS_TTL,
251 ACTION_OF_SET_NW_TTL,
252 ACTION_OF_SET_NW_TTL_NW_TTL,
253 ACTION_OF_DEC_NW_TTL,
254 ACTION_OF_COPY_TTL_OUT,
255 ACTION_OF_COPY_TTL_IN,
258 ACTION_OF_PUSH_VLAN_ETHERTYPE,
259 ACTION_OF_SET_VLAN_VID,
260 ACTION_OF_SET_VLAN_VID_VLAN_VID,
261 ACTION_OF_SET_VLAN_PCP,
262 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
264 ACTION_OF_POP_MPLS_ETHERTYPE,
266 ACTION_OF_PUSH_MPLS_ETHERTYPE,
273 ACTION_MPLSOGRE_ENCAP,
274 ACTION_MPLSOGRE_DECAP,
275 ACTION_MPLSOUDP_ENCAP,
276 ACTION_MPLSOUDP_DECAP,
278 ACTION_SET_IPV4_SRC_IPV4_SRC,
280 ACTION_SET_IPV4_DST_IPV4_DST,
282 ACTION_SET_IPV6_SRC_IPV6_SRC,
284 ACTION_SET_IPV6_DST_IPV6_DST,
286 ACTION_SET_TP_SRC_TP_SRC,
288 ACTION_SET_TP_DST_TP_DST,
294 ACTION_SET_MAC_SRC_MAC_SRC,
296 ACTION_SET_MAC_DST_MAC_DST,
298 ACTION_INC_TCP_SEQ_VALUE,
300 ACTION_DEC_TCP_SEQ_VALUE,
302 ACTION_INC_TCP_ACK_VALUE,
304 ACTION_DEC_TCP_ACK_VALUE,
309 /** Maximum size for pattern in struct rte_flow_item_raw. */
310 #define ITEM_RAW_PATTERN_SIZE 40
312 /** Storage size for struct rte_flow_item_raw including pattern. */
/* The pattern bytes are stored immediately after the item structure
 * (see the ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw), ...) use
 * for ITEM_RAW_PATTERN further down), hence struct size plus pattern
 * size.
 */
313 #define ITEM_RAW_SIZE \
314 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
316 /** Maximum number of queue indices in struct rte_flow_action_rss. */
317 #define ACTION_RSS_QUEUE_NUM 32
319 /** Storage for struct rte_flow_action_rss including external data. */
320 struct action_rss_data {
321 struct rte_flow_action_rss conf;
322 uint8_t key[RSS_HASH_KEY_LENGTH];
323 uint16_t queue[ACTION_RSS_QUEUE_NUM];
326 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
/* NOTE(review): presumably ETH/VLAN/IPv4-or-IPv6/UDP/VXLAN plus the
 * terminating END item — this matches the per-item fields of
 * struct action_vxlan_encap_data below; confirm against the encap
 * setup code.
 */
327 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
/** Maximum data size in struct rte_flow_action_raw_encap. */
329 #define ACTION_RAW_ENCAP_MAX_DATA 128
331 /** Storage for struct rte_flow_action_raw_encap. */
332 struct raw_encap_conf {
333 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
334 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
338 struct raw_encap_conf raw_encap_conf = {.size = 0};
340 /** Storage for struct rte_flow_action_raw_decap. */
341 struct raw_decap_conf {
342 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
346 struct raw_decap_conf raw_decap_conf = {.size = 0};
348 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
349 struct action_vxlan_encap_data {
350 struct rte_flow_action_vxlan_encap conf;
351 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
352 struct rte_flow_item_eth item_eth;
353 struct rte_flow_item_vlan item_vlan;
355 struct rte_flow_item_ipv4 item_ipv4;
356 struct rte_flow_item_ipv6 item_ipv6;
358 struct rte_flow_item_udp item_udp;
359 struct rte_flow_item_vxlan item_vxlan;
362 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
/* NOTE(review): presumably ETH/VLAN/IPv4-or-IPv6/NVGRE plus the
 * terminating END item — matches the per-item fields of
 * struct action_nvgre_encap_data below; confirm against the encap
 * setup code.
 */
363 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
365 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
366 struct action_nvgre_encap_data {
367 struct rte_flow_action_nvgre_encap conf;
368 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
369 struct rte_flow_item_eth item_eth;
370 struct rte_flow_item_vlan item_vlan;
372 struct rte_flow_item_ipv4 item_ipv4;
373 struct rte_flow_item_ipv6 item_ipv6;
375 struct rte_flow_item_nvgre item_nvgre;
378 /** Maximum data size in struct rte_flow_action_raw_encap. */
/* NOTE(review): identical redefinition of the macro already defined
 * above (embedded line 329). Benign per C11 6.10.3p2 because the
 * replacement lists are token-for-token identical, but consider
 * keeping a single definition.
 */
379 #define ACTION_RAW_ENCAP_MAX_DATA 128
381 /** Storage for struct rte_flow_action_raw_encap including external data. */
382 struct action_raw_encap_data {
383 struct rte_flow_action_raw_encap conf;
384 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
385 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
388 /** Storage for struct rte_flow_action_raw_decap including external data. */
389 struct action_raw_decap_data {
390 struct rte_flow_action_raw_decap conf;
391 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
394 /** Maximum number of subsequent tokens and arguments on the stack. */
/* Sizes both the next[] and args[] arrays of struct context below;
 * bounds how deeply token lists pushed by the parser may nest.
 */
395 #define CTX_STACK_SIZE 16
397 /** Parser context. */
399 /** Stack of subsequent token lists to process. */
400 const enum index *next[CTX_STACK_SIZE];
401 /** Arguments for stacked tokens. */
402 const void *args[CTX_STACK_SIZE];
403 enum index curr; /**< Current token index. */
404 enum index prev; /**< Index of the last token seen. */
405 int next_num; /**< Number of entries in next[]. */
406 int args_num; /**< Number of entries in args[]. */
407 uint32_t eol:1; /**< EOL has been detected. */
408 uint32_t last:1; /**< No more arguments. */
409 portid_t port; /**< Current port ID (for completions). */
410 uint32_t objdata; /**< Object-specific data. */
411 void *object; /**< Address of current object for relative offsets. */
412 void *objmask; /**< Object a full mask must be written to. */
415 /** Token argument. */
417 uint32_t hton:1; /**< Use network byte ordering. */
418 uint32_t sign:1; /**< Value is signed. */
419 uint32_t bounded:1; /**< Value is bounded. */
420 uintmax_t min; /**< Minimum value if bounded. */
421 uintmax_t max; /**< Maximum value if bounded. */
422 uint32_t offset; /**< Relative offset from ctx->object. */
423 uint32_t size; /**< Field size. */
424 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
427 /** Parser token definition. */
429 /** Type displayed during completion (defaults to "TOKEN"). */
431 /** Help displayed during completion (defaults to token name). */
433 /** Private data used by parser functions. */
436 * Lists of subsequent tokens to push on the stack. Each call to the
437 * parser consumes the last entry of that stack.
439 const enum index *const *next;
440 /** Arguments stack for subsequent tokens that need them. */
441 const struct arg *const *args;
443 * Token-processing callback, returns -1 in case of error, the
444 * length of the matched string otherwise. If NULL, attempts to
445 * match the token name.
447 * If buf is not NULL, the result should be stored in it according
448 * to context. An error is returned if not large enough.
450 int (*call)(struct context *ctx, const struct token *token,
451 const char *str, unsigned int len,
452 void *buf, unsigned int size);
454 * Callback that provides possible values for this token, used for
455 * completion. Returns -1 in case of error, the number of possible
456 * values otherwise. If NULL, the token name is used.
458 * If buf is not NULL, entry index ent is written to buf and the
459 * full length of the entry is returned (same behavior as
462 int (*comp)(struct context *ctx, const struct token *token,
463 unsigned int ent, char *buf, unsigned int size);
464 /** Mandatory token name, no default value. */
468 /** Static initializer for the next field. */
/* Compound-literal array of token-index lists, NULL-terminated so
 * consumers can walk it without a separate count.
 */
469 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
471 /** Static initializer for a NEXT() entry. */
/* Single token-index list terminated by the ZERO sentinel index. */
472 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
474 /** Static initializer for the args field. */
/* Compound-literal array of struct arg pointers, NULL-terminated. */
475 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
477 /** Static initializer for ARGS() to target a field. */
478 #define ARGS_ENTRY(s, f) \
479 (&(const struct arg){ \
480 .offset = offsetof(s, f), \
481 .size = sizeof(((s *)0)->f), \
484 /** Static initializer for ARGS() to target a bit-field. */
485 #define ARGS_ENTRY_BF(s, f, b) \
486 (&(const struct arg){ \
488 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
491 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
492 #define ARGS_ENTRY_MASK(s, f, m) \
493 (&(const struct arg){ \
494 .offset = offsetof(s, f), \
495 .size = sizeof(((s *)0)->f), \
496 .mask = (const void *)(m), \
499 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
500 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
501 (&(const struct arg){ \
503 .offset = offsetof(s, f), \
504 .size = sizeof(((s *)0)->f), \
505 .mask = (const void *)(m), \
508 /** Static initializer for ARGS() to target a pointer. */
509 #define ARGS_ENTRY_PTR(s, f) \
510 (&(const struct arg){ \
511 .size = sizeof(*((s *)0)->f), \
514 /** Static initializer for ARGS() with arbitrary offset and size. */
515 #define ARGS_ENTRY_ARB(o, s) \
516 (&(const struct arg){ \
521 /** Same as ARGS_ENTRY_ARB() with bounded values. */
522 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
523 (&(const struct arg){ \
531 /** Same as ARGS_ENTRY() using network byte ordering. */
532 #define ARGS_ENTRY_HTON(s, f) \
533 (&(const struct arg){ \
535 .offset = offsetof(s, f), \
536 .size = sizeof(((s *)0)->f), \
539 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
540 #define ARG_ENTRY_HTON(s) \
541 (&(const struct arg){ \
547 /** Parser output buffer layout expected by cmd_flow_parsed(). */
549 enum index command; /**< Flow command. */
550 portid_t port; /**< Affected port ID. */
553 struct rte_flow_attr attr;
554 struct rte_flow_item *pattern;
555 struct rte_flow_action *actions;
559 } vc; /**< Validate/create arguments. */
563 } destroy; /**< Destroy arguments. */
566 struct rte_flow_action action;
567 } query; /**< Query arguments. */
571 } list; /**< List arguments. */
574 } isolate; /**< Isolated mode arguments. */
575 } args; /**< Command arguments. */
578 /** Private data for pattern items. */
579 struct parse_item_priv {
580 enum rte_flow_item_type type; /**< Item type. */
581 uint32_t size; /**< Size of item specification structure. */
584 #define PRIV_ITEM(t, s) \
585 (&(const struct parse_item_priv){ \
586 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
590 /** Private data for actions. */
591 struct parse_action_priv {
592 enum rte_flow_action_type type; /**< Action type. */
593 uint32_t size; /**< Size of action configuration structure. */
596 #define PRIV_ACTION(t, s) \
597 (&(const struct parse_action_priv){ \
598 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
602 static const enum index next_vc_attr[] = {
612 static const enum index next_destroy_attr[] = {
618 static const enum index next_list_attr[] = {
624 static const enum index item_param[] = {
633 static const enum index next_item[] = {
669 ITEM_ICMP6_ND_OPT_SLA_ETH,
670 ITEM_ICMP6_ND_OPT_TLA_ETH,
681 static const enum index item_fuzzy[] = {
687 static const enum index item_any[] = {
693 static const enum index item_vf[] = {
699 static const enum index item_phy_port[] = {
705 static const enum index item_port_id[] = {
711 static const enum index item_mark[] = {
717 static const enum index item_raw[] = {
727 static const enum index item_eth[] = {
735 static const enum index item_vlan[] = {
740 ITEM_VLAN_INNER_TYPE,
745 static const enum index item_ipv4[] = {
755 static const enum index item_ipv6[] = {
766 static const enum index item_icmp[] = {
773 static const enum index item_udp[] = {
780 static const enum index item_tcp[] = {
788 static const enum index item_sctp[] = {
797 static const enum index item_vxlan[] = {
803 static const enum index item_e_tag[] = {
804 ITEM_E_TAG_GRP_ECID_B,
809 static const enum index item_nvgre[] = {
815 static const enum index item_mpls[] = {
823 static const enum index item_gre[] = {
825 ITEM_GRE_C_RSVD0_VER,
833 static const enum index item_gre_key[] = {
839 static const enum index item_gtp[] = {
845 static const enum index item_geneve[] = {
852 static const enum index item_vxlan_gpe[] = {
858 static const enum index item_arp_eth_ipv4[] = {
859 ITEM_ARP_ETH_IPV4_SHA,
860 ITEM_ARP_ETH_IPV4_SPA,
861 ITEM_ARP_ETH_IPV4_THA,
862 ITEM_ARP_ETH_IPV4_TPA,
867 static const enum index item_ipv6_ext[] = {
868 ITEM_IPV6_EXT_NEXT_HDR,
873 static const enum index item_icmp6[] = {
880 static const enum index item_icmp6_nd_ns[] = {
881 ITEM_ICMP6_ND_NS_TARGET_ADDR,
886 static const enum index item_icmp6_nd_na[] = {
887 ITEM_ICMP6_ND_NA_TARGET_ADDR,
892 static const enum index item_icmp6_nd_opt[] = {
893 ITEM_ICMP6_ND_OPT_TYPE,
898 static const enum index item_icmp6_nd_opt_sla_eth[] = {
899 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
904 static const enum index item_icmp6_nd_opt_tla_eth[] = {
905 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
910 static const enum index item_meta[] = {
916 static const enum index item_gtp_psc[] = {
923 static const enum index item_pppoed[] = {
929 static const enum index item_pppoes[] = {
935 static const enum index item_pppoe_proto_id[] = {
941 static const enum index next_action[] = {
957 ACTION_OF_SET_MPLS_TTL,
958 ACTION_OF_DEC_MPLS_TTL,
959 ACTION_OF_SET_NW_TTL,
960 ACTION_OF_DEC_NW_TTL,
961 ACTION_OF_COPY_TTL_OUT,
962 ACTION_OF_COPY_TTL_IN,
965 ACTION_OF_SET_VLAN_VID,
966 ACTION_OF_SET_VLAN_PCP,
975 ACTION_MPLSOGRE_ENCAP,
976 ACTION_MPLSOGRE_DECAP,
977 ACTION_MPLSOUDP_ENCAP,
978 ACTION_MPLSOUDP_DECAP,
999 static const enum index action_mark[] = {
1005 static const enum index action_queue[] = {
1011 static const enum index action_count[] = {
1013 ACTION_COUNT_SHARED,
1018 static const enum index action_rss[] = {
1029 static const enum index action_vf[] = {
1036 static const enum index action_phy_port[] = {
1037 ACTION_PHY_PORT_ORIGINAL,
1038 ACTION_PHY_PORT_INDEX,
1043 static const enum index action_port_id[] = {
1044 ACTION_PORT_ID_ORIGINAL,
1050 static const enum index action_meter[] = {
1056 static const enum index action_of_set_mpls_ttl[] = {
1057 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
1062 static const enum index action_of_set_nw_ttl[] = {
1063 ACTION_OF_SET_NW_TTL_NW_TTL,
1068 static const enum index action_of_push_vlan[] = {
1069 ACTION_OF_PUSH_VLAN_ETHERTYPE,
1074 static const enum index action_of_set_vlan_vid[] = {
1075 ACTION_OF_SET_VLAN_VID_VLAN_VID,
1080 static const enum index action_of_set_vlan_pcp[] = {
1081 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
1086 static const enum index action_of_pop_mpls[] = {
1087 ACTION_OF_POP_MPLS_ETHERTYPE,
1092 static const enum index action_of_push_mpls[] = {
1093 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1098 static const enum index action_set_ipv4_src[] = {
1099 ACTION_SET_IPV4_SRC_IPV4_SRC,
1104 static const enum index action_set_mac_src[] = {
1105 ACTION_SET_MAC_SRC_MAC_SRC,
1110 static const enum index action_set_ipv4_dst[] = {
1111 ACTION_SET_IPV4_DST_IPV4_DST,
1116 static const enum index action_set_ipv6_src[] = {
1117 ACTION_SET_IPV6_SRC_IPV6_SRC,
1122 static const enum index action_set_ipv6_dst[] = {
1123 ACTION_SET_IPV6_DST_IPV6_DST,
1128 static const enum index action_set_tp_src[] = {
1129 ACTION_SET_TP_SRC_TP_SRC,
1134 static const enum index action_set_tp_dst[] = {
1135 ACTION_SET_TP_DST_TP_DST,
1140 static const enum index action_set_ttl[] = {
1146 static const enum index action_jump[] = {
1152 static const enum index action_set_mac_dst[] = {
1153 ACTION_SET_MAC_DST_MAC_DST,
1158 static const enum index action_inc_tcp_seq[] = {
1159 ACTION_INC_TCP_SEQ_VALUE,
1164 static const enum index action_dec_tcp_seq[] = {
1165 ACTION_DEC_TCP_SEQ_VALUE,
1170 static const enum index action_inc_tcp_ack[] = {
1171 ACTION_INC_TCP_ACK_VALUE,
1176 static const enum index action_dec_tcp_ack[] = {
1177 ACTION_DEC_TCP_ACK_VALUE,
1182 static int parse_set_raw_encap_decap(struct context *, const struct token *,
1183 const char *, unsigned int,
1184 void *, unsigned int);
1185 static int parse_set_init(struct context *, const struct token *,
1186 const char *, unsigned int,
1187 void *, unsigned int);
1188 static int parse_init(struct context *, const struct token *,
1189 const char *, unsigned int,
1190 void *, unsigned int);
1191 static int parse_vc(struct context *, const struct token *,
1192 const char *, unsigned int,
1193 void *, unsigned int);
1194 static int parse_vc_spec(struct context *, const struct token *,
1195 const char *, unsigned int, void *, unsigned int);
1196 static int parse_vc_conf(struct context *, const struct token *,
1197 const char *, unsigned int, void *, unsigned int);
1198 static int parse_vc_action_rss(struct context *, const struct token *,
1199 const char *, unsigned int, void *,
1201 static int parse_vc_action_rss_func(struct context *, const struct token *,
1202 const char *, unsigned int, void *,
1204 static int parse_vc_action_rss_type(struct context *, const struct token *,
1205 const char *, unsigned int, void *,
1207 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1208 const char *, unsigned int, void *,
1210 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1211 const char *, unsigned int, void *,
1213 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1214 const char *, unsigned int, void *,
1216 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1217 const char *, unsigned int, void *,
1219 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1220 const char *, unsigned int, void *,
1222 static int parse_vc_action_mplsogre_encap(struct context *,
1223 const struct token *, const char *,
1224 unsigned int, void *, unsigned int);
1225 static int parse_vc_action_mplsogre_decap(struct context *,
1226 const struct token *, const char *,
1227 unsigned int, void *, unsigned int);
1228 static int parse_vc_action_mplsoudp_encap(struct context *,
1229 const struct token *, const char *,
1230 unsigned int, void *, unsigned int);
1231 static int parse_vc_action_mplsoudp_decap(struct context *,
1232 const struct token *, const char *,
1233 unsigned int, void *, unsigned int);
1234 static int parse_vc_action_raw_encap(struct context *,
1235 const struct token *, const char *,
1236 unsigned int, void *, unsigned int);
1237 static int parse_vc_action_raw_decap(struct context *,
1238 const struct token *, const char *,
1239 unsigned int, void *, unsigned int);
1240 static int parse_destroy(struct context *, const struct token *,
1241 const char *, unsigned int,
1242 void *, unsigned int);
1243 static int parse_flush(struct context *, const struct token *,
1244 const char *, unsigned int,
1245 void *, unsigned int);
1246 static int parse_query(struct context *, const struct token *,
1247 const char *, unsigned int,
1248 void *, unsigned int);
1249 static int parse_action(struct context *, const struct token *,
1250 const char *, unsigned int,
1251 void *, unsigned int);
1252 static int parse_list(struct context *, const struct token *,
1253 const char *, unsigned int,
1254 void *, unsigned int);
1255 static int parse_isolate(struct context *, const struct token *,
1256 const char *, unsigned int,
1257 void *, unsigned int);
1258 static int parse_int(struct context *, const struct token *,
1259 const char *, unsigned int,
1260 void *, unsigned int);
1261 static int parse_prefix(struct context *, const struct token *,
1262 const char *, unsigned int,
1263 void *, unsigned int);
1264 static int parse_boolean(struct context *, const struct token *,
1265 const char *, unsigned int,
1266 void *, unsigned int);
1267 static int parse_string(struct context *, const struct token *,
1268 const char *, unsigned int,
1269 void *, unsigned int);
1270 static int parse_hex(struct context *ctx, const struct token *token,
1271 const char *str, unsigned int len,
1272 void *buf, unsigned int size);
1273 static int parse_mac_addr(struct context *, const struct token *,
1274 const char *, unsigned int,
1275 void *, unsigned int);
1276 static int parse_ipv4_addr(struct context *, const struct token *,
1277 const char *, unsigned int,
1278 void *, unsigned int);
1279 static int parse_ipv6_addr(struct context *, const struct token *,
1280 const char *, unsigned int,
1281 void *, unsigned int);
1282 static int parse_port(struct context *, const struct token *,
1283 const char *, unsigned int,
1284 void *, unsigned int);
1285 static int comp_none(struct context *, const struct token *,
1286 unsigned int, char *, unsigned int);
1287 static int comp_boolean(struct context *, const struct token *,
1288 unsigned int, char *, unsigned int);
1289 static int comp_action(struct context *, const struct token *,
1290 unsigned int, char *, unsigned int);
1291 static int comp_port(struct context *, const struct token *,
1292 unsigned int, char *, unsigned int);
1293 static int comp_rule_id(struct context *, const struct token *,
1294 unsigned int, char *, unsigned int);
1295 static int comp_vc_action_rss_type(struct context *, const struct token *,
1296 unsigned int, char *, unsigned int);
1297 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1298 unsigned int, char *, unsigned int);
1300 /** Token definitions. */
1301 static const struct token token_list[] = {
1302 /* Special tokens. */
1305 .help = "null entry, abused as the entry point",
1306 .next = NEXT(NEXT_ENTRY(FLOW)),
1311 .help = "command may end here",
1314 .name = "START_SET",
1315 .help = "null entry, abused as the entry point for set",
1316 .next = NEXT(NEXT_ENTRY(SET)),
1321 .help = "set command may end here",
1323 /* Common tokens. */
1327 .help = "integer value",
1332 .name = "{unsigned}",
1334 .help = "unsigned integer value",
1341 .help = "prefix length for bit-mask",
1342 .call = parse_prefix,
1346 .name = "{boolean}",
1348 .help = "any boolean value",
1349 .call = parse_boolean,
1350 .comp = comp_boolean,
1355 .help = "fixed string",
1356 .call = parse_string,
1362 .help = "fixed string",
1367 .name = "{MAC address}",
1369 .help = "standard MAC address notation",
1370 .call = parse_mac_addr,
1374 .name = "{IPv4 address}",
1375 .type = "IPV4 ADDRESS",
1376 .help = "standard IPv4 address notation",
1377 .call = parse_ipv4_addr,
1381 .name = "{IPv6 address}",
1382 .type = "IPV6 ADDRESS",
1383 .help = "standard IPv6 address notation",
1384 .call = parse_ipv6_addr,
1388 .name = "{rule id}",
1390 .help = "rule identifier",
1392 .comp = comp_rule_id,
1395 .name = "{port_id}",
1397 .help = "port identifier",
1402 .name = "{group_id}",
1404 .help = "group identifier",
1408 [PRIORITY_LEVEL] = {
1411 .help = "priority level",
1415 /* Top-level command. */
1418 .type = "{command} {port_id} [{arg} [...]]",
1419 .help = "manage ingress/egress flow rules",
1420 .next = NEXT(NEXT_ENTRY
1430 /* Sub-level commands. */
1433 .help = "check whether a flow rule can be created",
1434 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1435 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1440 .help = "create a flow rule",
1441 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1442 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1447 .help = "destroy specific flow rules",
1448 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1449 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1450 .call = parse_destroy,
1454 .help = "destroy all flow rules",
1455 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1456 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1457 .call = parse_flush,
1461 .help = "query an existing flow rule",
1462 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1463 NEXT_ENTRY(RULE_ID),
1464 NEXT_ENTRY(PORT_ID)),
1465 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1466 ARGS_ENTRY(struct buffer, args.query.rule),
1467 ARGS_ENTRY(struct buffer, port)),
1468 .call = parse_query,
1472 .help = "list existing flow rules",
1473 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1474 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1479 .help = "restrict ingress traffic to the defined flow rules",
1480 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1481 NEXT_ENTRY(PORT_ID)),
1482 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1483 ARGS_ENTRY(struct buffer, port)),
1484 .call = parse_isolate,
1486 /* Destroy arguments. */
1489 .help = "specify a rule identifier",
1490 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1491 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1492 .call = parse_destroy,
1494 /* Query arguments. */
1498 .help = "action to query, must be part of the rule",
1499 .call = parse_action,
1500 .comp = comp_action,
1502 /* List arguments. */
1505 .help = "specify a group",
1506 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1507 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1510 /* Validate/create attributes. */
1513 .help = "specify a group",
1514 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1515 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1520 .help = "specify a priority level",
1521 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1522 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1527 .help = "affect rule to ingress",
1528 .next = NEXT(next_vc_attr),
1533 .help = "affect rule to egress",
1534 .next = NEXT(next_vc_attr),
1539 .help = "apply rule directly to endpoints found in pattern",
1540 .next = NEXT(next_vc_attr),
1543 /* Validate/create pattern. */
1546 .help = "submit a list of pattern items",
1547 .next = NEXT(next_item),
1552 .help = "match value perfectly (with full bit-mask)",
1553 .call = parse_vc_spec,
1555 [ITEM_PARAM_SPEC] = {
1557 .help = "match value according to configured bit-mask",
1558 .call = parse_vc_spec,
1560 [ITEM_PARAM_LAST] = {
1562 .help = "specify upper bound to establish a range",
1563 .call = parse_vc_spec,
1565 [ITEM_PARAM_MASK] = {
1567 .help = "specify bit-mask with relevant bits set to one",
1568 .call = parse_vc_spec,
1570 [ITEM_PARAM_PREFIX] = {
1572 .help = "generate bit-mask from a prefix length",
1573 .call = parse_vc_spec,
1577 .help = "specify next pattern item",
1578 .next = NEXT(next_item),
1582 .help = "end list of pattern items",
1583 .priv = PRIV_ITEM(END, 0),
1584 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1589 .help = "no-op pattern item",
1590 .priv = PRIV_ITEM(VOID, 0),
1591 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1596 .help = "perform actions when pattern does not match",
1597 .priv = PRIV_ITEM(INVERT, 0),
1598 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1603 .help = "match any protocol for the current layer",
1604 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1605 .next = NEXT(item_any),
1610 .help = "number of layers covered",
1611 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1612 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1616 .help = "match traffic from/to the physical function",
1617 .priv = PRIV_ITEM(PF, 0),
1618 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1623 .help = "match traffic from/to a virtual function ID",
1624 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1625 .next = NEXT(item_vf),
1631 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1632 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1636 .help = "match traffic from/to a specific physical port",
1637 .priv = PRIV_ITEM(PHY_PORT,
1638 sizeof(struct rte_flow_item_phy_port)),
1639 .next = NEXT(item_phy_port),
1642 [ITEM_PHY_PORT_INDEX] = {
1644 .help = "physical port index",
1645 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1646 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1650 .help = "match traffic from/to a given DPDK port ID",
1651 .priv = PRIV_ITEM(PORT_ID,
1652 sizeof(struct rte_flow_item_port_id)),
1653 .next = NEXT(item_port_id),
1656 [ITEM_PORT_ID_ID] = {
1658 .help = "DPDK port ID",
1659 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1660 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1664 .help = "match traffic against value set in previously matched rule",
1665 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1666 .next = NEXT(item_mark),
1671 .help = "Integer value to match against",
1672 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1673 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1677 .help = "match an arbitrary byte string",
1678 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1679 .next = NEXT(item_raw),
1682 [ITEM_RAW_RELATIVE] = {
1684 .help = "look for pattern after the previous item",
1685 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1686 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1689 [ITEM_RAW_SEARCH] = {
1691 .help = "search pattern from offset (see also limit)",
1692 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1693 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1696 [ITEM_RAW_OFFSET] = {
1698 .help = "absolute or relative offset for pattern",
1699 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1700 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1702 [ITEM_RAW_LIMIT] = {
1704 .help = "search area limit for start of pattern",
1705 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1706 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1708 [ITEM_RAW_PATTERN] = {
1710 .help = "byte string to look for",
1711 .next = NEXT(item_raw,
1713 NEXT_ENTRY(ITEM_PARAM_IS,
1716 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1717 ARGS_ENTRY(struct rte_flow_item_raw, length),
1718 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1719 ITEM_RAW_PATTERN_SIZE)),
1723 .help = "match Ethernet header",
1724 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1725 .next = NEXT(item_eth),
1730 .help = "destination MAC",
1731 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1732 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1736 .help = "source MAC",
1737 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1738 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1742 .help = "EtherType",
1743 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1744 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1748 .help = "match 802.1Q/ad VLAN tag",
1749 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1750 .next = NEXT(item_vlan),
1755 .help = "tag control information",
1756 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1757 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1761 .help = "priority code point",
1762 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1763 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1768 .help = "drop eligible indicator",
1769 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1770 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1775 .help = "VLAN identifier",
1776 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1777 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1780 [ITEM_VLAN_INNER_TYPE] = {
1781 .name = "inner_type",
1782 .help = "inner EtherType",
1783 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1784 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1789 .help = "match IPv4 header",
1790 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1791 .next = NEXT(item_ipv4),
1796 .help = "type of service",
1797 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1798 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1799 hdr.type_of_service)),
1803 .help = "time to live",
1804 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1805 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1808 [ITEM_IPV4_PROTO] = {
1810 .help = "next protocol ID",
1811 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1812 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1813 hdr.next_proto_id)),
1817 .help = "source address",
1818 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1819 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1824 .help = "destination address",
1825 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1826 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1831 .help = "match IPv6 header",
1832 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1833 .next = NEXT(item_ipv6),
1838 .help = "traffic class",
1839 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1840 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1842 "\x0f\xf0\x00\x00")),
1844 [ITEM_IPV6_FLOW] = {
1846 .help = "flow label",
1847 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1848 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1850 "\x00\x0f\xff\xff")),
1852 [ITEM_IPV6_PROTO] = {
1854 .help = "protocol (next header)",
1855 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1856 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1861 .help = "hop limit",
1862 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1863 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1868 .help = "source address",
1869 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1870 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1875 .help = "destination address",
1876 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1877 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1882 .help = "match ICMP header",
1883 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1884 .next = NEXT(item_icmp),
1887 [ITEM_ICMP_TYPE] = {
1889 .help = "ICMP packet type",
1890 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1891 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1894 [ITEM_ICMP_CODE] = {
1896 .help = "ICMP packet code",
1897 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1898 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1903 .help = "match UDP header",
1904 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1905 .next = NEXT(item_udp),
1910 .help = "UDP source port",
1911 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1912 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1917 .help = "UDP destination port",
1918 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1919 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1924 .help = "match TCP header",
1925 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1926 .next = NEXT(item_tcp),
1931 .help = "TCP source port",
1932 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1933 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1938 .help = "TCP destination port",
1939 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1940 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1943 [ITEM_TCP_FLAGS] = {
1945 .help = "TCP flags",
1946 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1947 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1952 .help = "match SCTP header",
1953 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1954 .next = NEXT(item_sctp),
1959 .help = "SCTP source port",
1960 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1961 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1966 .help = "SCTP destination port",
1967 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1968 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1973 .help = "validation tag",
1974 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1975 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1978 [ITEM_SCTP_CKSUM] = {
1981 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1982 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1987 .help = "match VXLAN header",
1988 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1989 .next = NEXT(item_vxlan),
1992 [ITEM_VXLAN_VNI] = {
1994 .help = "VXLAN identifier",
1995 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1996 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
2000 .help = "match E-Tag header",
2001 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
2002 .next = NEXT(item_e_tag),
2005 [ITEM_E_TAG_GRP_ECID_B] = {
2006 .name = "grp_ecid_b",
2007 .help = "GRP and E-CID base",
2008 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
2009 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
2015 .help = "match NVGRE header",
2016 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
2017 .next = NEXT(item_nvgre),
2020 [ITEM_NVGRE_TNI] = {
2022 .help = "virtual subnet ID",
2023 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
2024 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
2028 .help = "match MPLS header",
2029 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
2030 .next = NEXT(item_mpls),
2033 [ITEM_MPLS_LABEL] = {
2035 .help = "MPLS label",
2036 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2037 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2043 .help = "MPLS Traffic Class",
2044 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2045 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2051 .help = "MPLS Bottom-of-Stack",
2052 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
2053 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
2059 .help = "match GRE header",
2060 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
2061 .next = NEXT(item_gre),
2064 [ITEM_GRE_PROTO] = {
2066 .help = "GRE protocol type",
2067 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2068 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2071 [ITEM_GRE_C_RSVD0_VER] = {
2072 .name = "c_rsvd0_ver",
2074 "checksum (1b), undefined (1b), key bit (1b),"
2075 " sequence number (1b), reserved 0 (9b),"
2077 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
2078 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
2081 [ITEM_GRE_C_BIT] = {
2083 .help = "checksum bit (C)",
2084 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2085 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2087 "\x80\x00\x00\x00")),
2089 [ITEM_GRE_S_BIT] = {
2091 .help = "sequence number bit (S)",
2092 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2093 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2095 "\x10\x00\x00\x00")),
2097 [ITEM_GRE_K_BIT] = {
2099 .help = "key bit (K)",
2100 .next = NEXT(item_gre, NEXT_ENTRY(BOOLEAN), item_param),
2101 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_gre,
2103 "\x20\x00\x00\x00")),
2107 .help = "fuzzy pattern match, expect faster than default",
2108 .priv = PRIV_ITEM(FUZZY,
2109 sizeof(struct rte_flow_item_fuzzy)),
2110 .next = NEXT(item_fuzzy),
2113 [ITEM_FUZZY_THRESH] = {
2115 .help = "match accuracy threshold",
2116 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
2117 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
2122 .help = "match GTP header",
2123 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
2124 .next = NEXT(item_gtp),
2129 .help = "tunnel endpoint identifier",
2130 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
2131 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
2135 .help = "match GTP header",
2136 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
2137 .next = NEXT(item_gtp),
2142 .help = "match GTP header",
2143 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
2144 .next = NEXT(item_gtp),
2149 .help = "match GENEVE header",
2150 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
2151 .next = NEXT(item_geneve),
2154 [ITEM_GENEVE_VNI] = {
2156 .help = "virtual network identifier",
2157 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2158 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2160 [ITEM_GENEVE_PROTO] = {
2162 .help = "GENEVE protocol type",
2163 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2164 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2167 [ITEM_VXLAN_GPE] = {
2168 .name = "vxlan-gpe",
2169 .help = "match VXLAN-GPE header",
2170 .priv = PRIV_ITEM(VXLAN_GPE,
2171 sizeof(struct rte_flow_item_vxlan_gpe)),
2172 .next = NEXT(item_vxlan_gpe),
2175 [ITEM_VXLAN_GPE_VNI] = {
2177 .help = "VXLAN-GPE identifier",
2178 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2179 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2182 [ITEM_ARP_ETH_IPV4] = {
2183 .name = "arp_eth_ipv4",
2184 .help = "match ARP header for Ethernet/IPv4",
2185 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2186 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2187 .next = NEXT(item_arp_eth_ipv4),
2190 [ITEM_ARP_ETH_IPV4_SHA] = {
2192 .help = "sender hardware address",
2193 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2195 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2198 [ITEM_ARP_ETH_IPV4_SPA] = {
2200 .help = "sender IPv4 address",
2201 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2203 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2206 [ITEM_ARP_ETH_IPV4_THA] = {
2208 .help = "target hardware address",
2209 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2211 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2214 [ITEM_ARP_ETH_IPV4_TPA] = {
2216 .help = "target IPv4 address",
2217 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2219 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2224 .help = "match presence of any IPv6 extension header",
2225 .priv = PRIV_ITEM(IPV6_EXT,
2226 sizeof(struct rte_flow_item_ipv6_ext)),
2227 .next = NEXT(item_ipv6_ext),
2230 [ITEM_IPV6_EXT_NEXT_HDR] = {
2232 .help = "next header",
2233 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2234 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2239 .help = "match any ICMPv6 header",
2240 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2241 .next = NEXT(item_icmp6),
2244 [ITEM_ICMP6_TYPE] = {
2246 .help = "ICMPv6 type",
2247 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2248 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2251 [ITEM_ICMP6_CODE] = {
2253 .help = "ICMPv6 code",
2254 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2255 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2258 [ITEM_ICMP6_ND_NS] = {
2259 .name = "icmp6_nd_ns",
2260 .help = "match ICMPv6 neighbor discovery solicitation",
2261 .priv = PRIV_ITEM(ICMP6_ND_NS,
2262 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2263 .next = NEXT(item_icmp6_nd_ns),
2266 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2267 .name = "target_addr",
2268 .help = "target address",
2269 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2271 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2274 [ITEM_ICMP6_ND_NA] = {
2275 .name = "icmp6_nd_na",
2276 .help = "match ICMPv6 neighbor discovery advertisement",
2277 .priv = PRIV_ITEM(ICMP6_ND_NA,
2278 sizeof(struct rte_flow_item_icmp6_nd_na)),
2279 .next = NEXT(item_icmp6_nd_na),
2282 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2283 .name = "target_addr",
2284 .help = "target address",
2285 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2287 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2290 [ITEM_ICMP6_ND_OPT] = {
2291 .name = "icmp6_nd_opt",
2292 .help = "match presence of any ICMPv6 neighbor discovery"
2294 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2295 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2296 .next = NEXT(item_icmp6_nd_opt),
2299 [ITEM_ICMP6_ND_OPT_TYPE] = {
2301 .help = "ND option type",
2302 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2304 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2307 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2308 .name = "icmp6_nd_opt_sla_eth",
2309 .help = "match ICMPv6 neighbor discovery source Ethernet"
2310 " link-layer address option",
2312 (ICMP6_ND_OPT_SLA_ETH,
2313 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2314 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2317 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2319 .help = "source Ethernet LLA",
2320 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2322 .args = ARGS(ARGS_ENTRY_HTON
2323 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2325 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2326 .name = "icmp6_nd_opt_tla_eth",
2327 .help = "match ICMPv6 neighbor discovery target Ethernet"
2328 " link-layer address option",
2330 (ICMP6_ND_OPT_TLA_ETH,
2331 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2332 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2335 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2337 .help = "target Ethernet LLA",
2338 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2340 .args = ARGS(ARGS_ENTRY_HTON
2341 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2345 .help = "match metadata header",
2346 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2347 .next = NEXT(item_meta),
2350 [ITEM_META_DATA] = {
2352 .help = "metadata value",
2353 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2354 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2355 data, "\xff\xff\xff\xff")),
2359 .help = "match GRE key",
2360 .priv = PRIV_ITEM(GRE_KEY, sizeof(rte_be32_t)),
2361 .next = NEXT(item_gre_key),
2364 [ITEM_GRE_KEY_VALUE] = {
2366 .help = "key value",
2367 .next = NEXT(item_gre_key, NEXT_ENTRY(UNSIGNED), item_param),
2368 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2372 .help = "match GTP extension header with type 0x85",
2373 .priv = PRIV_ITEM(GTP_PSC,
2374 sizeof(struct rte_flow_item_gtp_psc)),
2375 .next = NEXT(item_gtp_psc),
2378 [ITEM_GTP_PSC_QFI] = {
2380 .help = "QoS flow identifier",
2381 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2382 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2385 [ITEM_GTP_PSC_PDU_T] = {
2388 .next = NEXT(item_gtp_psc, NEXT_ENTRY(UNSIGNED), item_param),
2389 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp_psc,
2394 .help = "match PPPoE session header",
2395 .priv = PRIV_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
2396 .next = NEXT(item_pppoes),
2401 .help = "match PPPoE discovery header",
2402 .priv = PRIV_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
2403 .next = NEXT(item_pppoed),
2406 [ITEM_PPPOE_SEID] = {
2408 .help = "session identifier",
2409 .next = NEXT(item_pppoes, NEXT_ENTRY(UNSIGNED), item_param),
2410 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_pppoe,
2413 [ITEM_PPPOE_PROTO_ID] = {
2415 .help = "match PPPoE session protocol identifier",
2416 .priv = PRIV_ITEM(PPPOE_PROTO_ID,
2417 sizeof(struct rte_flow_item_pppoe_proto_id)),
2418 .next = NEXT(item_pppoe_proto_id),
2421 /* Validate/create actions. */
2424 .help = "submit a list of associated actions",
2425 .next = NEXT(next_action),
2430 .help = "specify next action",
2431 .next = NEXT(next_action),
2435 .help = "end list of actions",
2436 .priv = PRIV_ACTION(END, 0),
2441 .help = "no-op action",
2442 .priv = PRIV_ACTION(VOID, 0),
2443 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2446 [ACTION_PASSTHRU] = {
2448 .help = "let subsequent rule process matched packets",
2449 .priv = PRIV_ACTION(PASSTHRU, 0),
2450 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2455 .help = "redirect traffic to a given group",
2456 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2457 .next = NEXT(action_jump),
2460 [ACTION_JUMP_GROUP] = {
2462 .help = "group to redirect traffic to",
2463 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2464 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2465 .call = parse_vc_conf,
2469 .help = "attach 32 bit value to packets",
2470 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2471 .next = NEXT(action_mark),
2474 [ACTION_MARK_ID] = {
2476 .help = "32 bit value to return with packets",
2477 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2478 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2479 .call = parse_vc_conf,
2483 .help = "flag packets",
2484 .priv = PRIV_ACTION(FLAG, 0),
2485 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2490 .help = "assign packets to a given queue index",
2491 .priv = PRIV_ACTION(QUEUE,
2492 sizeof(struct rte_flow_action_queue)),
2493 .next = NEXT(action_queue),
2496 [ACTION_QUEUE_INDEX] = {
2498 .help = "queue index to use",
2499 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2500 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2501 .call = parse_vc_conf,
2505 .help = "drop packets (note: passthru has priority)",
2506 .priv = PRIV_ACTION(DROP, 0),
2507 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2512 .help = "enable counters for this rule",
2513 .priv = PRIV_ACTION(COUNT,
2514 sizeof(struct rte_flow_action_count)),
2515 .next = NEXT(action_count),
2518 [ACTION_COUNT_ID] = {
2519 .name = "identifier",
2520 .help = "counter identifier to use",
2521 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2522 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2523 .call = parse_vc_conf,
2525 [ACTION_COUNT_SHARED] = {
2527 .help = "shared counter",
2528 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2529 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2531 .call = parse_vc_conf,
2535 .help = "spread packets among several queues",
2536 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2537 .next = NEXT(action_rss),
2538 .call = parse_vc_action_rss,
2540 [ACTION_RSS_FUNC] = {
2542 .help = "RSS hash function to apply",
2543 .next = NEXT(action_rss,
2544 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2545 ACTION_RSS_FUNC_TOEPLITZ,
2546 ACTION_RSS_FUNC_SIMPLE_XOR)),
2548 [ACTION_RSS_FUNC_DEFAULT] = {
2550 .help = "default hash function",
2551 .call = parse_vc_action_rss_func,
2553 [ACTION_RSS_FUNC_TOEPLITZ] = {
2555 .help = "Toeplitz hash function",
2556 .call = parse_vc_action_rss_func,
2558 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2559 .name = "simple_xor",
2560 .help = "simple XOR hash function",
2561 .call = parse_vc_action_rss_func,
2563 [ACTION_RSS_LEVEL] = {
2565 .help = "encapsulation level for \"types\"",
2566 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2567 .args = ARGS(ARGS_ENTRY_ARB
2568 (offsetof(struct action_rss_data, conf) +
2569 offsetof(struct rte_flow_action_rss, level),
2570 sizeof(((struct rte_flow_action_rss *)0)->
2573 [ACTION_RSS_TYPES] = {
2575 .help = "specific RSS hash types",
2576 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2578 [ACTION_RSS_TYPE] = {
2580 .help = "RSS hash type",
2581 .call = parse_vc_action_rss_type,
2582 .comp = comp_vc_action_rss_type,
2584 [ACTION_RSS_KEY] = {
2586 .help = "RSS hash key",
2587 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2588 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2590 (offsetof(struct action_rss_data, conf) +
2591 offsetof(struct rte_flow_action_rss, key_len),
2592 sizeof(((struct rte_flow_action_rss *)0)->
2594 ARGS_ENTRY(struct action_rss_data, key)),
2596 [ACTION_RSS_KEY_LEN] = {
2598 .help = "RSS hash key length in bytes",
2599 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2600 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2601 (offsetof(struct action_rss_data, conf) +
2602 offsetof(struct rte_flow_action_rss, key_len),
2603 sizeof(((struct rte_flow_action_rss *)0)->
2606 RSS_HASH_KEY_LENGTH)),
2608 [ACTION_RSS_QUEUES] = {
2610 .help = "queue indices to use",
2611 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2612 .call = parse_vc_conf,
2614 [ACTION_RSS_QUEUE] = {
2616 .help = "queue index",
2617 .call = parse_vc_action_rss_queue,
2618 .comp = comp_vc_action_rss_queue,
2622 .help = "direct traffic to physical function",
2623 .priv = PRIV_ACTION(PF, 0),
2624 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2629 .help = "direct traffic to a virtual function ID",
2630 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2631 .next = NEXT(action_vf),
2634 [ACTION_VF_ORIGINAL] = {
2636 .help = "use original VF ID if possible",
2637 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2638 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2640 .call = parse_vc_conf,
2645 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2646 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2647 .call = parse_vc_conf,
2649 [ACTION_PHY_PORT] = {
2651 .help = "direct packets to physical port index",
2652 .priv = PRIV_ACTION(PHY_PORT,
2653 sizeof(struct rte_flow_action_phy_port)),
2654 .next = NEXT(action_phy_port),
2657 [ACTION_PHY_PORT_ORIGINAL] = {
2659 .help = "use original port index if possible",
2660 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2661 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2663 .call = parse_vc_conf,
2665 [ACTION_PHY_PORT_INDEX] = {
2667 .help = "physical port index",
2668 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2669 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2671 .call = parse_vc_conf,
2673 [ACTION_PORT_ID] = {
2675 .help = "direct matching traffic to a given DPDK port ID",
2676 .priv = PRIV_ACTION(PORT_ID,
2677 sizeof(struct rte_flow_action_port_id)),
2678 .next = NEXT(action_port_id),
2681 [ACTION_PORT_ID_ORIGINAL] = {
2683 .help = "use original DPDK port ID if possible",
2684 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2685 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2687 .call = parse_vc_conf,
2689 [ACTION_PORT_ID_ID] = {
2691 .help = "DPDK port ID",
2692 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2693 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2694 .call = parse_vc_conf,
2698 .help = "meter the directed packets at given id",
2699 .priv = PRIV_ACTION(METER,
2700 sizeof(struct rte_flow_action_meter)),
2701 .next = NEXT(action_meter),
2704 [ACTION_METER_ID] = {
2706 .help = "meter id to use",
2707 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2708 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2709 .call = parse_vc_conf,
2711 [ACTION_OF_SET_MPLS_TTL] = {
2712 .name = "of_set_mpls_ttl",
2713 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2716 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2717 .next = NEXT(action_of_set_mpls_ttl),
2720 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2723 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2724 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2726 .call = parse_vc_conf,
2728 [ACTION_OF_DEC_MPLS_TTL] = {
2729 .name = "of_dec_mpls_ttl",
2730 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2731 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2732 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2735 [ACTION_OF_SET_NW_TTL] = {
2736 .name = "of_set_nw_ttl",
2737 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2740 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2741 .next = NEXT(action_of_set_nw_ttl),
2744 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2747 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2748 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2750 .call = parse_vc_conf,
2752 [ACTION_OF_DEC_NW_TTL] = {
2753 .name = "of_dec_nw_ttl",
2754 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2755 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2756 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2759 [ACTION_OF_COPY_TTL_OUT] = {
2760 .name = "of_copy_ttl_out",
2761 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2762 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2763 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2766 [ACTION_OF_COPY_TTL_IN] = {
2767 .name = "of_copy_ttl_in",
2768 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2769 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2770 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2773 [ACTION_OF_POP_VLAN] = {
2774 .name = "of_pop_vlan",
2775 .help = "OpenFlow's OFPAT_POP_VLAN",
2776 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2777 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2780 [ACTION_OF_PUSH_VLAN] = {
2781 .name = "of_push_vlan",
2782 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2785 sizeof(struct rte_flow_action_of_push_vlan)),
2786 .next = NEXT(action_of_push_vlan),
2789 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2790 .name = "ethertype",
2791 .help = "EtherType",
2792 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2793 .args = ARGS(ARGS_ENTRY_HTON
2794 (struct rte_flow_action_of_push_vlan,
2796 .call = parse_vc_conf,
2798 [ACTION_OF_SET_VLAN_VID] = {
2799 .name = "of_set_vlan_vid",
2800 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2803 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2804 .next = NEXT(action_of_set_vlan_vid),
2807 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2810 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2811 .args = ARGS(ARGS_ENTRY_HTON
2812 (struct rte_flow_action_of_set_vlan_vid,
2814 .call = parse_vc_conf,
2816 [ACTION_OF_SET_VLAN_PCP] = {
2817 .name = "of_set_vlan_pcp",
2818 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2821 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2822 .next = NEXT(action_of_set_vlan_pcp),
2825 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2827 .help = "VLAN priority",
2828 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2829 .args = ARGS(ARGS_ENTRY_HTON
2830 (struct rte_flow_action_of_set_vlan_pcp,
2832 .call = parse_vc_conf,
2834 [ACTION_OF_POP_MPLS] = {
2835 .name = "of_pop_mpls",
2836 .help = "OpenFlow's OFPAT_POP_MPLS",
2837 .priv = PRIV_ACTION(OF_POP_MPLS,
2838 sizeof(struct rte_flow_action_of_pop_mpls)),
2839 .next = NEXT(action_of_pop_mpls),
2842 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2843 .name = "ethertype",
2844 .help = "EtherType",
2845 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2846 .args = ARGS(ARGS_ENTRY_HTON
2847 (struct rte_flow_action_of_pop_mpls,
2849 .call = parse_vc_conf,
2851 [ACTION_OF_PUSH_MPLS] = {
2852 .name = "of_push_mpls",
2853 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2856 sizeof(struct rte_flow_action_of_push_mpls)),
2857 .next = NEXT(action_of_push_mpls),
2860 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2861 .name = "ethertype",
2862 .help = "EtherType",
2863 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2864 .args = ARGS(ARGS_ENTRY_HTON
2865 (struct rte_flow_action_of_push_mpls,
2867 .call = parse_vc_conf,
2869 [ACTION_VXLAN_ENCAP] = {
2870 .name = "vxlan_encap",
2871 .help = "VXLAN encapsulation, uses configuration set by \"set"
2873 .priv = PRIV_ACTION(VXLAN_ENCAP,
2874 sizeof(struct action_vxlan_encap_data)),
2875 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2876 .call = parse_vc_action_vxlan_encap,
2878 [ACTION_VXLAN_DECAP] = {
2879 .name = "vxlan_decap",
2880 .help = "Performs a decapsulation action by stripping all"
2881 " headers of the VXLAN tunnel network overlay from the"
2883 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2884 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2887 [ACTION_NVGRE_ENCAP] = {
2888 .name = "nvgre_encap",
2889 .help = "NVGRE encapsulation, uses configuration set by \"set"
2891 .priv = PRIV_ACTION(NVGRE_ENCAP,
2892 sizeof(struct action_nvgre_encap_data)),
2893 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2894 .call = parse_vc_action_nvgre_encap,
2896 [ACTION_NVGRE_DECAP] = {
2897 .name = "nvgre_decap",
2898 .help = "Performs a decapsulation action by stripping all"
2899 " headers of the NVGRE tunnel network overlay from the"
2901 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2902 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2905 [ACTION_L2_ENCAP] = {
2907 .help = "l2 encap, uses configuration set by"
2908 " \"set l2_encap\"",
2909 .priv = PRIV_ACTION(RAW_ENCAP,
2910 sizeof(struct action_raw_encap_data)),
2911 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2912 .call = parse_vc_action_l2_encap,
2914 [ACTION_L2_DECAP] = {
2916 .help = "l2 decap, uses configuration set by"
2917 " \"set l2_decap\"",
2918 .priv = PRIV_ACTION(RAW_DECAP,
2919 sizeof(struct action_raw_decap_data)),
2920 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2921 .call = parse_vc_action_l2_decap,
2923 [ACTION_MPLSOGRE_ENCAP] = {
2924 .name = "mplsogre_encap",
2925 .help = "mplsogre encapsulation, uses configuration set by"
2926 " \"set mplsogre_encap\"",
2927 .priv = PRIV_ACTION(RAW_ENCAP,
2928 sizeof(struct action_raw_encap_data)),
2929 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2930 .call = parse_vc_action_mplsogre_encap,
2932 [ACTION_MPLSOGRE_DECAP] = {
2933 .name = "mplsogre_decap",
2934 .help = "mplsogre decapsulation, uses configuration set by"
2935 " \"set mplsogre_decap\"",
2936 .priv = PRIV_ACTION(RAW_DECAP,
2937 sizeof(struct action_raw_decap_data)),
2938 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2939 .call = parse_vc_action_mplsogre_decap,
2941 [ACTION_MPLSOUDP_ENCAP] = {
2942 .name = "mplsoudp_encap",
2943 .help = "mplsoudp encapsulation, uses configuration set by"
2944 " \"set mplsoudp_encap\"",
2945 .priv = PRIV_ACTION(RAW_ENCAP,
2946 sizeof(struct action_raw_encap_data)),
2947 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2948 .call = parse_vc_action_mplsoudp_encap,
2950 [ACTION_MPLSOUDP_DECAP] = {
2951 .name = "mplsoudp_decap",
2952 .help = "mplsoudp decapsulation, uses configuration set by"
2953 " \"set mplsoudp_decap\"",
2954 .priv = PRIV_ACTION(RAW_DECAP,
2955 sizeof(struct action_raw_decap_data)),
2956 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2957 .call = parse_vc_action_mplsoudp_decap,
2959 [ACTION_SET_IPV4_SRC] = {
2960 .name = "set_ipv4_src",
2961 .help = "Set a new IPv4 source address in the outermost"
2963 .priv = PRIV_ACTION(SET_IPV4_SRC,
2964 sizeof(struct rte_flow_action_set_ipv4)),
2965 .next = NEXT(action_set_ipv4_src),
2968 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2969 .name = "ipv4_addr",
2970 .help = "new IPv4 source address to set",
2971 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2972 .args = ARGS(ARGS_ENTRY_HTON
2973 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2974 .call = parse_vc_conf,
2976 [ACTION_SET_IPV4_DST] = {
2977 .name = "set_ipv4_dst",
2978 .help = "Set a new IPv4 destination address in the outermost"
2980 .priv = PRIV_ACTION(SET_IPV4_DST,
2981 sizeof(struct rte_flow_action_set_ipv4)),
2982 .next = NEXT(action_set_ipv4_dst),
2985 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2986 .name = "ipv4_addr",
2987 .help = "new IPv4 destination address to set",
2988 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2989 .args = ARGS(ARGS_ENTRY_HTON
2990 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2991 .call = parse_vc_conf,
2993 [ACTION_SET_IPV6_SRC] = {
2994 .name = "set_ipv6_src",
2995 .help = "Set a new IPv6 source address in the outermost"
2997 .priv = PRIV_ACTION(SET_IPV6_SRC,
2998 sizeof(struct rte_flow_action_set_ipv6)),
2999 .next = NEXT(action_set_ipv6_src),
3002 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
3003 .name = "ipv6_addr",
3004 .help = "new IPv6 source address to set",
3005 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
3006 .args = ARGS(ARGS_ENTRY_HTON
3007 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3008 .call = parse_vc_conf,
3010 [ACTION_SET_IPV6_DST] = {
3011 .name = "set_ipv6_dst",
3012 .help = "Set a new IPv6 destination address in the outermost"
3014 .priv = PRIV_ACTION(SET_IPV6_DST,
3015 sizeof(struct rte_flow_action_set_ipv6)),
3016 .next = NEXT(action_set_ipv6_dst),
3019 [ACTION_SET_IPV6_DST_IPV6_DST] = {
3020 .name = "ipv6_addr",
3021 .help = "new IPv6 destination address to set",
3022 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
3023 .args = ARGS(ARGS_ENTRY_HTON
3024 (struct rte_flow_action_set_ipv6, ipv6_addr)),
3025 .call = parse_vc_conf,
3027 [ACTION_SET_TP_SRC] = {
3028 .name = "set_tp_src",
3029 .help = "set a new source port number in the outermost"
3031 .priv = PRIV_ACTION(SET_TP_SRC,
3032 sizeof(struct rte_flow_action_set_tp)),
3033 .next = NEXT(action_set_tp_src),
3036 [ACTION_SET_TP_SRC_TP_SRC] = {
3038 .help = "new source port number to set",
3039 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
3040 .args = ARGS(ARGS_ENTRY_HTON
3041 (struct rte_flow_action_set_tp, port)),
3042 .call = parse_vc_conf,
3044 [ACTION_SET_TP_DST] = {
3045 .name = "set_tp_dst",
3046 .help = "set a new destination port number in the outermost"
3048 .priv = PRIV_ACTION(SET_TP_DST,
3049 sizeof(struct rte_flow_action_set_tp)),
3050 .next = NEXT(action_set_tp_dst),
3053 [ACTION_SET_TP_DST_TP_DST] = {
3055 .help = "new destination port number to set",
3056 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
3057 .args = ARGS(ARGS_ENTRY_HTON
3058 (struct rte_flow_action_set_tp, port)),
3059 .call = parse_vc_conf,
3061 [ACTION_MAC_SWAP] = {
3063 .help = "Swap the source and destination MAC addresses"
3064 " in the outermost Ethernet header",
3065 .priv = PRIV_ACTION(MAC_SWAP, 0),
3066 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3069 [ACTION_DEC_TTL] = {
3071 .help = "decrease network TTL if available",
3072 .priv = PRIV_ACTION(DEC_TTL, 0),
3073 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3076 [ACTION_SET_TTL] = {
3078 .help = "set ttl value",
3079 .priv = PRIV_ACTION(SET_TTL,
3080 sizeof(struct rte_flow_action_set_ttl)),
3081 .next = NEXT(action_set_ttl),
3084 [ACTION_SET_TTL_TTL] = {
3085 .name = "ttl_value",
3086 .help = "new ttl value to set",
3087 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
3088 .args = ARGS(ARGS_ENTRY_HTON
3089 (struct rte_flow_action_set_ttl, ttl_value)),
3090 .call = parse_vc_conf,
3092 [ACTION_SET_MAC_SRC] = {
3093 .name = "set_mac_src",
3094 .help = "set source mac address",
3095 .priv = PRIV_ACTION(SET_MAC_SRC,
3096 sizeof(struct rte_flow_action_set_mac)),
3097 .next = NEXT(action_set_mac_src),
3100 [ACTION_SET_MAC_SRC_MAC_SRC] = {
3102 .help = "new source mac address",
3103 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
3104 .args = ARGS(ARGS_ENTRY_HTON
3105 (struct rte_flow_action_set_mac, mac_addr)),
3106 .call = parse_vc_conf,
3108 [ACTION_SET_MAC_DST] = {
3109 .name = "set_mac_dst",
3110 .help = "set destination mac address",
3111 .priv = PRIV_ACTION(SET_MAC_DST,
3112 sizeof(struct rte_flow_action_set_mac)),
3113 .next = NEXT(action_set_mac_dst),
3116 [ACTION_SET_MAC_DST_MAC_DST] = {
3118 .help = "new destination mac address to set",
3119 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
3120 .args = ARGS(ARGS_ENTRY_HTON
3121 (struct rte_flow_action_set_mac, mac_addr)),
3122 .call = parse_vc_conf,
3124 [ACTION_INC_TCP_SEQ] = {
3125 .name = "inc_tcp_seq",
3126 .help = "increase TCP sequence number",
3127 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
3128 .next = NEXT(action_inc_tcp_seq),
3131 [ACTION_INC_TCP_SEQ_VALUE] = {
3133 .help = "the value to increase TCP sequence number by",
3134 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3135 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3136 .call = parse_vc_conf,
3138 [ACTION_DEC_TCP_SEQ] = {
3139 .name = "dec_tcp_seq",
3140 .help = "decrease TCP sequence number",
3141 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
3142 .next = NEXT(action_dec_tcp_seq),
3145 [ACTION_DEC_TCP_SEQ_VALUE] = {
3147 .help = "the value to decrease TCP sequence number by",
3148 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
3149 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3150 .call = parse_vc_conf,
3152 [ACTION_INC_TCP_ACK] = {
3153 .name = "inc_tcp_ack",
3154 .help = "increase TCP acknowledgment number",
3155 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
3156 .next = NEXT(action_inc_tcp_ack),
3159 [ACTION_INC_TCP_ACK_VALUE] = {
3161 .help = "the value to increase TCP acknowledgment number by",
3162 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3163 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3164 .call = parse_vc_conf,
3166 [ACTION_DEC_TCP_ACK] = {
3167 .name = "dec_tcp_ack",
3168 .help = "decrease TCP acknowledgment number",
3169 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
3170 .next = NEXT(action_dec_tcp_ack),
3173 [ACTION_DEC_TCP_ACK_VALUE] = {
3175 .help = "the value to decrease TCP acknowledgment number by",
3176 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
3177 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
3178 .call = parse_vc_conf,
3180 [ACTION_RAW_ENCAP] = {
3181 .name = "raw_encap",
3182 .help = "encapsulation data, defined by set raw_encap",
3183 .priv = PRIV_ACTION(RAW_ENCAP,
3184 sizeof(struct rte_flow_action_raw_encap)),
3185 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3186 .call = parse_vc_action_raw_encap,
3188 [ACTION_RAW_DECAP] = {
3189 .name = "raw_decap",
3190 .help = "decapsulation data, defined by set raw_encap",
3191 .priv = PRIV_ACTION(RAW_DECAP,
3192 sizeof(struct rte_flow_action_raw_decap)),
3193 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
3194 .call = parse_vc_action_raw_decap,
3196 /* Top level command. */
3199 .help = "set raw encap/decap data",
3200 .type = "set raw_encap|raw_decap <pattern>",
3201 .next = NEXT(NEXT_ENTRY
3204 .call = parse_set_init,
3206 /* Sub-level commands. */
3208 .name = "raw_encap",
3209 .help = "set raw encap data",
3210 .next = NEXT(next_item),
3211 .call = parse_set_raw_encap_decap,
3214 .name = "raw_decap",
3215 .help = "set raw decap data",
3216 .next = NEXT(next_item),
3217 .call = parse_set_raw_encap_decap,
3221 /** Remove and return last entry from argument stack. */
3222 static const struct arg *
3223 pop_args(struct context *ctx)
/* Returns NULL on an empty stack instead of underflowing args_num. */
3225 	return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
3228 /** Add entry on top of the argument stack. */
3230 push_args(struct context *ctx, const struct arg *arg)
/* Reject pushes that would overflow the fixed-size stack. */
/* NOTE(review): the overflow branch's body is not visible in this chunk —
 * presumably it returns an error to the caller; confirm in the full file. */
3232 	if (ctx->args_num == CTX_STACK_SIZE)
3234 	ctx->args[ctx->args_num++] = arg;
3238 /** Spread value into buffer according to bit-mask. */
3240 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
3242 	uint32_t i = arg->size;
/* Little-endian hosts walk the mask bytes in reverse so the value lands
 * in the same bit positions as on big-endian hosts. */
3250 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3259 		unsigned int shift = 0;
3260 		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Scatter the low-order bits of val into each set bit of mask[i]. */
3262 		for (shift = 0; arg->mask[i] >> shift; ++shift) {
3263 			if (!(arg->mask[i] & (1 << shift)))
3268 			*buf &= ~(1 << shift);
3269 			*buf |= (val & 1) << shift;
3277 /** Compare a string with a partial one of a given length. */
3279 strcmp_partial(const char *full, const char *partial, size_t partial_len)
3281 	int r = strncmp(full, partial, partial_len);
/* Equal prefix: a match only if "full" is no longer than the partial
 * string; otherwise the next character of "full" is returned (non-zero). */
3285 	if (strlen(full) <= partial_len)
3287 	return full[partial_len];
3291 * Parse a prefix length and generate a bit-mask.
3293 * Last argument (ctx->args) is retrieved to determine mask size, storage
3294 * location and whether the result must use network byte ordering.
3297 parse_prefix(struct context *ctx, const struct token *token,
3298 const char *str, unsigned int len,
3299 void *buf, unsigned int size)
3301 	const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte with its n highest bits set; used for the partial
 * (non-byte-aligned) tail of the mask. */
3302 	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3309 	/* Argument is expected. */
3313 	u = strtoumax(str, &end, 0);
/* Reject parse errors and trailing garbage after the numeric prefix. */
3314 	if (errno || (size_t)(end - str) != len)
/* Bit-field arguments are filled through arg_entry_bf_fill() instead of
 * the plain byte-wise memset path below. */
3319 		extra = arg_entry_bf_fill(NULL, 0, arg);
3328 		if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3329 		    !arg_entry_bf_fill(ctx->objmask, -1, arg))
3336 	if (bytes > size || bytes + !!extra > size)
3340 	buf = (uint8_t *)ctx->object + arg->offset;
/* Host byte order determines which end of the field receives the 0xff
 * run when the result is not in network order. */
3341 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
3343 		memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3344 		memset(buf, 0x00, size - bytes);
3346 			((uint8_t *)buf)[size - bytes - 1] = conv[extra];
3350 		memset(buf, 0xff, bytes);
3351 		memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3353 			((uint8_t *)buf)[bytes] = conv[extra];
/* objmask, when present, is fully set for the field. */
3356 		memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the popped argument so the caller's state is intact. */
3359 	push_args(ctx, arg);
3363 /** Default parsing function for token name matching. */
3365 parse_default(struct context *ctx, const struct token *token,
3366 const char *str, unsigned int len,
3367 void *buf, unsigned int size)
/* Accepts the token when str is a valid (possibly partial) match of its
 * name; strcmp_partial() returns 0 on match. */
3372 	if (strcmp_partial(token->name, str, len))
3377 /** Parse flow command, initialize output buffer for subsequent tokens. */
3379 parse_init(struct context *ctx, const struct token *token,
3380 const char *str, unsigned int len,
3381 void *buf, unsigned int size)
3383 	struct buffer *out = buf;
3385 	/* Token name must match. */
3386 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3388 	/* Nothing else to do if there is no buffer. */
3391 	/* Make sure buffer is large enough. */
3392 	if (size < sizeof(*out))
3394 	/* Initialize buffer. */
/* The area past the structure is poisoned with 0x22 so stale data is
 * recognizable while later tokens carve objects out of it. */
3395 	memset(out, 0x00, sizeof(*out));
3396 	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3399 	ctx->objmask = NULL;
3403 /** Parse tokens for validate/create commands. */
3405 parse_vc(struct context *ctx, const struct token *token,
3406 const char *str, unsigned int len,
3407 void *buf, unsigned int size)
3409 	struct buffer *out = buf;
3413 	/* Token name must match. */
3414 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3416 	/* Nothing else to do if there is no buffer. */
/* First invocation: record which command (validate/create) started this
 * buffer; vc.data grows downward from the end of the buffer while the
 * pattern/action arrays grow upward from the front. */
3419 	if (!out->command) {
3420 		if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3422 		if (sizeof(*out) > size)
3424 		out->command = ctx->curr;
3427 		ctx->objmask = NULL;
3428 		out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens (ingress/egress/transfer/...) write into vc.attr. */
3432 	ctx->object = &out->args.vc.attr;
3433 	ctx->objmask = NULL;
3434 	switch (ctx->curr) {
3439 		out->args.vc.attr.ingress = 1;
3442 		out->args.vc.attr.egress = 1;
3445 		out->args.vc.attr.transfer = 1;
/* "pattern" keyword: start the rte_flow_item array right after the
 * buffer header, suitably aligned. */
3448 		out->args.vc.pattern =
3449 			(void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3451 		ctx->object = out->args.vc.pattern;
3452 		ctx->objmask = NULL;
/* "actions" keyword: the action array follows the completed pattern. */
3455 		out->args.vc.actions =
3456 			(void *)RTE_ALIGN_CEIL((uintptr_t)
3457 					       (out->args.vc.pattern +
3458 						out->args.vc.pattern_n),
3460 		ctx->object = out->args.vc.actions;
3461 		ctx->objmask = NULL;
/* Otherwise this token is a pattern item or an action; append it to the
 * relevant array and reserve its private data at the top of vc.data. */
3468 	if (!out->args.vc.actions) {
3469 		const struct parse_item_priv *priv = token->priv;
3470 		struct rte_flow_item *item =
3471 			out->args.vc.pattern + out->args.vc.pattern_n;
3473 		data_size = priv->size * 3; /* spec, last, mask */
3474 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3475 					       (out->args.vc.data - data_size),
/* Fail when item metadata and its data region would collide. */
3477 		if ((uint8_t *)item + sizeof(*item) > data)
3479 		*item = (struct rte_flow_item){
3482 		++out->args.vc.pattern_n;
3484 		ctx->objmask = NULL;
3486 		const struct parse_action_priv *priv = token->priv;
3487 		struct rte_flow_action *action =
3488 			out->args.vc.actions + out->args.vc.actions_n;
3490 		data_size = priv->size; /* configuration */
3491 		data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3492 					       (out->args.vc.data - data_size),
3494 		if ((uint8_t *)action + sizeof(*action) > data)
3496 		*action = (struct rte_flow_action){
3498 			.conf = data_size ? data : NULL,
3500 		++out->args.vc.actions_n;
3501 		ctx->object = action;
3502 		ctx->objmask = NULL;
/* Zero the reserved private area and remember its size in objdata so
 * parse_vc_spec() can split it into spec/last/mask later. */
3504 	memset(data, 0, data_size);
3505 	out->args.vc.data = data;
3506 	ctx->objdata = data_size;
3510 /** Parse pattern item parameter type. */
3512 parse_vc_spec(struct context *ctx, const struct token *token,
3513 const char *str, unsigned int len,
3514 void *buf, unsigned int size)
3516 	struct buffer *out = buf;
3517 	struct rte_flow_item *item;
3523 	/* Token name must match. */
3524 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3526 	/* Parse parameter types. */
/* index selects which third of the item's private area to target:
 * 0 = spec, 1 = last, 2 = mask. */
3527 	switch (ctx->curr) {
3528 		static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3534 	case ITEM_PARAM_SPEC:
3537 	case ITEM_PARAM_LAST:
3540 	case ITEM_PARAM_PREFIX:
3541 		/* Modify next token to expect a prefix. */
3542 		if (ctx->next_num < 2)
3544 		ctx->next[ctx->next_num - 2] = prefix;
3546 	case ITEM_PARAM_MASK:
3552 	/* Nothing else to do if there is no buffer. */
3555 	if (!out->args.vc.pattern_n)
3557 	item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3558 	data_size = ctx->objdata / 3; /* spec, last, mask */
3559 	/* Point to selected object. */
3560 	ctx->object = out->args.vc.data + (data_size * index);
3562 		ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3563 		item->mask = ctx->objmask;
3565 		ctx->objmask = NULL;
3566 	/* Update relevant item pointer. */
3567 	*((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3572 /** Parse action configuration field. */
3574 parse_vc_conf(struct context *ctx, const struct token *token,
3575 const char *str, unsigned int len,
3576 void *buf, unsigned int size)
3578 	struct buffer *out = buf;
3581 	/* Token name must match. */
3582 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3584 	/* Nothing else to do if there is no buffer. */
3587 	/* Point to selected object. */
/* Subsequent argument tokens write directly into the action's private
 * configuration area reserved by parse_vc(); no mask for actions. */
3588 	ctx->object = out->args.vc.data;
3589 	ctx->objmask = NULL;
3593 /** Parse RSS action. */
3595 parse_vc_action_rss(struct context *ctx, const struct token *token,
3596 const char *str, unsigned int len,
3597 void *buf, unsigned int size)
3599 	struct buffer *out = buf;
3600 	struct rte_flow_action *action;
3601 	struct action_rss_data *action_rss_data;
/* Let parse_vc() register the action and reserve its private area. */
3605 	ret = parse_vc(ctx, token, str, len, buf, size);
3608 	/* Nothing else to do if there is no buffer. */
3611 	if (!out->args.vc.actions_n)
3613 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3614 	/* Point to selected object. */
3615 	ctx->object = out->args.vc.data;
3616 	ctx->objmask = NULL;
3617 	/* Set up default configuration. */
/* Defaults: default hash function, testpmd's stock key, and all Rx
 * queues up to ACTION_RSS_QUEUE_NUM spread round-robin. */
3618 	action_rss_data = ctx->object;
3619 	*action_rss_data = (struct action_rss_data){
3620 		.conf = (struct rte_flow_action_rss){
3621 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3624 			.key_len = sizeof(action_rss_data->key),
3625 			.queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3626 			.key = action_rss_data->key,
3627 			.queue = action_rss_data->queue,
3629 		.key = "testpmd's default RSS hash key, "
3630 			"override it for better balancing",
3633 	for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3634 		action_rss_data->queue[i] = i;
/* If a concrete port is targeted, clamp the key length to what the
 * device actually supports. */
3635 	if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3636 	    ctx->port != (portid_t)RTE_PORT_ALL) {
3637 		struct rte_eth_dev_info info;
3640 		ret2 = rte_eth_dev_info_get(ctx->port, &info);
3644 		action_rss_data->conf.key_len =
3645 			RTE_MIN(sizeof(action_rss_data->key),
3646 				info.hash_key_size);
3648 	action->conf = &action_rss_data->conf;
3653 * Parse func field for RSS action.
3655 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3656 * ACTION_RSS_FUNC_* index that called this function.
3659 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3660 const char *str, unsigned int len,
3661 void *buf, unsigned int size)
3663 	struct action_rss_data *action_rss_data;
3664 	enum rte_eth_hash_function func;
3668 	/* Token name must match. */
3669 	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token index to the corresponding hash function enum. */
3671 	switch (ctx->curr) {
3672 	case ACTION_RSS_FUNC_DEFAULT:
3673 		func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3675 	case ACTION_RSS_FUNC_TOEPLITZ:
3676 		func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3678 	case ACTION_RSS_FUNC_SIMPLE_XOR:
3679 		func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object points at the RSS private data set up by
 * parse_vc_action_rss(). */
3686 	action_rss_data = ctx->object;
3687 	action_rss_data->conf.func = func;
3692 * Parse type field for RSS action.
3694 * Valid tokens are type field names and the "end" token.
3697 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3698 const char *str, unsigned int len,
3699 void *buf, unsigned int size)
3701 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3702 	struct action_rss_data *action_rss_data;
3708 	if (ctx->curr != ACTION_RSS_TYPE)
/* objdata's high 16 bits flag whether a type was already seen; on the
 * first type token, reset the accumulated types bit-field. */
3710 	if (!(ctx->objdata >> 16) && ctx->object) {
3711 		action_rss_data = ctx->object;
3712 		action_rss_data->conf.types = 0;
/* "end" terminates the list: clear the flag and stop chaining. */
3714 	if (!strcmp_partial("end", str, len)) {
3715 		ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
3718 	for (i = 0; rss_type_table[i].str; ++i)
3719 		if (!strcmp_partial(rss_type_table[i].str, str, len))
3721 	if (!rss_type_table[i].str)
3723 	ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat this token so several types can be given in sequence. */
3725 	if (ctx->next_num == RTE_DIM(ctx->next))
3727 	ctx->next[ctx->next_num++] = next;
3730 	action_rss_data = ctx->object;
3731 	action_rss_data->conf.types |= rss_type_table[i].rss_type;
3736 * Parse queue field for RSS action.
3738 * Valid tokens are queue indices and the "end" token.
3741 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3742 const char *str, unsigned int len,
3743 void *buf, unsigned int size)
3745 	static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3746 	struct action_rss_data *action_rss_data;
3747 	const struct arg *arg;
3754 	if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of objdata carry the number of queues seen so far. */
3756 	i = ctx->objdata >> 16;
3757 	if (!strcmp_partial("end", str, len)) {
3758 		ctx->objdata &= 0xffff;
3761 	if (i >= ACTION_RSS_QUEUE_NUM)
/* Build an arbitrary-offset argument descriptor targeting queue[i]
 * inside action_rss_data, then parse the index into it. */
3763 	arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3764 			     i * sizeof(action_rss_data->queue[i]),
3765 			     sizeof(action_rss_data->queue[i]));
3766 	if (push_args(ctx, arg))
3768 	ret = parse_int(ctx, token, str, len, NULL, 0);
3774 	ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Chain another queue token so indices can be listed until "end". */
3776 	if (ctx->next_num == RTE_DIM(ctx->next))
3778 	ctx->next[ctx->next_num++] = next;
3782 	action_rss_data = ctx->object;
3783 	action_rss_data->conf.queue_num = i;
/* An empty queue list yields a NULL queue pointer. */
3784 	action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3788 /** Parse VXLAN encap action. */
3790 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3791 const char *str, unsigned int len,
3792 void *buf, unsigned int size)
3794 	struct buffer *out = buf;
3795 	struct rte_flow_action *action;
3796 	struct action_vxlan_encap_data *action_vxlan_encap_data;
3799 	ret = parse_vc(ctx, token, str, len, buf, size);
3802 	/* Nothing else to do if there is no buffer. */
3805 	if (!out->args.vc.actions_n)
3807 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3808 	/* Point to selected object. */
3809 	ctx->object = out->args.vc.data;
3810 	ctx->objmask = NULL;
3811 	/* Set up default configuration. */
/* Build the encapsulation item chain ETH/VLAN/IPV4/UDP/VXLAN/END from
 * the global "set vxlan" configuration (vxlan_encap_conf). Slots may be
 * rewritten below for IPv6 or voided when VLAN is not selected. */
3812 	action_vxlan_encap_data = ctx->object;
3813 	*action_vxlan_encap_data = (struct action_vxlan_encap_data){
3814 		.conf = (struct rte_flow_action_vxlan_encap){
3815 			.definition = action_vxlan_encap_data->items,
3819 				.type = RTE_FLOW_ITEM_TYPE_ETH,
3820 				.spec = &action_vxlan_encap_data->item_eth,
3821 				.mask = &rte_flow_item_eth_mask,
3824 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
3825 				.spec = &action_vxlan_encap_data->item_vlan,
3826 				.mask = &rte_flow_item_vlan_mask,
3829 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
3830 				.spec = &action_vxlan_encap_data->item_ipv4,
3831 				.mask = &rte_flow_item_ipv4_mask,
3834 				.type = RTE_FLOW_ITEM_TYPE_UDP,
3835 				.spec = &action_vxlan_encap_data->item_udp,
3836 				.mask = &rte_flow_item_udp_mask,
3839 				.type = RTE_FLOW_ITEM_TYPE_VXLAN,
3840 				.spec = &action_vxlan_encap_data->item_vxlan,
3841 				.mask = &rte_flow_item_vxlan_mask,
3844 				.type = RTE_FLOW_ITEM_TYPE_END,
3849 			.tci = vxlan_encap_conf.vlan_tci,
3853 			.src_addr = vxlan_encap_conf.ipv4_src,
3854 			.dst_addr = vxlan_encap_conf.ipv4_dst,
3857 			.src_port = vxlan_encap_conf.udp_src,
3858 			.dst_port = vxlan_encap_conf.udp_dst,
3860 		.item_vxlan.flags = 0,
3862 	memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3863 	       vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3864 	memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3865 	       vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 item (slot 2) with an IPv6 one. */
3866 	if (!vxlan_encap_conf.select_ipv4) {
3867 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3868 		       &vxlan_encap_conf.ipv6_src,
3869 		       sizeof(vxlan_encap_conf.ipv6_src));
3870 		memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3871 		       &vxlan_encap_conf.ipv6_dst,
3872 		       sizeof(vxlan_encap_conf.ipv6_dst));
3873 		action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3874 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
3875 			.spec = &action_vxlan_encap_data->item_ipv6,
3876 			.mask = &rte_flow_item_ipv6_mask,
/* No VLAN configured: neutralize slot 1 instead of shifting the array. */
3879 	if (!vxlan_encap_conf.select_vlan)
3880 		action_vxlan_encap_data->items[1].type =
3881 			RTE_FLOW_ITEM_TYPE_VOID;
/* Optional TOS/TTL: widen the IP mask (static storage so the pointer
 * stays valid after return) and fill the corresponding spec fields. */
3882 	if (vxlan_encap_conf.select_tos_ttl) {
3883 		if (vxlan_encap_conf.select_ipv4) {
3884 			static struct rte_flow_item_ipv4 ipv4_mask_tos;
3886 			memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3887 			       sizeof(ipv4_mask_tos));
3888 			ipv4_mask_tos.hdr.type_of_service = 0xff;
3889 			ipv4_mask_tos.hdr.time_to_live = 0xff;
3890 			action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3891 					vxlan_encap_conf.ip_tos;
3892 			action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3893 					vxlan_encap_conf.ip_ttl;
3894 			action_vxlan_encap_data->items[2].mask =
/* IPv6 traffic class lives inside vtc_flow; shift into place. */
3897 			static struct rte_flow_item_ipv6 ipv6_mask_tos;
3899 			memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3900 			       sizeof(ipv6_mask_tos));
3901 			ipv6_mask_tos.hdr.vtc_flow |=
3902 				RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3903 			ipv6_mask_tos.hdr.hop_limits = 0xff;
3904 			action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3906 				((uint32_t)vxlan_encap_conf.ip_tos <<
3907 				 RTE_IPV6_HDR_TC_SHIFT);
3908 			action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3909 					vxlan_encap_conf.ip_ttl;
3910 			action_vxlan_encap_data->items[2].mask =
3914 	memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3915 	       RTE_DIM(vxlan_encap_conf.vni));
3916 	action->conf = &action_vxlan_encap_data->conf;
3920 /** Parse NVGRE encap action. */
3922 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3923 const char *str, unsigned int len,
3924 void *buf, unsigned int size)
3926 	struct buffer *out = buf;
3927 	struct rte_flow_action *action;
3928 	struct action_nvgre_encap_data *action_nvgre_encap_data;
3931 	ret = parse_vc(ctx, token, str, len, buf, size);
3934 	/* Nothing else to do if there is no buffer. */
3937 	if (!out->args.vc.actions_n)
3939 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3940 	/* Point to selected object. */
3941 	ctx->object = out->args.vc.data;
3942 	ctx->objmask = NULL;
3943 	/* Set up default configuration. */
/* Item chain ETH/VLAN/IPV4/NVGRE/END populated from the global
 * "set nvgre" configuration; mirrors the VXLAN encap parser. */
3944 	action_nvgre_encap_data = ctx->object;
3945 	*action_nvgre_encap_data = (struct action_nvgre_encap_data){
3946 		.conf = (struct rte_flow_action_nvgre_encap){
3947 			.definition = action_nvgre_encap_data->items,
3951 				.type = RTE_FLOW_ITEM_TYPE_ETH,
3952 				.spec = &action_nvgre_encap_data->item_eth,
3953 				.mask = &rte_flow_item_eth_mask,
3956 				.type = RTE_FLOW_ITEM_TYPE_VLAN,
3957 				.spec = &action_nvgre_encap_data->item_vlan,
3958 				.mask = &rte_flow_item_vlan_mask,
3961 				.type = RTE_FLOW_ITEM_TYPE_IPV4,
3962 				.spec = &action_nvgre_encap_data->item_ipv4,
3963 				.mask = &rte_flow_item_ipv4_mask,
3966 				.type = RTE_FLOW_ITEM_TYPE_NVGRE,
3967 				.spec = &action_nvgre_encap_data->item_nvgre,
3968 				.mask = &rte_flow_item_nvgre_mask,
3971 				.type = RTE_FLOW_ITEM_TYPE_END,
3976 			.tci = nvgre_encap_conf.vlan_tci,
3980 			.src_addr = nvgre_encap_conf.ipv4_src,
3981 			.dst_addr = nvgre_encap_conf.ipv4_dst,
3983 		.item_nvgre.flow_id = 0,
3985 	memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3986 	       nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3987 	memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3988 	       nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 item (slot 2) with an IPv6 one. */
3989 	if (!nvgre_encap_conf.select_ipv4) {
3990 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3991 		       &nvgre_encap_conf.ipv6_src,
3992 		       sizeof(nvgre_encap_conf.ipv6_src));
3993 		memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3994 		       &nvgre_encap_conf.ipv6_dst,
3995 		       sizeof(nvgre_encap_conf.ipv6_dst));
3996 		action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3997 			.type = RTE_FLOW_ITEM_TYPE_IPV6,
3998 			.spec = &action_nvgre_encap_data->item_ipv6,
3999 			.mask = &rte_flow_item_ipv6_mask,
/* No VLAN configured: neutralize slot 1 with a VOID item. */
4002 	if (!nvgre_encap_conf.select_vlan)
4003 		action_nvgre_encap_data->items[1].type =
4004 			RTE_FLOW_ITEM_TYPE_VOID;
4005 	memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
4006 	       RTE_DIM(nvgre_encap_conf.tni));
4007 	action->conf = &action_nvgre_encap_data->conf;
4011 /** Parse l2 encap action. */
4013 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
4014 const char *str, unsigned int len,
4015 void *buf, unsigned int size)
4017 	struct buffer *out = buf;
4018 	struct rte_flow_action *action;
4019 	struct action_raw_encap_data *action_encap_data;
4020 	struct rte_flow_item_eth eth = { .type = 0, };
/* NOTE(review): vlan TCI is taken from mplsoudp_encap_conf, not an
 * l2_encap-specific config — looks intentional (shared "set" state)
 * but worth confirming against the set-raw commands. */
4021 	struct rte_flow_item_vlan vlan = {
4022 		.tci = mplsoudp_encap_conf.vlan_tci,
4028 	ret = parse_vc(ctx, token, str, len, buf, size);
4031 	/* Nothing else to do if there is no buffer. */
4034 	if (!out->args.vc.actions_n)
4036 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4037 	/* Point to selected object. */
4038 	ctx->object = out->args.vc.data;
4039 	ctx->objmask = NULL;
4040 	/* Copy the headers to the buffer. */
/* The L2 encap is expressed as a RAW_ENCAP action whose data is the
 * serialized Ethernet (+ optional VLAN) header. */
4041 	action_encap_data = ctx->object;
4042 	*action_encap_data = (struct action_raw_encap_data) {
4043 		.conf = (struct rte_flow_action_raw_encap){
4044 			.data = action_encap_data->data,
4048 	header = action_encap_data->data;
/* Pick the outer Ethertype from the l2_encap configuration. */
4049 	if (l2_encap_conf.select_vlan)
4050 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4051 	else if (l2_encap_conf.select_ipv4)
4052 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4054 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4055 	memcpy(eth.dst.addr_bytes,
4056 	       l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4057 	memcpy(eth.src.addr_bytes,
4058 	       l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4059 	memcpy(header, &eth, sizeof(eth));
4060 	header += sizeof(eth);
4061 	if (l2_encap_conf.select_vlan) {
4062 		if (l2_encap_conf.select_ipv4)
4063 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4065 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4066 		memcpy(header, &vlan, sizeof(vlan));
4067 		header += sizeof(vlan);
/* Size is however many header bytes were actually serialized. */
4069 	action_encap_data->conf.size = header -
4070 		action_encap_data->data;
4071 	action->conf = &action_encap_data->conf;
4075 /** Parse l2 decap action. */
4077 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
4078 const char *str, unsigned int len,
4079 void *buf, unsigned int size)
4081 	struct buffer *out = buf;
4082 	struct rte_flow_action *action;
4083 	struct action_raw_decap_data *action_decap_data;
4084 	struct rte_flow_item_eth eth = { .type = 0, };
/* NOTE(review): TCI comes from mplsoudp_encap_conf here as well —
 * confirm this shared-config use is intended. */
4085 	struct rte_flow_item_vlan vlan = {
4086 		.tci = mplsoudp_encap_conf.vlan_tci,
4092 	ret = parse_vc(ctx, token, str, len, buf, size);
4095 	/* Nothing else to do if there is no buffer. */
4098 	if (!out->args.vc.actions_n)
4100 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4101 	/* Point to selected object. */
4102 	ctx->object = out->args.vc.data;
4103 	ctx->objmask = NULL;
4104 	/* Copy the headers to the buffer. */
/* L2 decap is a RAW_DECAP action describing the headers to strip:
 * Ethernet plus an optional VLAN tag. */
4105 	action_decap_data = ctx->object;
4106 	*action_decap_data = (struct action_raw_decap_data) {
4107 		.conf = (struct rte_flow_action_raw_decap){
4108 			.data = action_decap_data->data,
4112 	header = action_decap_data->data;
4113 	if (l2_decap_conf.select_vlan)
4114 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4115 	memcpy(header, &eth, sizeof(eth));
4116 	header += sizeof(eth);
4117 	if (l2_decap_conf.select_vlan) {
4118 		memcpy(header, &vlan, sizeof(vlan));
4119 		header += sizeof(vlan);
4121 	action_decap_data->conf.size = header -
4122 		action_decap_data->data;
4123 	action->conf = &action_decap_data->conf;
/* IANA Ethertype for MPLS unicast; written into GRE/UDP "protocol"
 * fields by the MPLSoGRE/MPLSoUDP encap/decap parsers below. */
4127 #define ETHER_TYPE_MPLS_UNICAST 0x8847
4129 /** Parse MPLSOGRE encap action. */
4131 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
4132 const char *str, unsigned int len,
4133 void *buf, unsigned int size)
4135 	struct buffer *out = buf;
4136 	struct rte_flow_action *action;
4137 	struct action_raw_encap_data *action_encap_data;
/* Header templates filled from the global "set mplsogre_encap" config;
 * they are serialized into the RAW_ENCAP data buffer below. */
4138 	struct rte_flow_item_eth eth = { .type = 0, };
4139 	struct rte_flow_item_vlan vlan = {
4140 		.tci = mplsogre_encap_conf.vlan_tci,
4143 	struct rte_flow_item_ipv4 ipv4 = {
4145 		.src_addr = mplsogre_encap_conf.ipv4_src,
4146 		.dst_addr = mplsogre_encap_conf.ipv4_dst,
4147 		.next_proto_id = IPPROTO_GRE,
4148 		.version_ihl = RTE_IPV4_VHL_DEF,
4149 		.time_to_live = IPDEFTTL,
4152 	struct rte_flow_item_ipv6 ipv6 = {
4154 		.proto = IPPROTO_GRE,
4155 		.hop_limits = IPDEFTTL,
4158 	struct rte_flow_item_gre gre = {
4159 		.protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4161 	struct rte_flow_item_mpls mpls;
4165 	ret = parse_vc(ctx, token, str, len, buf, size);
4168 	/* Nothing else to do if there is no buffer. */
4171 	if (!out->args.vc.actions_n)
4173 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4174 	/* Point to selected object. */
4175 	ctx->object = out->args.vc.data;
4176 	ctx->objmask = NULL;
4177 	/* Copy the headers to the buffer. */
4178 	action_encap_data = ctx->object;
4179 	*action_encap_data = (struct action_raw_encap_data) {
4180 		.conf = (struct rte_flow_action_raw_encap){
4181 			.data = action_encap_data->data,
/* Serialize ETH [VLAN] IPv4|IPv6 GRE MPLS in order into the buffer. */
4186 	header = action_encap_data->data;
4187 	if (mplsogre_encap_conf.select_vlan)
4188 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4189 	else if (mplsogre_encap_conf.select_ipv4)
4190 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4192 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4193 	memcpy(eth.dst.addr_bytes,
4194 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4195 	memcpy(eth.src.addr_bytes,
4196 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4197 	memcpy(header, &eth, sizeof(eth));
4198 	header += sizeof(eth);
4199 	if (mplsogre_encap_conf.select_vlan) {
4200 		if (mplsogre_encap_conf.select_ipv4)
4201 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4203 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4204 		memcpy(header, &vlan, sizeof(vlan));
4205 		header += sizeof(vlan);
4207 	if (mplsogre_encap_conf.select_ipv4) {
4208 		memcpy(header, &ipv4, sizeof(ipv4));
4209 		header += sizeof(ipv4);
4211 		memcpy(&ipv6.hdr.src_addr,
4212 		       &mplsogre_encap_conf.ipv6_src,
4213 		       sizeof(mplsogre_encap_conf.ipv6_src));
4214 		memcpy(&ipv6.hdr.dst_addr,
4215 		       &mplsogre_encap_conf.ipv6_dst,
4216 		       sizeof(mplsogre_encap_conf.ipv6_dst));
4217 		memcpy(header, &ipv6, sizeof(ipv6));
4218 		header += sizeof(ipv6);
4220 	memcpy(header, &gre, sizeof(gre));
4221 	header += sizeof(gre);
4222 	memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
4223 	       RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack (S) bit in the last label byte. */
4224 	mpls.label_tc_s[2] |= 0x1;
4225 	memcpy(header, &mpls, sizeof(mpls));
4226 	header += sizeof(mpls);
4227 	action_encap_data->conf.size = header -
4228 		action_encap_data->data;
4229 	action->conf = &action_encap_data->conf;
4233 /** Parse MPLSOGRE decap action. */
4235 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
4236 const char *str, unsigned int len,
4237 void *buf, unsigned int size)
4239 	struct buffer *out = buf;
4240 	struct rte_flow_action *action;
4241 	struct action_raw_decap_data *action_decap_data;
4242 	struct rte_flow_item_eth eth = { .type = 0, };
4243 	struct rte_flow_item_vlan vlan = {.tci = 0};
4244 	struct rte_flow_item_ipv4 ipv4 = {
4246 		.next_proto_id = IPPROTO_GRE,
4249 	struct rte_flow_item_ipv6 ipv6 = {
4251 		.proto = IPPROTO_GRE,
4254 	struct rte_flow_item_gre gre = {
4255 		.protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
4257 	struct rte_flow_item_mpls mpls;
4261 	ret = parse_vc(ctx, token, str, len, buf, size);
4264 	/* Nothing else to do if there is no buffer. */
4267 	if (!out->args.vc.actions_n)
4269 	action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4270 	/* Point to selected object. */
4271 	ctx->object = out->args.vc.data;
4272 	ctx->objmask = NULL;
4273 	/* Copy the headers to the buffer. */
/* RAW_DECAP data describes the header stack to strip; it mirrors the
 * MPLSoGRE encap layout. */
4274 	action_decap_data = ctx->object;
4275 	*action_decap_data = (struct action_raw_decap_data) {
4276 		.conf = (struct rte_flow_action_raw_decap){
4277 			.data = action_decap_data->data,
4281 	header = action_decap_data->data;
4282 	if (mplsogre_decap_conf.select_vlan)
4283 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/* NOTE(review): from here on the ENCAP config (mplsogre_encap_conf) is
 * consulted rather than the decap one — presumably decap is expected to
 * mirror the encap setup; confirm against the "set mplsogre_decap"
 * command semantics. */
4284 	else if (mplsogre_encap_conf.select_ipv4)
4285 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4287 		eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4288 	memcpy(eth.dst.addr_bytes,
4289 	       mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4290 	memcpy(eth.src.addr_bytes,
4291 	       mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
4292 	memcpy(header, &eth, sizeof(eth));
4293 	header += sizeof(eth);
4294 	if (mplsogre_encap_conf.select_vlan) {
4295 		if (mplsogre_encap_conf.select_ipv4)
4296 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4298 			vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4299 		memcpy(header, &vlan, sizeof(vlan));
4300 		header += sizeof(vlan);
4302 	if (mplsogre_encap_conf.select_ipv4) {
4303 		memcpy(header, &ipv4, sizeof(ipv4));
4304 		header += sizeof(ipv4);
4306 		memcpy(header, &ipv6, sizeof(ipv6));
4307 		header += sizeof(ipv6);
4309 	memcpy(header, &gre, sizeof(gre));
4310 	header += sizeof(gre);
/* MPLS label content is irrelevant for decap: zeroed placeholder. */
4311 	memset(&mpls, 0, sizeof(mpls));
4312 	memcpy(header, &mpls, sizeof(mpls));
4313 	header += sizeof(mpls);
4314 	action_decap_data->conf.size = header -
4315 		action_decap_data->data;
4316 	action->conf = &action_decap_data->conf;
4320 /** Parse MPLSOUDP encap action. */
/*
 * Builds a raw_encap action whose data buffer is an
 * Ethernet [/VLAN] / IPv4-or-IPv6 / UDP / MPLS header stack taken from the
 * global mplsoudp_encap_conf, then points the current flow action at it.
 * Returns what parse_vc() returned (token length on success).
 */
4322 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4323 const char *str, unsigned int len,
4324 void *buf, unsigned int size)
4326 struct buffer *out = buf;
4327 struct rte_flow_action *action;
4328 struct action_raw_encap_data *action_encap_data;
4329 struct rte_flow_item_eth eth = { .type = 0, };
4330 struct rte_flow_item_vlan vlan = {
4331 .tci = mplsoudp_encap_conf.vlan_tci,
4334 struct rte_flow_item_ipv4 ipv4 = {
4336 .src_addr = mplsoudp_encap_conf.ipv4_src,
4337 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4338 .next_proto_id = IPPROTO_UDP,
4339 .version_ihl = RTE_IPV4_VHL_DEF,
4340 .time_to_live = IPDEFTTL,
4343 struct rte_flow_item_ipv6 ipv6 = {
4345 .proto = IPPROTO_UDP,
4346 .hop_limits = IPDEFTTL,
4349 struct rte_flow_item_udp udp = {
4351 .src_port = mplsoudp_encap_conf.udp_src,
4352 .dst_port = mplsoudp_encap_conf.udp_dst,
4355 struct rte_flow_item_mpls mpls;
/* Delegate generic action parsing first; only decorate on success. */
4359 ret = parse_vc(ctx, token, str, len, buf, size);
4362 /* Nothing else to do if there is no buffer. */
4365 if (!out->args.vc.actions_n)
4367 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4368 /* Point to selected object. */
4369 ctx->object = out->args.vc.data;
4370 ctx->objmask = NULL;
4371 /* Copy the headers to the buffer. */
4372 action_encap_data = ctx->object;
4373 *action_encap_data = (struct action_raw_encap_data) {
4374 .conf = (struct rte_flow_action_raw_encap){
4375 .data = action_encap_data->data,
4380 header = action_encap_data->data;
/* Outer Ethernet type depends on whether VLAN and/or IPv4 are selected. */
4381 if (mplsoudp_encap_conf.select_vlan)
4382 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4383 else if (mplsoudp_encap_conf.select_ipv4)
4384 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4386 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4387 memcpy(eth.dst.addr_bytes,
4388 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4389 memcpy(eth.src.addr_bytes,
4390 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Fixed: "&eth" had been corrupted into the mojibake character "ð". */
4391 memcpy(header, &eth, sizeof(eth));
4392 header += sizeof(eth);
4393 if (mplsoudp_encap_conf.select_vlan) {
4394 if (mplsoudp_encap_conf.select_ipv4)
4395 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4397 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4398 memcpy(header, &vlan, sizeof(vlan));
4399 header += sizeof(vlan);
4401 if (mplsoudp_encap_conf.select_ipv4) {
4402 memcpy(header, &ipv4, sizeof(ipv4));
4403 header += sizeof(ipv4);
4405 memcpy(&ipv6.hdr.src_addr,
4406 &mplsoudp_encap_conf.ipv6_src,
4407 sizeof(mplsoudp_encap_conf.ipv6_src));
4408 memcpy(&ipv6.hdr.dst_addr,
4409 &mplsoudp_encap_conf.ipv6_dst,
4410 sizeof(mplsoudp_encap_conf.ipv6_dst));
4411 memcpy(header, &ipv6, sizeof(ipv6));
4412 header += sizeof(ipv6);
4414 memcpy(header, &udp, sizeof(udp));
4415 header += sizeof(udp);
/* Copy configured MPLS label; set the bottom-of-stack (S) bit. */
4416 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4417 RTE_DIM(mplsoudp_encap_conf.label));
4418 mpls.label_tc_s[2] |= 0x1;
4419 memcpy(header, &mpls, sizeof(mpls));
4420 header += sizeof(mpls);
/* Record total header length actually written. */
4421 action_encap_data->conf.size = header -
4422 action_encap_data->data;
4423 action->conf = &action_encap_data->conf;
4427 /** Parse MPLSOUDP decap action. */
/*
 * Builds a raw_decap action describing the header stack to strip
 * (Ethernet [/VLAN] / IPv4-or-IPv6 / UDP(6635) / MPLS). Only the layout
 * matters for decap, hence mostly-zero header contents.
 * NOTE(review): most selectors below read mplsoudp_encap_conf, not
 * mplsoudp_decap_conf — this mirrors the encap layout; confirm intentional.
 */
4429 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4430 const char *str, unsigned int len,
4431 void *buf, unsigned int size)
4433 struct buffer *out = buf;
4434 struct rte_flow_action *action;
4435 struct action_raw_decap_data *action_decap_data;
4436 struct rte_flow_item_eth eth = { .type = 0, };
4437 struct rte_flow_item_vlan vlan = {.tci = 0};
4438 struct rte_flow_item_ipv4 ipv4 = {
4440 .next_proto_id = IPPROTO_UDP,
4443 struct rte_flow_item_ipv6 ipv6 = {
4445 .proto = IPPROTO_UDP,
4448 struct rte_flow_item_udp udp = {
4450 .dst_port = rte_cpu_to_be_16(6635),
4453 struct rte_flow_item_mpls mpls;
4457 ret = parse_vc(ctx, token, str, len, buf, size);
4460 /* Nothing else to do if there is no buffer. */
4463 if (!out->args.vc.actions_n)
4465 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4466 /* Point to selected object. */
4467 ctx->object = out->args.vc.data;
4468 ctx->objmask = NULL;
4469 /* Copy the headers to the buffer. */
4470 action_decap_data = ctx->object;
4471 *action_decap_data = (struct action_raw_decap_data) {
4472 .conf = (struct rte_flow_action_raw_decap){
4473 .data = action_decap_data->data,
4477 header = action_decap_data->data;
4478 if (mplsoudp_decap_conf.select_vlan)
4479 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4480 else if (mplsoudp_encap_conf.select_ipv4)
4481 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4483 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4484 memcpy(eth.dst.addr_bytes,
4485 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4486 memcpy(eth.src.addr_bytes,
4487 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* Fixed: "&eth" had been corrupted into the mojibake character "ð". */
4488 memcpy(header, &eth, sizeof(eth));
4489 header += sizeof(eth);
4490 if (mplsoudp_encap_conf.select_vlan) {
4491 if (mplsoudp_encap_conf.select_ipv4)
4492 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4494 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4495 memcpy(header, &vlan, sizeof(vlan));
4496 header += sizeof(vlan);
4498 if (mplsoudp_encap_conf.select_ipv4) {
4499 memcpy(header, &ipv4, sizeof(ipv4));
4500 header += sizeof(ipv4);
4502 memcpy(header, &ipv6, sizeof(ipv6));
4503 header += sizeof(ipv6);
4505 memcpy(header, &udp, sizeof(udp));
4506 header += sizeof(udp);
/* MPLS content is irrelevant for decap; zero it. */
4507 memset(&mpls, 0, sizeof(mpls));
4508 memcpy(header, &mpls, sizeof(mpls));
4509 header += sizeof(mpls);
4510 action_decap_data->conf.size = header -
4511 action_decap_data->data;
4512 action->conf = &action_decap_data->conf;
/*
 * Parse the raw_encap action: attach the globally configured raw_encap_conf
 * buffer (set beforehand via "set raw_encap") to the current flow action.
 */
4517 parse_vc_action_raw_encap(struct context *ctx, const struct token *token,
4518 const char *str, unsigned int len, void *buf,
4521 struct buffer *out = buf;
4522 struct rte_flow_action *action;
4523 struct rte_flow_action_raw_encap *action_raw_encap_conf = NULL;
4524 uint8_t *data = NULL;
4527 ret = parse_vc(ctx, token, str, len, buf, size);
4530 /* Nothing else to do if there is no buffer. */
4533 if (!out->args.vc.actions_n)
4535 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4536 /* Point to selected object. */
4537 ctx->object = out->args.vc.data;
4538 ctx->objmask = NULL;
4539 /* Copy the headers to the buffer. */
4540 action_raw_encap_conf = ctx->object;
4541 /* data stored from tail of data buffer */
4542 data = (uint8_t *)&(raw_encap_conf.data) +
4543 ACTION_RAW_ENCAP_MAX_DATA - raw_encap_conf.size;
4544 action_raw_encap_conf->data = data;
4545 action_raw_encap_conf->preserve = NULL;
4546 action_raw_encap_conf->size = raw_encap_conf.size;
4547 action->conf = action_raw_encap_conf;
/*
 * Parse the raw_decap action: attach the globally configured raw_decap_conf
 * buffer (set beforehand via "set raw_decap") to the current flow action.
 * Mirrors parse_vc_action_raw_encap() but without the preserve field.
 */
4552 parse_vc_action_raw_decap(struct context *ctx, const struct token *token,
4553 const char *str, unsigned int len, void *buf,
4556 struct buffer *out = buf;
4557 struct rte_flow_action *action;
4558 struct rte_flow_action_raw_decap *action_raw_decap_conf = NULL;
4559 uint8_t *data = NULL;
4562 ret = parse_vc(ctx, token, str, len, buf, size);
4565 /* Nothing else to do if there is no buffer. */
4568 if (!out->args.vc.actions_n)
4570 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4571 /* Point to selected object. */
4572 ctx->object = out->args.vc.data;
4573 ctx->objmask = NULL;
4574 /* Copy the headers to the buffer. */
4575 action_raw_decap_conf = ctx->object;
4576 /* data stored from tail of data buffer */
4577 data = (uint8_t *)&(raw_decap_conf.data) +
4578 ACTION_RAW_ENCAP_MAX_DATA - raw_decap_conf.size;
4579 action_raw_decap_conf->data = data;
4580 action_raw_decap_conf->size = raw_decap_conf.size;
4581 action->conf = action_raw_decap_conf;
4585 /** Parse tokens for destroy command. */
/*
 * First invocation initializes out->command and the rule-ID array right
 * after the buffer header; subsequent invocations append one rule ID each,
 * bounds-checked against the caller-provided size.
 */
4587 parse_destroy(struct context *ctx, const struct token *token,
4588 const char *str, unsigned int len,
4589 void *buf, unsigned int size)
4591 struct buffer *out = buf;
4593 /* Token name must match. */
4594 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4596 /* Nothing else to do if there is no buffer. */
4599 if (!out->command) {
4600 if (ctx->curr != DESTROY)
4602 if (sizeof(*out) > size)
4604 out->command = ctx->curr;
4607 ctx->objmask = NULL;
4608 out->args.destroy.rule =
4609 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Ensure the next rule-ID slot still fits inside the buffer. */
4613 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4614 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4617 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4618 ctx->objmask = NULL;
4622 /** Parse tokens for flush command. */
/* Only records the FLUSH command in the output buffer; takes no arguments. */
4624 parse_flush(struct context *ctx, const struct token *token,
4625 const char *str, unsigned int len,
4626 void *buf, unsigned int size)
4628 struct buffer *out = buf;
4630 /* Token name must match. */
4631 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4633 /* Nothing else to do if there is no buffer. */
4636 if (!out->command) {
4637 if (ctx->curr != FLUSH)
4639 if (sizeof(*out) > size)
4641 out->command = ctx->curr;
4644 ctx->objmask = NULL;
4649 /** Parse tokens for query command. */
/* Records the QUERY command; rule ID and action are filled by later tokens. */
4651 parse_query(struct context *ctx, const struct token *token,
4652 const char *str, unsigned int len,
4653 void *buf, unsigned int size)
4655 struct buffer *out = buf;
4657 /* Token name must match. */
4658 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4660 /* Nothing else to do if there is no buffer. */
4663 if (!out->command) {
4664 if (ctx->curr != QUERY)
4666 if (sizeof(*out) > size)
4668 out->command = ctx->curr;
4671 ctx->objmask = NULL;
4676 /** Parse action names. */
/*
 * Match str against every entry of next_action[] and, on a hit, store the
 * matched token's private data at arg->offset in the current object.
 * The popped argument is pushed back on failure so the stack stays balanced.
 */
4678 parse_action(struct context *ctx, const struct token *token,
4679 const char *str, unsigned int len,
4680 void *buf, unsigned int size)
4682 struct buffer *out = buf;
4683 const struct arg *arg = pop_args(ctx);
4687 /* Argument is expected. */
4690 /* Parse action name. */
4691 for (i = 0; next_action[i]; ++i) {
4692 const struct parse_action_priv *priv;
4694 token = &token_list[next_action[i]];
4695 if (strcmp_partial(token->name, str, len))
4701 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore argument on the stack before reporting failure. */
4707 push_args(ctx, arg);
4711 /** Parse tokens for list command. */
/*
 * Same accumulation pattern as parse_destroy(): first call sets up the
 * group-ID array after the buffer header, later calls append one group ID
 * each with an explicit bounds check.
 */
4713 parse_list(struct context *ctx, const struct token *token,
4714 const char *str, unsigned int len,
4715 void *buf, unsigned int size)
4717 struct buffer *out = buf;
4719 /* Token name must match. */
4720 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4722 /* Nothing else to do if there is no buffer. */
4725 if (!out->command) {
4726 if (ctx->curr != LIST)
4728 if (sizeof(*out) > size)
4730 out->command = ctx->curr;
4733 ctx->objmask = NULL;
4734 out->args.list.group =
4735 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Ensure the next group-ID slot still fits inside the buffer. */
4739 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4740 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4743 ctx->object = out->args.list.group + out->args.list.group_n++;
4744 ctx->objmask = NULL;
4748 /** Parse tokens for isolate command. */
/* Records the ISOLATE command; the on/off flag is filled by a later token. */
4750 parse_isolate(struct context *ctx, const struct token *token,
4751 const char *str, unsigned int len,
4752 void *buf, unsigned int size)
4754 struct buffer *out = buf;
4756 /* Token name must match. */
4757 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4759 /* Nothing else to do if there is no buffer. */
4762 if (!out->command) {
4763 if (ctx->curr != ISOLATE)
4765 if (sizeof(*out) > size)
4767 out->command = ctx->curr;
4770 ctx->objmask = NULL;
4776 * Parse signed/unsigned integers 8 to 64-bit long.
4778 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts str with strtoimax/strtoumax, range-checks against arg->min/max,
 * then stores the value at arg->offset in ctx->object with the width given
 * by the destination size; arg->hton requests big-endian storage. A parallel
 * all-ones write is made into ctx->objmask when present.
 */
4782 parse_int(struct context *ctx, const struct token *token,
4783 const char *str, unsigned int len,
4784 void *buf, unsigned int size)
4786 const struct arg *arg = pop_args(ctx);
4791 /* Argument is expected. */
/* Choose signed vs. unsigned conversion based on the argument type. */
4796 (uintmax_t)strtoimax(str, &end, 0) :
4797 strtoumax(str, &end, 0);
4798 if (errno || (size_t)(end - str) != len)
/* Range check: signed compares via intmax_t, unsigned directly. */
4801 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4802 (intmax_t)u > (intmax_t)arg->max)) ||
4803 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field arguments go through the dedicated fill helper. */
4808 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4809 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4813 buf = (uint8_t *)ctx->object + arg->offset;
4815 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
/* Dispatch on destination width; 24-bit fields handled byte by byte. */
4819 case sizeof(uint8_t):
4820 *(uint8_t *)buf = u;
4822 case sizeof(uint16_t):
4823 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
4825 case sizeof(uint8_t [3]):
4826 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4828 ((uint8_t *)buf)[0] = u;
4829 ((uint8_t *)buf)[1] = u >> 8;
4830 ((uint8_t *)buf)[2] = u >> 16;
4834 ((uint8_t *)buf)[0] = u >> 16;
4835 ((uint8_t *)buf)[1] = u >> 8;
4836 ((uint8_t *)buf)[2] = u;
4838 case sizeof(uint32_t):
4839 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4841 case sizeof(uint64_t):
4842 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object unless it aliases the value. */
4847 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4849 buf = (uint8_t *)ctx->objmask + arg->offset;
4854 push_args(ctx, arg);
4861 * Three arguments (ctx->args) are retrieved from the stack to store data,
4862 * its actual length and address (in that order).
/*
 * Copies up to arg_data->size bytes of str into the object, zero-pads the
 * remainder, records the length via parse_int(), and optionally stores the
 * data pointer when arg_addr->size is non-zero. All popped arguments are
 * pushed back on failure.
 */
4865 parse_string(struct context *ctx, const struct token *token,
4866 const char *str, unsigned int len,
4867 void *buf, unsigned int size)
4869 const struct arg *arg_data = pop_args(ctx);
4870 const struct arg *arg_len = pop_args(ctx);
4871 const struct arg *arg_addr = pop_args(ctx);
4872 char tmp[16]; /* Ought to be enough. */
4875 /* Arguments are expected. */
4879 push_args(ctx, arg_data);
4883 push_args(ctx, arg_len);
4884 push_args(ctx, arg_data);
4887 size = arg_data->size;
4888 /* Bit-mask fill is not supported. */
4889 if (arg_data->mask || size < len)
4893 /* Let parse_int() fill length information first. */
4894 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4897 push_args(ctx, arg_len);
4898 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4903 buf = (uint8_t *)ctx->object + arg_data->offset;
4904 /* Output buffer is not necessarily NUL-terminated. */
4905 memcpy(buf, str, len);
4906 memset((uint8_t *)buf + len, 0x00, size - len);
4908 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4909 /* Save address if requested. */
4910 if (arg_addr->size) {
4911 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4913 (uint8_t *)ctx->object + arg_data->offset
4917 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4919 (uint8_t *)ctx->objmask + arg_data->offset
/* Failure path: restore all three arguments in reverse pop order. */
4925 push_args(ctx, arg_addr);
4926 push_args(ctx, arg_len);
4927 push_args(ctx, arg_data);
/*
 * Convert a hex character string into bytes, two input characters per
 * output byte; *size is the input length in characters on entry.
 * NOTE(review): strtoul errors are not visibly checked here — non-hex
 * input appears to yield 0 bytes silently; confirm against full body.
 */
4932 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4938 /* Check input parameters */
4939 if ((src == NULL) ||
4945 /* Convert chars to bytes */
4946 for (i = 0, len = 0; i < *size; i += 2) {
4947 snprintf(tmp, 3, "%s", src + i)
4948 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parse a hex byte string argument ("0x..." form handled explicitly).
 * Like parse_string(): converts into hex_tmp, stores length via parse_int(),
 * copies bytes into the object, zero-pads, and optionally saves the address.
 * All popped arguments are pushed back on failure.
 */
4963 parse_hex(struct context *ctx, const struct token *token,
4964 const char *str, unsigned int len,
4965 void *buf, unsigned int size)
4967 const struct arg *arg_data = pop_args(ctx);
4968 const struct arg *arg_len = pop_args(ctx);
4969 const struct arg *arg_addr = pop_args(ctx);
4970 char tmp[16]; /* Ought to be enough. */
4972 unsigned int hexlen = len;
4973 unsigned int length = 256;
/* VLA scratch buffer; input longer than 'length' is rejected below. */
4974 uint8_t hex_tmp[length];
4976 /* Arguments are expected. */
4980 push_args(ctx, arg_data);
4984 push_args(ctx, arg_len);
4985 push_args(ctx, arg_data);
4988 size = arg_data->size;
4989 /* Bit-mask fill is not supported. */
4995 /* translate bytes string to array. */
4996 if (str[0] == '0' && ((str[1] == 'x') ||
5001 if (hexlen > length)
5003 ret = parse_hex_string(str, hex_tmp, &hexlen);
5006 /* Let parse_int() fill length information first. */
5007 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
5010 push_args(ctx, arg_len);
5011 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
5016 buf = (uint8_t *)ctx->object + arg_data->offset;
5017 /* Output buffer is not necessarily NUL-terminated. */
5018 memcpy(buf, hex_tmp, hexlen);
5019 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
5021 memset((uint8_t *)ctx->objmask + arg_data->offset,
5023 /* Save address if requested. */
5024 if (arg_addr->size) {
5025 memcpy((uint8_t *)ctx->object + arg_addr->offset,
5027 (uint8_t *)ctx->object + arg_data->offset
5031 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
5033 (uint8_t *)ctx->objmask + arg_data->offset
/* Failure path: restore all three arguments in reverse pop order. */
5039 push_args(ctx, arg_addr);
5040 push_args(ctx, arg_len);
5041 push_args(ctx, arg_data);
5047 * Parse a MAC address.
5049 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Delegates to cmdline_parse_etheraddr(); the result must consume exactly
 * len characters. Stores the address at arg->offset and fills the mask
 * with 0xff when a mask object exists.
 */
5053 parse_mac_addr(struct context *ctx, const struct token *token,
5054 const char *str, unsigned int len,
5055 void *buf, unsigned int size)
5057 const struct arg *arg = pop_args(ctx);
5058 struct rte_ether_addr tmp;
5062 /* Argument is expected. */
5066 /* Bit-mask fill is not supported. */
5067 if (arg->mask || size != sizeof(tmp))
5069 /* Only network endian is supported. */
5072 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
5073 if (ret < 0 || (unsigned int)ret != len)
5077 buf = (uint8_t *)ctx->object + arg->offset;
5078 memcpy(buf, &tmp, size);
5080 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5083 push_args(ctx, arg);
5088 * Parse an IPv4 address.
5090 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Tries inet_pton(AF_INET) on a NUL-terminated copy of the token; if that
 * fails, falls back to parse_int() so plain integers are also accepted.
 */
5094 parse_ipv4_addr(struct context *ctx, const struct token *token,
5095 const char *str, unsigned int len,
5096 void *buf, unsigned int size)
5098 const struct arg *arg = pop_args(ctx);
5103 /* Argument is expected. */
5107 /* Bit-mask fill is not supported. */
5108 if (arg->mask || size != sizeof(tmp))
5110 /* Only network endian is supported. */
5113 memcpy(str2, str, len);
5115 ret = inet_pton(AF_INET, str2, &tmp);
5117 /* Attempt integer parsing. */
5118 push_args(ctx, arg);
5119 return parse_int(ctx, token, str, len, buf, size);
5123 buf = (uint8_t *)ctx->object + arg->offset;
5124 memcpy(buf, &tmp, size);
5126 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5129 push_args(ctx, arg);
5134 * Parse an IPv6 address.
5136 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Like parse_ipv4_addr() but with AF_INET6 and no integer fallback.
 */
5140 parse_ipv6_addr(struct context *ctx, const struct token *token,
5141 const char *str, unsigned int len,
5142 void *buf, unsigned int size)
5144 const struct arg *arg = pop_args(ctx);
5146 struct in6_addr tmp;
5150 /* Argument is expected. */
5154 /* Bit-mask fill is not supported. */
5155 if (arg->mask || size != sizeof(tmp))
5157 /* Only network endian is supported. */
5160 memcpy(str2, str, len);
5162 ret = inet_pton(AF_INET6, str2, &tmp);
5167 buf = (uint8_t *)ctx->object + arg->offset;
5168 memcpy(buf, &tmp, size);
5170 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
5173 push_args(ctx, arg);
5177 /** Boolean values (even indices stand for false). */
/* NULL-terminated; parse_boolean() maps index parity to "0"/"1". */
5178 static const char *const boolean_name[] = {
5188 * Parse a boolean value.
5190 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Matches str against boolean_name[]; a hit is rewritten to "1" or "0"
 * depending on index parity, then the actual store is done by parse_int().
 * Returns the matched token length on success.
 */
5194 parse_boolean(struct context *ctx, const struct token *token,
5195 const char *str, unsigned int len,
5196 void *buf, unsigned int size)
5198 const struct arg *arg = pop_args(ctx);
5202 /* Argument is expected. */
5205 for (i = 0; boolean_name[i]; ++i)
5206 if (!strcmp_partial(boolean_name[i], str, len))
5208 /* Process token as integer. */
5209 if (boolean_name[i])
5210 str = i & 1 ? "1" : "0";
5211 push_args(ctx, arg);
5212 ret = parse_int(ctx, token, str, strlen(str), buf, size);
5213 return ret > 0 ? (int)len : ret;
5216 /** Parse port and update context. */
/*
 * Parses the port number into a temporary buffer via parse_int() and
 * mirrors the result into ctx->port for later tokens/completions.
 */
5218 parse_port(struct context *ctx, const struct token *token,
5219 const char *str, unsigned int len,
5220 void *buf, unsigned int size)
5222 struct buffer *out = &(struct buffer){ .port = 0 };
5230 ctx->objmask = NULL;
5231 size = sizeof(*out);
5233 ret = parse_int(ctx, token, str, len, out, size);
5235 ctx->port = out->port;
5241 /** Parse set command, initialize output buffer for subsequent tokens. */
/* Records ctx->curr (SET_RAW_ENCAP / SET_RAW_DECAP) as the command. */
5243 parse_set_raw_encap_decap(struct context *ctx, const struct token *token,
5244 const char *str, unsigned int len,
5245 void *buf, unsigned int size)
5247 struct buffer *out = buf;
5249 /* Token name must match. */
5250 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5252 /* Nothing else to do if there is no buffer. */
5255 /* Make sure buffer is large enough. */
5256 if (size < sizeof(*out))
5259 ctx->objmask = NULL;
5262 out->command = ctx->curr;
5267 * Parse set raw_encap/raw_decap command,
5268 * initialize output buffer for subsequent tokens.
/*
 * Zeroes the header, poisons the remainder with 0x22 (helps catch reads of
 * never-written pattern bytes), and points args.vc.pattern just past the
 * header so subsequent item tokens accumulate there.
 */
5271 parse_set_init(struct context *ctx, const struct token *token,
5272 const char *str, unsigned int len,
5273 void *buf, unsigned int size)
5275 struct buffer *out = buf;
5277 /* Token name must match. */
5278 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
5280 /* Nothing else to do if there is no buffer. */
5283 /* Make sure buffer is large enough. */
5284 if (size < sizeof(*out))
5286 /* Initialize buffer. */
5287 memset(out, 0x00, sizeof(*out));
5288 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
5291 ctx->objmask = NULL;
5292 if (!out->command) {
5293 if (ctx->curr != SET)
5295 if (sizeof(*out) > size)
5297 out->command = ctx->curr;
5298 out->args.vc.data = (uint8_t *)out + size;
5299 /* All we need is pattern */
5300 out->args.vc.pattern =
5301 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
5303 ctx->object = out->args.vc.pattern;
5308 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
5310 comp_none(struct context *ctx, const struct token *token,
5311 unsigned int ent, char *buf, unsigned int size)
5321 /** Complete boolean values. */
/* Returns entry 'ent' of boolean_name[] when buf is provided. */
5323 comp_boolean(struct context *ctx, const struct token *token,
5324 unsigned int ent, char *buf, unsigned int size)
5330 for (i = 0; boolean_name[i]; ++i)
5331 if (buf && i == ent)
5332 return strlcpy(buf, boolean_name[i], size);
5338 /** Complete action names. */
/* Returns the name of entry 'ent' of next_action[] when buf is provided. */
5340 comp_action(struct context *ctx, const struct token *token,
5341 unsigned int ent, char *buf, unsigned int size)
5347 for (i = 0; next_action[i]; ++i)
5348 if (buf && i == ent)
5349 return strlcpy(buf, token_list[next_action[i]].name,
5356 /** Complete available ports. */
/* Iterates over active ethdev ports and returns entry 'ent'. */
5358 comp_port(struct context *ctx, const struct token *token,
5359 unsigned int ent, char *buf, unsigned int size)
5366 RTE_ETH_FOREACH_DEV(p) {
5367 if (buf && i == ent)
5368 return snprintf(buf, size, "%u", p);
5376 /** Complete available rule IDs. */
/*
 * Walks ctx->port's flow list and returns the ID of entry 'ent'.
 * Bails out when ctx->port is invalid or RTE_PORT_ALL.
 */
5378 comp_rule_id(struct context *ctx, const struct token *token,
5379 unsigned int ent, char *buf, unsigned int size)
5382 struct rte_port *port;
5383 struct port_flow *pf;
5386 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
5387 ctx->port == (portid_t)RTE_PORT_ALL)
5389 port = &ports[ctx->port];
5390 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
5391 if (buf && i == ent)
5392 return snprintf(buf, size, "%u", pf->id);
5400 /** Complete type field for RSS action. */
/* Suggests rss_type_table[] entries, then "end" as the final entry. */
5402 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5403 unsigned int ent, char *buf, unsigned int size)
5409 for (i = 0; rss_type_table[i].str; ++i)
5414 return strlcpy(buf, rss_type_table[ent].str, size);
5416 return snprintf(buf, size, "end");
5420 /** Complete queue field for RSS action. */
/* Suggests queue indices as plain integers, then "end" as the final entry. */
5422 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5423 unsigned int ent, char *buf, unsigned int size)
5430 return snprintf(buf, size, "%u", ent);
5432 return snprintf(buf, size, "end");
5436 /** Internal context. */
5437 static struct context cmd_flow_context;
5439 /** Global parser instance (cmdline API). */
5440 cmdline_parse_inst_t cmd_flow;
/** Parser instance for the "set raw_encap/raw_decap" commands. */
5441 cmdline_parse_inst_t cmd_set_raw;
5443 /** Initialize context. */
/* Resets the shared parser context before a new command line is parsed. */
5445 cmd_flow_context_init(struct context *ctx)
5447 /* A full memset() is not necessary. */
5457 ctx->objmask = NULL;
5460 /** Parse a token (cmdline API). */
/*
 * Measures the current token (terminated by '#' or whitespace), detects
 * end-of-line, tries each candidate token from the current next-list via
 * its call() or parse_default(), and on a match pushes the matched token's
 * own next-lists and argument descriptors onto the context stacks.
 */
5462 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5465 struct context *ctx = &cmd_flow_context;
5466 const struct token *token;
5467 const enum index *list;
5472 token = &token_list[ctx->curr];
5473 /* Check argument length. */
5476 for (len = 0; src[len]; ++len)
5477 if (src[len] == '#' || isspace(src[len]))
5481 /* Last argument and EOL detection. */
5482 for (i = len; src[i]; ++i)
5483 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5485 else if (!isspace(src[i])) {
5490 if (src[i] == '\r' || src[i] == '\n') {
5494 /* Initialize context if necessary. */
5495 if (!ctx->next_num) {
5498 ctx->next[ctx->next_num++] = token->next[0];
5500 /* Process argument through candidates. */
5501 ctx->prev = ctx->curr;
5502 list = ctx->next[ctx->next_num - 1];
5503 for (i = 0; list[i]; ++i) {
5504 const struct token *next = &token_list[list[i]];
5507 ctx->curr = list[i];
/* Prefer a token-specific parser; fall back to the default matcher. */
5509 tmp = next->call(ctx, next, src, len, result, size);
5511 tmp = parse_default(ctx, next, src, len, result, size);
5512 if (tmp == -1 || tmp != len)
5520 /* Push subsequent tokens if any. */
5522 for (i = 0; token->next[i]; ++i) {
5523 if (ctx->next_num == RTE_DIM(ctx->next))
5525 ctx->next[ctx->next_num++] = token->next[i];
5527 /* Push arguments if any. */
5529 for (i = 0; token->args[i]; ++i) {
5530 if (ctx->args_num == RTE_DIM(ctx->args))
5532 ctx->args[ctx->args_num++] = token->args[i];
5537 /** Return number of completion entries (cmdline API). */
/*
 * Counts candidates in the current next-list; when a single token with a
 * completion callback remains, delegates the count to that callback.
 */
5539 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5541 struct context *ctx = &cmd_flow_context;
5542 const struct token *token = &token_list[ctx->curr];
5543 const enum index *list;
5547 /* Count number of tokens in current list. */
5549 list = ctx->next[ctx->next_num - 1];
5551 list = token->next[0];
5552 for (i = 0; list[i]; ++i)
5557 * If there is a single token, use its completion callback, otherwise
5558 * return the number of entries.
5560 token = &token_list[list[0]];
5561 if (i == 1 && token->comp) {
5562 /* Save index for cmd_flow_get_help(). */
5563 ctx->prev = list[0];
5564 return token->comp(ctx, token, 0, NULL, 0);
5569 /** Return a completion entry (cmdline API). */
/*
 * Companion to cmd_flow_complete_get_nb(): writes candidate 'index' of the
 * current next-list into dst, delegating to the single token's completion
 * callback when applicable; also records ctx->prev for the help callback.
 */
5571 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5572 char *dst, unsigned int size)
5574 struct context *ctx = &cmd_flow_context;
5575 const struct token *token = &token_list[ctx->curr];
5576 const enum index *list;
5580 /* Count number of tokens in current list. */
5582 list = ctx->next[ctx->next_num - 1];
5584 list = token->next[0];
5585 for (i = 0; list[i]; ++i)
5589 /* If there is a single token, use its completion callback. */
5590 token = &token_list[list[0]];
5591 if (i == 1 && token->comp) {
5592 /* Save index for cmd_flow_get_help(). */
5593 ctx->prev = list[0];
5594 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5596 /* Otherwise make sure the index is valid and use defaults. */
5599 token = &token_list[list[index]];
5600 strlcpy(dst, token->name, size);
5601 /* Save index for cmd_flow_get_help(). */
5602 ctx->prev = list[index];
5606 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev (saved by the completion callbacks) to pick the token. */
5608 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5610 struct context *ctx = &cmd_flow_context;
5611 const struct token *token = &token_list[ctx->prev];
5616 /* Set token type and update global help with details. */
5617 strlcpy(dst, (token->type ? token->type : "TOKEN"), size)
5619 cmd_flow.help_str = token->help;
5621 cmd_flow.help_str = token->name;
5625 /** Token definition template (cmdline API). */
/* One shared header reused for every dynamic token (see cmd_flow_tok()). */
5626 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5627 .ops = &(struct cmdline_token_ops){
5628 .parse = cmd_flow_parse,
5629 .complete_get_nb = cmd_flow_complete_get_nb,
5630 .complete_get_elt = cmd_flow_complete_get_elt,
5631 .get_help = cmd_flow_get_help,
5636 /** Populate the next dynamic token. */
/*
 * cmdline requests tokens one by one; the context is reset on the first
 * request and NULL-termination logic decides when the command may end.
 */
5638 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5639 cmdline_parse_token_hdr_t **hdr_inst)
5641 struct context *ctx = &cmd_flow_context;
5643 /* Always reinitialize context before requesting the first token. */
5644 if (!(hdr_inst - cmd_flow.tokens))
5645 cmd_flow_context_init(ctx);
5646 /* Return NULL when no more tokens are expected. */
5647 if (!ctx->next_num && ctx->curr) {
5651 /* Determine if command should end here. */
5652 if (ctx->eol && ctx->last && ctx->next_num) {
5653 const enum index *list = ctx->next[ctx->next_num - 1];
5656 for (i = 0; list[i]; ++i) {
/* Every token shares the same static header template. */
5663 *hdr = &cmd_flow_token_hdr;
5666 /** Dispatch parsed buffer to function calls. */
/* Routes the fully parsed command buffer to the matching port_flow_* API. */
5668 cmd_flow_parsed(const struct buffer *in)
5670 switch (in->command) {
5672 port_flow_validate(in->port, &in->args.vc.attr,
5673 in->args.vc.pattern, in->args.vc.actions);
5676 port_flow_create(in->port, &in->args.vc.attr,
5677 in->args.vc.pattern, in->args.vc.actions);
5680 port_flow_destroy(in->port, in->args.destroy.rule_n,
5681 in->args.destroy.rule);
5684 port_flow_flush(in->port);
5687 port_flow_query(in->port, in->args.query.rule,
5688 &in->args.query.action);
5691 port_flow_list(in->port, in->args.list.group_n,
5692 in->args.list.group);
5695 port_flow_isolate(in->port, in->args.isolate.set);
5702 /** Token generator and output processing callback (cmdline API). */
/* arg0 doubles as token-request output and parsed-buffer input. */
5704 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5707 cmd_flow_tok(arg0, arg2);
5709 cmd_flow_parsed(arg0);
5712 /** Global parser instance (cmdline API). */
/* Definition of the instance declared above; tokens are dynamic. */
5713 cmdline_parse_inst_t cmd_flow = {
5715 .data = NULL, /**< Unused. */
5716 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5719 }, /**< Tokens are returned by cmd_flow_tok(). */
5722 /** set cmd facility. Reuse cmd flow's infrastructure as much as possible. */
/*
 * Fix up protocol-chaining fields of one raw header item in place:
 * sets the item's "next protocol"/ethertype field to next_proto and
 * normalizes a few constant fields (IPv4 version/IHL, IPv6 version bits,
 * VXLAN flags, NVGRE protocol/version).
 */
5725 update_fields(uint8_t *buf, struct rte_flow_item *item, uint16_t next_proto)
5727 struct rte_flow_item_ipv4 *ipv4;
5728 struct rte_flow_item_eth *eth;
5729 struct rte_flow_item_ipv6 *ipv6;
5730 struct rte_flow_item_vxlan *vxlan;
5731 struct rte_flow_item_vxlan_gpe *gpe;
5732 struct rte_flow_item_nvgre *nvgre;
5733 uint32_t ipv6_vtc_flow;
5735 switch (item->type) {
5736 case RTE_FLOW_ITEM_TYPE_ETH:
5737 eth = (struct rte_flow_item_eth *)buf;
5739 eth->type = rte_cpu_to_be_16(next_proto);
5741 case RTE_FLOW_ITEM_TYPE_IPV4:
5742 ipv4 = (struct rte_flow_item_ipv4 *)buf;
/* 0x45: IPv4, 5 x 32-bit header words (no options). */
5743 ipv4->hdr.version_ihl = 0x45;
5744 ipv4->hdr.next_proto_id = (uint8_t)next_proto;
5746 case RTE_FLOW_ITEM_TYPE_IPV6:
5747 ipv6 = (struct rte_flow_item_ipv6 *)buf;
5748 ipv6->hdr.proto = (uint8_t)next_proto;
5749 ipv6_vtc_flow = rte_be_to_cpu_32(ipv6->hdr.vtc_flow);
5750 ipv6_vtc_flow &= 0x0FFFFFFF; /*< reset version bits. */
5751 ipv6_vtc_flow |= 0x60000000; /*< set ipv6 version. */
5752 ipv6->hdr.vtc_flow = rte_cpu_to_be_32(ipv6_vtc_flow);
5754 case RTE_FLOW_ITEM_TYPE_VXLAN:
5755 vxlan = (struct rte_flow_item_vxlan *)buf;
/* 0x08: VNI-valid flag (RFC 7348). */
5756 vxlan->flags = 0x08;
5758 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5759 gpe = (struct rte_flow_item_vxlan_gpe *)buf;
5762 case RTE_FLOW_ITEM_TYPE_NVGRE:
5763 nvgre = (struct rte_flow_item_nvgre *)buf;
/* 0x6558: Transparent Ethernet Bridging; 0x2000: key-present bit. */
5764 nvgre->protocol = rte_cpu_to_be_16(0x6558);
5765 nvgre->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
5772 /** Helper of get item's default mask. */
/* Maps an item type to its library-provided default mask (NULL if none). */
5774 flow_item_default_mask(const struct rte_flow_item *item)
5776 const void *mask = NULL;
/* GRE key has no library default mask; use a local full-mask constant. */
5777 static rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
5779 switch (item->type) {
5780 case RTE_FLOW_ITEM_TYPE_ANY:
5781 mask = &rte_flow_item_any_mask;
5783 case RTE_FLOW_ITEM_TYPE_VF:
5784 mask = &rte_flow_item_vf_mask;
5786 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5787 mask = &rte_flow_item_port_id_mask;
5789 case RTE_FLOW_ITEM_TYPE_RAW:
5790 mask = &rte_flow_item_raw_mask;
5792 case RTE_FLOW_ITEM_TYPE_ETH:
5793 mask = &rte_flow_item_eth_mask;
5795 case RTE_FLOW_ITEM_TYPE_VLAN:
5796 mask = &rte_flow_item_vlan_mask;
5798 case RTE_FLOW_ITEM_TYPE_IPV4:
5799 mask = &rte_flow_item_ipv4_mask;
5801 case RTE_FLOW_ITEM_TYPE_IPV6:
5802 mask = &rte_flow_item_ipv6_mask;
5804 case RTE_FLOW_ITEM_TYPE_ICMP:
5805 mask = &rte_flow_item_icmp_mask;
5807 case RTE_FLOW_ITEM_TYPE_UDP:
5808 mask = &rte_flow_item_udp_mask;
5810 case RTE_FLOW_ITEM_TYPE_TCP:
5811 mask = &rte_flow_item_tcp_mask;
5813 case RTE_FLOW_ITEM_TYPE_SCTP:
5814 mask = &rte_flow_item_sctp_mask;
5816 case RTE_FLOW_ITEM_TYPE_VXLAN:
5817 mask = &rte_flow_item_vxlan_mask;
5819 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5820 mask = &rte_flow_item_vxlan_gpe_mask;
5822 case RTE_FLOW_ITEM_TYPE_E_TAG:
5823 mask = &rte_flow_item_e_tag_mask;
5825 case RTE_FLOW_ITEM_TYPE_NVGRE:
5826 mask = &rte_flow_item_nvgre_mask;
5828 case RTE_FLOW_ITEM_TYPE_MPLS:
5829 mask = &rte_flow_item_mpls_mask;
5831 case RTE_FLOW_ITEM_TYPE_GRE:
5832 mask = &rte_flow_item_gre_mask;
5834 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5835 mask = &gre_key_default_mask;
5837 case RTE_FLOW_ITEM_TYPE_META:
5838 mask = &rte_flow_item_meta_mask;
5840 case RTE_FLOW_ITEM_TYPE_FUZZY:
5841 mask = &rte_flow_item_fuzzy_mask;
5843 case RTE_FLOW_ITEM_TYPE_GTP:
5844 mask = &rte_flow_item_gtp_mask;
5846 case RTE_FLOW_ITEM_TYPE_ESP:
5847 mask = &rte_flow_item_esp_mask;
5849 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
5850 mask = &rte_flow_item_gtp_psc_mask;
5852 case RTE_FLOW_ITEM_TYPE_PPPOE_PROTO_ID:
5853 mask = &rte_flow_item_pppoe_proto_id_mask;
5862 /** Dispatch parsed buffer to function calls. */
/*
 * Build the global raw-encap or raw-decap header buffer from the pattern
 * items parsed out of a "set raw_encap"/"set raw_decap" command.
 *
 * Items are walked in reverse (innermost/upper layer first) and each
 * item's spec is packed back-to-front into the fixed-size buffer, so the
 * finished header stack occupies the region ending at
 * data + ACTION_RAW_ENCAP_MAX_DATA, ordered L2 -> L3/L4 in memory.
 * update_fields() patches per-header fields (e.g. next-protocol) that the
 * command line cannot express, using `upper_layer` carried from the
 * previously processed (inner) item.
 *
 * NOTE(review): the per-case break statements, the error-path return
 * after the "Not supported item" message, and the closing brace are
 * outside this excerpt.
 */
5864 cmd_set_raw_parsed(const struct buffer *in)
5866 uint32_t n = in->args.vc.pattern_n;
5868 struct rte_flow_item *item = NULL;
5870 uint8_t *data = NULL;
5871 uint8_t *data_tail = NULL;
5872 size_t *total_size = NULL;
/* Protocol/ethertype of the layer above the one being written. */
5873 uint16_t upper_layer = 0;
5876 RTE_ASSERT(in->command == SET_RAW_ENCAP ||
5877 in->command == SET_RAW_DECAP);
/* Select which global configuration this command fills. */
5878 if (in->command == SET_RAW_ENCAP) {
5879 total_size = &raw_encap_conf.size;
5880 data = (uint8_t *)&raw_encap_conf.data;
5882 total_size = &raw_decap_conf.size;
5883 data = (uint8_t *)&raw_decap_conf.data;
5886 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
5887 /* process hdr from upper layer to low layer (L3/L4 -> L2). */
5888 data_tail = data + ACTION_RAW_ENCAP_MAX_DATA;
5889 for (i = n - 1 ; i >= 0; --i) {
5890 item = in->args.vc.pattern + i;
/* Items without an explicit spec fall back to their default mask. */
5891 if (item->spec == NULL)
5892 item->spec = flow_item_default_mask(item);
/* Determine the raw size of this header and, where applicable, the
 * protocol value the *outer* header must carry to point at it. */
5893 switch (item->type) {
5894 case RTE_FLOW_ITEM_TYPE_ETH:
5895 size = sizeof(struct rte_flow_item_eth);
5897 case RTE_FLOW_ITEM_TYPE_VLAN:
5898 size = sizeof(struct rte_flow_item_vlan);
5899 proto = RTE_ETHER_TYPE_VLAN;
5901 case RTE_FLOW_ITEM_TYPE_IPV4:
5902 size = sizeof(struct rte_flow_item_ipv4);
5903 proto = RTE_ETHER_TYPE_IPV4;
5905 case RTE_FLOW_ITEM_TYPE_IPV6:
5906 size = sizeof(struct rte_flow_item_ipv6);
5907 proto = RTE_ETHER_TYPE_IPV6;
5909 case RTE_FLOW_ITEM_TYPE_UDP:
5910 size = sizeof(struct rte_flow_item_udp);
5913 case RTE_FLOW_ITEM_TYPE_TCP:
5914 size = sizeof(struct rte_flow_item_tcp);
5917 case RTE_FLOW_ITEM_TYPE_VXLAN:
5918 size = sizeof(struct rte_flow_item_vxlan);
5920 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5921 size = sizeof(struct rte_flow_item_vxlan_gpe);
5923 case RTE_FLOW_ITEM_TYPE_GRE:
5924 size = sizeof(struct rte_flow_item_gre);
5927 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5928 size = sizeof(rte_be32_t);
5930 case RTE_FLOW_ITEM_TYPE_MPLS:
5931 size = sizeof(struct rte_flow_item_mpls);
5933 case RTE_FLOW_ITEM_TYPE_NVGRE:
5934 size = sizeof(struct rte_flow_item_nvgre);
5938 printf("Error - Not supported item\n");
/* Wipe any partially built data before bailing out. */
5940 memset(data, 0x00, ACTION_RAW_ENCAP_MAX_DATA);
/* Grow the used region and copy this header just before the data
 * written so far (buffer is filled from the tail backwards). */
5943 *total_size += size;
5944 rte_memcpy(data_tail - (*total_size), item->spec, size);
5945 /* update some fields which cannot be set by cmdline */
5946 update_fields((data_tail - (*total_size)), item,
/* This item's protocol becomes the inner protocol of the next
 * (outer) header processed on the following iteration. */
5948 upper_layer = proto;
5950 if (verbose_level & 0x1)
5951 printf("total data size is %zu\n", (*total_size));
5952 RTE_ASSERT((*total_size) <= ACTION_RAW_ENCAP_MAX_DATA);
5955 /** Populate help strings for current token (cmdline API). */
/*
 * cmdline get_help callback for the "set raw_*" command: copies the
 * previous token's type string (or the generic "TOKEN" placeholder) into
 * @dst, and points the command's global help_str at that token's help
 * text, falling back to its name when no help is set.
 *
 * NOTE(review): the signature continues on a line outside this excerpt
 * (presumably an `unsigned int size` parameter, given the snprintf bound);
 * the if/else around the help_str assignments and the return are also not
 * visible here.
 */
5957 cmd_set_raw_get_help(cmdline_parse_token_hdr_t *hdr, char *dst,
/* Reuses the flow command's parser context to find the previous token. */
5960 struct context *ctx = &cmd_flow_context;
5961 const struct token *token = &token_list[ctx->prev];
5966 /* Set token type and update global help with details. */
5967 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
5969 cmd_set_raw.help_str = token->help;
5971 cmd_set_raw.help_str = token->name;
5975 /** Token definition template (cmdline API). */
/*
 * Shared token header returned for every dynamic token of the "set raw_*"
 * command.  Reuses the generic flow-command callbacks for parsing and
 * completion; only get_help is command-specific so the help strings are
 * routed through cmd_set_raw rather than cmd_flow.
 */
5976 static struct cmdline_token_hdr cmd_set_raw_token_hdr = {
5977 .ops = &(struct cmdline_token_ops){
5978 .parse = cmd_flow_parse,
5979 .complete_get_nb = cmd_flow_complete_get_nb,
5980 .complete_get_elt = cmd_flow_complete_get_elt,
5981 .get_help = cmd_set_raw_get_help,
5986 /** Populate the next dynamic token. */
/*
 * Token generator for the "set raw_*" command: drives the shared flow
 * parser context one token at a time.  On the first token (hdr_inst at
 * the start of cmd_set_raw.tokens) the context is reset and positioned at
 * START_SET; afterwards each call hands back the shared token header
 * until the grammar expects nothing more.
 *
 * NOTE(review): the early returns, the loop body scanning `list` for an
 * accepted end-of-line token, and the closing brace fall outside this
 * excerpt.
 */
5988 cmd_set_raw_tok(cmdline_parse_token_hdr_t **hdr,
5989 cmdline_parse_token_hdr_t **hdr_inst)
5991 struct context *ctx = &cmd_flow_context;
5993 /* Always reinitialize context before requesting the first token. */
5994 if (!(hdr_inst - cmd_set_raw.tokens)) {
5995 cmd_flow_context_init(ctx);
5996 ctx->curr = START_SET;
5998 /* Return NULL when no more tokens are expected. */
5999 if (!ctx->next_num && (ctx->curr != START_SET)) {
6003 /* Determine if command should end here. */
6004 if (ctx->eol && ctx->last && ctx->next_num) {
/* Scan the pending expected-token list (terminated by a zero entry). */
6005 const enum index *list = ctx->next[ctx->next_num - 1];
6008 for (i = 0; list[i]; ++i) {
/* All dynamic tokens share the same header; behavior is driven by
 * the parser context, not by per-token state. */
6015 *hdr = &cmd_set_raw_token_hdr;
6018 /** Token generator and output processing callback (cmdline API). */
/*
 * Single entry point the cmdline library calls both while tokenizing and
 * after a successful parse: it forwards to cmd_set_raw_tok() for token
 * generation and to cmd_set_raw_parsed() to apply the parsed command.
 *
 * NOTE(review): the conditional that selects between the two calls
 * (presumably based on whether @cl is NULL, mirroring cmd_flow_cb) is
 * outside this excerpt — confirm against the full source.
 */
6020 cmd_set_raw_cb(void *arg0, struct cmdline *cl, void *arg2)
6023 cmd_set_raw_tok(arg0, arg2);
6025 cmd_set_raw_parsed(arg0);
6028 /** Global parser instance (cmdline API). */
6029 cmdline_parse_inst_t cmd_set_raw = {
6030 .f = cmd_set_raw_cb,
6031 .data = NULL, /**< Unused. */
6032 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
6035 }, /**< Tokens are returned by cmd_flow_tok(). */