1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
111 ITEM_VLAN_INNER_TYPE,
143 ITEM_E_TAG_GRP_ECID_B,
162 ITEM_ARP_ETH_IPV4_SHA,
163 ITEM_ARP_ETH_IPV4_SPA,
164 ITEM_ARP_ETH_IPV4_THA,
165 ITEM_ARP_ETH_IPV4_TPA,
167 ITEM_IPV6_EXT_NEXT_HDR,
172 ITEM_ICMP6_ND_NS_TARGET_ADDR,
174 ITEM_ICMP6_ND_NA_TARGET_ADDR,
176 ITEM_ICMP6_ND_OPT_TYPE,
177 ITEM_ICMP6_ND_OPT_SLA_ETH,
178 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
179 ITEM_ICMP6_ND_OPT_TLA_ETH,
180 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
184 /* Validate/create actions. */
204 ACTION_RSS_FUNC_DEFAULT,
205 ACTION_RSS_FUNC_TOEPLITZ,
206 ACTION_RSS_FUNC_SIMPLE_XOR,
218 ACTION_PHY_PORT_ORIGINAL,
219 ACTION_PHY_PORT_INDEX,
221 ACTION_PORT_ID_ORIGINAL,
225 ACTION_OF_SET_MPLS_TTL,
226 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
227 ACTION_OF_DEC_MPLS_TTL,
228 ACTION_OF_SET_NW_TTL,
229 ACTION_OF_SET_NW_TTL_NW_TTL,
230 ACTION_OF_DEC_NW_TTL,
231 ACTION_OF_COPY_TTL_OUT,
232 ACTION_OF_COPY_TTL_IN,
235 ACTION_OF_PUSH_VLAN_ETHERTYPE,
236 ACTION_OF_SET_VLAN_VID,
237 ACTION_OF_SET_VLAN_VID_VLAN_VID,
238 ACTION_OF_SET_VLAN_PCP,
239 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
241 ACTION_OF_POP_MPLS_ETHERTYPE,
243 ACTION_OF_PUSH_MPLS_ETHERTYPE,
250 ACTION_MPLSOGRE_ENCAP,
251 ACTION_MPLSOGRE_DECAP,
252 ACTION_MPLSOUDP_ENCAP,
253 ACTION_MPLSOUDP_DECAP,
255 ACTION_SET_IPV4_SRC_IPV4_SRC,
257 ACTION_SET_IPV4_DST_IPV4_DST,
259 ACTION_SET_IPV6_SRC_IPV6_SRC,
261 ACTION_SET_IPV6_DST_IPV6_DST,
263 ACTION_SET_TP_SRC_TP_SRC,
265 ACTION_SET_TP_DST_TP_DST,
271 ACTION_SET_MAC_SRC_MAC_SRC,
273 ACTION_SET_MAC_DST_MAC_DST,
276 /** Maximum size for pattern in struct rte_flow_item_raw. */
277 #define ITEM_RAW_PATTERN_SIZE 40
279 /** Storage size for struct rte_flow_item_raw including pattern. */
280 #define ITEM_RAW_SIZE \
281 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
283 /** Maximum number of queue indices in struct rte_flow_action_rss. */
284 #define ACTION_RSS_QUEUE_NUM 32
286 /** Storage for struct rte_flow_action_rss including external data. */
287 struct action_rss_data {
288 struct rte_flow_action_rss conf;
289 uint8_t key[RSS_HASH_KEY_LENGTH];
290 uint16_t queue[ACTION_RSS_QUEUE_NUM];
293 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
294 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
296 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
297 struct action_vxlan_encap_data {
298 struct rte_flow_action_vxlan_encap conf;
299 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
300 struct rte_flow_item_eth item_eth;
301 struct rte_flow_item_vlan item_vlan;
303 struct rte_flow_item_ipv4 item_ipv4;
304 struct rte_flow_item_ipv6 item_ipv6;
306 struct rte_flow_item_udp item_udp;
307 struct rte_flow_item_vxlan item_vxlan;
310 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
311 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
313 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
314 struct action_nvgre_encap_data {
315 struct rte_flow_action_nvgre_encap conf;
316 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
317 struct rte_flow_item_eth item_eth;
318 struct rte_flow_item_vlan item_vlan;
320 struct rte_flow_item_ipv4 item_ipv4;
321 struct rte_flow_item_ipv6 item_ipv6;
323 struct rte_flow_item_nvgre item_nvgre;
326 /** Maximum data size in struct rte_flow_action_raw_encap. */
327 #define ACTION_RAW_ENCAP_MAX_DATA 128
329 /** Storage for struct rte_flow_action_raw_encap including external data. */
330 struct action_raw_encap_data {
331 struct rte_flow_action_raw_encap conf;
332 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
333 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
336 /** Storage for struct rte_flow_action_raw_decap including external data. */
337 struct action_raw_decap_data {
338 struct rte_flow_action_raw_decap conf;
339 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
342 /** Maximum number of subsequent tokens and arguments on the stack. */
343 #define CTX_STACK_SIZE 16
345 /** Parser context. */
347 /** Stack of subsequent token lists to process. */
348 const enum index *next[CTX_STACK_SIZE];
349 /** Arguments for stacked tokens. */
350 const void *args[CTX_STACK_SIZE];
351 enum index curr; /**< Current token index. */
352 enum index prev; /**< Index of the last token seen. */
353 int next_num; /**< Number of entries in next[]. */
354 int args_num; /**< Number of entries in args[]. */
355 uint32_t eol:1; /**< EOL has been detected. */
356 uint32_t last:1; /**< No more arguments. */
357 portid_t port; /**< Current port ID (for completions). */
358 uint32_t objdata; /**< Object-specific data. */
359 void *object; /**< Address of current object for relative offsets. */
360 void *objmask; /**< Object a full mask must be written to. */
363 /** Token argument. */
365 uint32_t hton:1; /**< Use network byte ordering. */
366 uint32_t sign:1; /**< Value is signed. */
367 uint32_t bounded:1; /**< Value is bounded. */
368 uintmax_t min; /**< Minimum value if bounded. */
369 uintmax_t max; /**< Maximum value if bounded. */
370 uint32_t offset; /**< Relative offset from ctx->object. */
371 uint32_t size; /**< Field size. */
372 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
375 /** Parser token definition. */
377 /** Type displayed during completion (defaults to "TOKEN"). */
379 /** Help displayed during completion (defaults to token name). */
381 /** Private data used by parser functions. */
384 * Lists of subsequent tokens to push on the stack. Each call to the
385 * parser consumes the last entry of that stack.
387 const enum index *const *next;
388 /** Arguments stack for subsequent tokens that need them. */
389 const struct arg *const *args;
391 * Token-processing callback, returns -1 in case of error, the
392 * length of the matched string otherwise. If NULL, attempts to
393 * match the token name.
395 * If buf is not NULL, the result should be stored in it according
396 * to context. An error is returned if not large enough.
398 int (*call)(struct context *ctx, const struct token *token,
399 const char *str, unsigned int len,
400 void *buf, unsigned int size);
402 * Callback that provides possible values for this token, used for
403 * completion. Returns -1 in case of error, the number of possible
404 * values otherwise. If NULL, the token name is used.
406 * If buf is not NULL, entry index ent is written to buf and the
407 * full length of the entry is returned (same behavior as
410 int (*comp)(struct context *ctx, const struct token *token,
411 unsigned int ent, char *buf, unsigned int size);
412 /** Mandatory token name, no default value. */
416 /** Static initializer for the next field. */
417 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
419 /** Static initializer for a NEXT() entry. */
420 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
422 /** Static initializer for the args field. */
423 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
425 /** Static initializer for ARGS() to target a field. */
426 #define ARGS_ENTRY(s, f) \
427 (&(const struct arg){ \
428 .offset = offsetof(s, f), \
429 .size = sizeof(((s *)0)->f), \
432 /** Static initializer for ARGS() to target a bit-field. */
433 #define ARGS_ENTRY_BF(s, f, b) \
434 (&(const struct arg){ \
436 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
439 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
440 #define ARGS_ENTRY_MASK(s, f, m) \
441 (&(const struct arg){ \
442 .offset = offsetof(s, f), \
443 .size = sizeof(((s *)0)->f), \
444 .mask = (const void *)(m), \
447 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
448 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
449 (&(const struct arg){ \
451 .offset = offsetof(s, f), \
452 .size = sizeof(((s *)0)->f), \
453 .mask = (const void *)(m), \
456 /** Static initializer for ARGS() to target a pointer. */
457 #define ARGS_ENTRY_PTR(s, f) \
458 (&(const struct arg){ \
459 .size = sizeof(*((s *)0)->f), \
462 /** Static initializer for ARGS() with arbitrary offset and size. */
463 #define ARGS_ENTRY_ARB(o, s) \
464 (&(const struct arg){ \
469 /** Same as ARGS_ENTRY_ARB() with bounded values. */
470 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
471 (&(const struct arg){ \
479 /** Same as ARGS_ENTRY() using network byte ordering. */
480 #define ARGS_ENTRY_HTON(s, f) \
481 (&(const struct arg){ \
483 .offset = offsetof(s, f), \
484 .size = sizeof(((s *)0)->f), \
487 /** Parser output buffer layout expected by cmd_flow_parsed(). */
489 enum index command; /**< Flow command. */
490 portid_t port; /**< Affected port ID. */
493 struct rte_flow_attr attr;
494 struct rte_flow_item *pattern;
495 struct rte_flow_action *actions;
499 } vc; /**< Validate/create arguments. */
503 } destroy; /**< Destroy arguments. */
506 struct rte_flow_action action;
507 } query; /**< Query arguments. */
511 } list; /**< List arguments. */
514 } isolate; /**< Isolated mode arguments. */
515 } args; /**< Command arguments. */
518 /** Private data for pattern items. */
519 struct parse_item_priv {
520 enum rte_flow_item_type type; /**< Item type. */
521 uint32_t size; /**< Size of item specification structure. */
524 #define PRIV_ITEM(t, s) \
525 (&(const struct parse_item_priv){ \
526 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
530 /** Private data for actions. */
531 struct parse_action_priv {
532 enum rte_flow_action_type type; /**< Action type. */
533 uint32_t size; /**< Size of action configuration structure. */
536 #define PRIV_ACTION(t, s) \
537 (&(const struct parse_action_priv){ \
538 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
542 static const enum index next_vc_attr[] = {
552 static const enum index next_destroy_attr[] = {
558 static const enum index next_list_attr[] = {
564 static const enum index item_param[] = {
573 static const enum index next_item[] = {
609 ITEM_ICMP6_ND_OPT_SLA_ETH,
610 ITEM_ICMP6_ND_OPT_TLA_ETH,
615 static const enum index item_fuzzy[] = {
621 static const enum index item_any[] = {
627 static const enum index item_vf[] = {
633 static const enum index item_phy_port[] = {
639 static const enum index item_port_id[] = {
645 static const enum index item_mark[] = {
651 static const enum index item_raw[] = {
661 static const enum index item_eth[] = {
669 static const enum index item_vlan[] = {
674 ITEM_VLAN_INNER_TYPE,
679 static const enum index item_ipv4[] = {
689 static const enum index item_ipv6[] = {
700 static const enum index item_icmp[] = {
707 static const enum index item_udp[] = {
714 static const enum index item_tcp[] = {
722 static const enum index item_sctp[] = {
731 static const enum index item_vxlan[] = {
737 static const enum index item_e_tag[] = {
738 ITEM_E_TAG_GRP_ECID_B,
743 static const enum index item_nvgre[] = {
749 static const enum index item_mpls[] = {
755 static const enum index item_gre[] = {
761 static const enum index item_gtp[] = {
767 static const enum index item_geneve[] = {
774 static const enum index item_vxlan_gpe[] = {
780 static const enum index item_arp_eth_ipv4[] = {
781 ITEM_ARP_ETH_IPV4_SHA,
782 ITEM_ARP_ETH_IPV4_SPA,
783 ITEM_ARP_ETH_IPV4_THA,
784 ITEM_ARP_ETH_IPV4_TPA,
789 static const enum index item_ipv6_ext[] = {
790 ITEM_IPV6_EXT_NEXT_HDR,
795 static const enum index item_icmp6[] = {
802 static const enum index item_icmp6_nd_ns[] = {
803 ITEM_ICMP6_ND_NS_TARGET_ADDR,
808 static const enum index item_icmp6_nd_na[] = {
809 ITEM_ICMP6_ND_NA_TARGET_ADDR,
814 static const enum index item_icmp6_nd_opt[] = {
815 ITEM_ICMP6_ND_OPT_TYPE,
820 static const enum index item_icmp6_nd_opt_sla_eth[] = {
821 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
826 static const enum index item_icmp6_nd_opt_tla_eth[] = {
827 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
832 static const enum index item_meta[] = {
838 static const enum index next_action[] = {
854 ACTION_OF_SET_MPLS_TTL,
855 ACTION_OF_DEC_MPLS_TTL,
856 ACTION_OF_SET_NW_TTL,
857 ACTION_OF_DEC_NW_TTL,
858 ACTION_OF_COPY_TTL_OUT,
859 ACTION_OF_COPY_TTL_IN,
862 ACTION_OF_SET_VLAN_VID,
863 ACTION_OF_SET_VLAN_PCP,
872 ACTION_MPLSOGRE_ENCAP,
873 ACTION_MPLSOGRE_DECAP,
874 ACTION_MPLSOUDP_ENCAP,
875 ACTION_MPLSOUDP_DECAP,
890 static const enum index action_mark[] = {
896 static const enum index action_queue[] = {
902 static const enum index action_count[] = {
909 static const enum index action_rss[] = {
920 static const enum index action_vf[] = {
927 static const enum index action_phy_port[] = {
928 ACTION_PHY_PORT_ORIGINAL,
929 ACTION_PHY_PORT_INDEX,
934 static const enum index action_port_id[] = {
935 ACTION_PORT_ID_ORIGINAL,
941 static const enum index action_meter[] = {
947 static const enum index action_of_set_mpls_ttl[] = {
948 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
953 static const enum index action_of_set_nw_ttl[] = {
954 ACTION_OF_SET_NW_TTL_NW_TTL,
959 static const enum index action_of_push_vlan[] = {
960 ACTION_OF_PUSH_VLAN_ETHERTYPE,
965 static const enum index action_of_set_vlan_vid[] = {
966 ACTION_OF_SET_VLAN_VID_VLAN_VID,
971 static const enum index action_of_set_vlan_pcp[] = {
972 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
977 static const enum index action_of_pop_mpls[] = {
978 ACTION_OF_POP_MPLS_ETHERTYPE,
983 static const enum index action_of_push_mpls[] = {
984 ACTION_OF_PUSH_MPLS_ETHERTYPE,
989 static const enum index action_set_ipv4_src[] = {
990 ACTION_SET_IPV4_SRC_IPV4_SRC,
995 static const enum index action_set_mac_src[] = {
996 ACTION_SET_MAC_SRC_MAC_SRC,
1001 static const enum index action_set_ipv4_dst[] = {
1002 ACTION_SET_IPV4_DST_IPV4_DST,
1007 static const enum index action_set_ipv6_src[] = {
1008 ACTION_SET_IPV6_SRC_IPV6_SRC,
1013 static const enum index action_set_ipv6_dst[] = {
1014 ACTION_SET_IPV6_DST_IPV6_DST,
1019 static const enum index action_set_tp_src[] = {
1020 ACTION_SET_TP_SRC_TP_SRC,
1025 static const enum index action_set_tp_dst[] = {
1026 ACTION_SET_TP_DST_TP_DST,
1031 static const enum index action_set_ttl[] = {
1037 static const enum index action_jump[] = {
1043 static const enum index action_set_mac_dst[] = {
1044 ACTION_SET_MAC_DST_MAC_DST,
1049 static int parse_init(struct context *, const struct token *,
1050 const char *, unsigned int,
1051 void *, unsigned int);
1052 static int parse_vc(struct context *, const struct token *,
1053 const char *, unsigned int,
1054 void *, unsigned int);
1055 static int parse_vc_spec(struct context *, const struct token *,
1056 const char *, unsigned int, void *, unsigned int);
1057 static int parse_vc_conf(struct context *, const struct token *,
1058 const char *, unsigned int, void *, unsigned int);
1059 static int parse_vc_action_rss(struct context *, const struct token *,
1060 const char *, unsigned int, void *,
1062 static int parse_vc_action_rss_func(struct context *, const struct token *,
1063 const char *, unsigned int, void *,
1065 static int parse_vc_action_rss_type(struct context *, const struct token *,
1066 const char *, unsigned int, void *,
1068 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1069 const char *, unsigned int, void *,
1071 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1072 const char *, unsigned int, void *,
1074 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1075 const char *, unsigned int, void *,
1077 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1078 const char *, unsigned int, void *,
1080 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1081 const char *, unsigned int, void *,
1083 static int parse_vc_action_mplsogre_encap(struct context *,
1084 const struct token *, const char *,
1085 unsigned int, void *, unsigned int);
1086 static int parse_vc_action_mplsogre_decap(struct context *,
1087 const struct token *, const char *,
1088 unsigned int, void *, unsigned int);
1089 static int parse_vc_action_mplsoudp_encap(struct context *,
1090 const struct token *, const char *,
1091 unsigned int, void *, unsigned int);
1092 static int parse_vc_action_mplsoudp_decap(struct context *,
1093 const struct token *, const char *,
1094 unsigned int, void *, unsigned int);
1095 static int parse_destroy(struct context *, const struct token *,
1096 const char *, unsigned int,
1097 void *, unsigned int);
1098 static int parse_flush(struct context *, const struct token *,
1099 const char *, unsigned int,
1100 void *, unsigned int);
1101 static int parse_query(struct context *, const struct token *,
1102 const char *, unsigned int,
1103 void *, unsigned int);
1104 static int parse_action(struct context *, const struct token *,
1105 const char *, unsigned int,
1106 void *, unsigned int);
1107 static int parse_list(struct context *, const struct token *,
1108 const char *, unsigned int,
1109 void *, unsigned int);
1110 static int parse_isolate(struct context *, const struct token *,
1111 const char *, unsigned int,
1112 void *, unsigned int);
1113 static int parse_int(struct context *, const struct token *,
1114 const char *, unsigned int,
1115 void *, unsigned int);
1116 static int parse_prefix(struct context *, const struct token *,
1117 const char *, unsigned int,
1118 void *, unsigned int);
1119 static int parse_boolean(struct context *, const struct token *,
1120 const char *, unsigned int,
1121 void *, unsigned int);
1122 static int parse_string(struct context *, const struct token *,
1123 const char *, unsigned int,
1124 void *, unsigned int);
1125 static int parse_mac_addr(struct context *, const struct token *,
1126 const char *, unsigned int,
1127 void *, unsigned int);
1128 static int parse_ipv4_addr(struct context *, const struct token *,
1129 const char *, unsigned int,
1130 void *, unsigned int);
1131 static int parse_ipv6_addr(struct context *, const struct token *,
1132 const char *, unsigned int,
1133 void *, unsigned int);
1134 static int parse_port(struct context *, const struct token *,
1135 const char *, unsigned int,
1136 void *, unsigned int);
1137 static int comp_none(struct context *, const struct token *,
1138 unsigned int, char *, unsigned int);
1139 static int comp_boolean(struct context *, const struct token *,
1140 unsigned int, char *, unsigned int);
1141 static int comp_action(struct context *, const struct token *,
1142 unsigned int, char *, unsigned int);
1143 static int comp_port(struct context *, const struct token *,
1144 unsigned int, char *, unsigned int);
1145 static int comp_rule_id(struct context *, const struct token *,
1146 unsigned int, char *, unsigned int);
1147 static int comp_vc_action_rss_type(struct context *, const struct token *,
1148 unsigned int, char *, unsigned int);
1149 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1150 unsigned int, char *, unsigned int);
1152 /** Token definitions. */
1153 static const struct token token_list[] = {
1154 /* Special tokens. */
1157 .help = "null entry, abused as the entry point",
1158 .next = NEXT(NEXT_ENTRY(FLOW)),
1163 .help = "command may end here",
1165 /* Common tokens. */
1169 .help = "integer value",
1174 .name = "{unsigned}",
1176 .help = "unsigned integer value",
1183 .help = "prefix length for bit-mask",
1184 .call = parse_prefix,
1188 .name = "{boolean}",
1190 .help = "any boolean value",
1191 .call = parse_boolean,
1192 .comp = comp_boolean,
1197 .help = "fixed string",
1198 .call = parse_string,
1202 .name = "{MAC address}",
1204 .help = "standard MAC address notation",
1205 .call = parse_mac_addr,
1209 .name = "{IPv4 address}",
1210 .type = "IPV4 ADDRESS",
1211 .help = "standard IPv4 address notation",
1212 .call = parse_ipv4_addr,
1216 .name = "{IPv6 address}",
1217 .type = "IPV6 ADDRESS",
1218 .help = "standard IPv6 address notation",
1219 .call = parse_ipv6_addr,
1223 .name = "{rule id}",
1225 .help = "rule identifier",
1227 .comp = comp_rule_id,
1230 .name = "{port_id}",
1232 .help = "port identifier",
1237 .name = "{group_id}",
1239 .help = "group identifier",
1243 [PRIORITY_LEVEL] = {
1246 .help = "priority level",
1250 /* Top-level command. */
1253 .type = "{command} {port_id} [{arg} [...]]",
1254 .help = "manage ingress/egress flow rules",
1255 .next = NEXT(NEXT_ENTRY
1265 /* Sub-level commands. */
1268 .help = "check whether a flow rule can be created",
1269 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1270 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1275 .help = "create a flow rule",
1276 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1277 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1282 .help = "destroy specific flow rules",
1283 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1284 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1285 .call = parse_destroy,
1289 .help = "destroy all flow rules",
1290 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1291 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1292 .call = parse_flush,
1296 .help = "query an existing flow rule",
1297 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1298 NEXT_ENTRY(RULE_ID),
1299 NEXT_ENTRY(PORT_ID)),
1300 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1301 ARGS_ENTRY(struct buffer, args.query.rule),
1302 ARGS_ENTRY(struct buffer, port)),
1303 .call = parse_query,
1307 .help = "list existing flow rules",
1308 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1309 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1314 .help = "restrict ingress traffic to the defined flow rules",
1315 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1316 NEXT_ENTRY(PORT_ID)),
1317 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1318 ARGS_ENTRY(struct buffer, port)),
1319 .call = parse_isolate,
1321 /* Destroy arguments. */
1324 .help = "specify a rule identifier",
1325 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1326 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1327 .call = parse_destroy,
1329 /* Query arguments. */
1333 .help = "action to query, must be part of the rule",
1334 .call = parse_action,
1335 .comp = comp_action,
1337 /* List arguments. */
1340 .help = "specify a group",
1341 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1342 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1345 /* Validate/create attributes. */
1348 .help = "specify a group",
1349 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1350 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1355 .help = "specify a priority level",
1356 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1357 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1362 .help = "affect rule to ingress",
1363 .next = NEXT(next_vc_attr),
1368 .help = "affect rule to egress",
1369 .next = NEXT(next_vc_attr),
1374 .help = "apply rule directly to endpoints found in pattern",
1375 .next = NEXT(next_vc_attr),
1378 /* Validate/create pattern. */
1381 .help = "submit a list of pattern items",
1382 .next = NEXT(next_item),
1387 .help = "match value perfectly (with full bit-mask)",
1388 .call = parse_vc_spec,
1390 [ITEM_PARAM_SPEC] = {
1392 .help = "match value according to configured bit-mask",
1393 .call = parse_vc_spec,
1395 [ITEM_PARAM_LAST] = {
1397 .help = "specify upper bound to establish a range",
1398 .call = parse_vc_spec,
1400 [ITEM_PARAM_MASK] = {
1402 .help = "specify bit-mask with relevant bits set to one",
1403 .call = parse_vc_spec,
1405 [ITEM_PARAM_PREFIX] = {
1407 .help = "generate bit-mask from a prefix length",
1408 .call = parse_vc_spec,
1412 .help = "specify next pattern item",
1413 .next = NEXT(next_item),
1417 .help = "end list of pattern items",
1418 .priv = PRIV_ITEM(END, 0),
1419 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1424 .help = "no-op pattern item",
1425 .priv = PRIV_ITEM(VOID, 0),
1426 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1431 .help = "perform actions when pattern does not match",
1432 .priv = PRIV_ITEM(INVERT, 0),
1433 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1438 .help = "match any protocol for the current layer",
1439 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1440 .next = NEXT(item_any),
1445 .help = "number of layers covered",
1446 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1447 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1451 .help = "match traffic from/to the physical function",
1452 .priv = PRIV_ITEM(PF, 0),
1453 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1458 .help = "match traffic from/to a virtual function ID",
1459 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1460 .next = NEXT(item_vf),
1466 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1467 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1471 .help = "match traffic from/to a specific physical port",
1472 .priv = PRIV_ITEM(PHY_PORT,
1473 sizeof(struct rte_flow_item_phy_port)),
1474 .next = NEXT(item_phy_port),
1477 [ITEM_PHY_PORT_INDEX] = {
1479 .help = "physical port index",
1480 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1481 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1485 .help = "match traffic from/to a given DPDK port ID",
1486 .priv = PRIV_ITEM(PORT_ID,
1487 sizeof(struct rte_flow_item_port_id)),
1488 .next = NEXT(item_port_id),
1491 [ITEM_PORT_ID_ID] = {
1493 .help = "DPDK port ID",
1494 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1495 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1499 .help = "match traffic against value set in previously matched rule",
1500 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1501 .next = NEXT(item_mark),
1506 .help = "Integer value to match against",
1507 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1508 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1512 .help = "match an arbitrary byte string",
1513 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1514 .next = NEXT(item_raw),
1517 [ITEM_RAW_RELATIVE] = {
1519 .help = "look for pattern after the previous item",
1520 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1521 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1524 [ITEM_RAW_SEARCH] = {
1526 .help = "search pattern from offset (see also limit)",
1527 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1528 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1531 [ITEM_RAW_OFFSET] = {
1533 .help = "absolute or relative offset for pattern",
1534 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1535 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1537 [ITEM_RAW_LIMIT] = {
1539 .help = "search area limit for start of pattern",
1540 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1541 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1543 [ITEM_RAW_PATTERN] = {
1545 .help = "byte string to look for",
1546 .next = NEXT(item_raw,
1548 NEXT_ENTRY(ITEM_PARAM_IS,
1551 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1552 ARGS_ENTRY(struct rte_flow_item_raw, length),
1553 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1554 ITEM_RAW_PATTERN_SIZE)),
1558 .help = "match Ethernet header",
1559 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1560 .next = NEXT(item_eth),
1565 .help = "destination MAC",
1566 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1567 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1571 .help = "source MAC",
1572 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1573 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1577 .help = "EtherType",
1578 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1579 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1583 .help = "match 802.1Q/ad VLAN tag",
1584 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1585 .next = NEXT(item_vlan),
1590 .help = "tag control information",
1591 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1592 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1596 .help = "priority code point",
1597 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1598 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1603 .help = "drop eligible indicator",
1604 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1605 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1610 .help = "VLAN identifier",
1611 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1612 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1615 [ITEM_VLAN_INNER_TYPE] = {
1616 .name = "inner_type",
1617 .help = "inner EtherType",
1618 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1619 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1624 .help = "match IPv4 header",
1625 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1626 .next = NEXT(item_ipv4),
1631 .help = "type of service",
1632 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1633 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1634 hdr.type_of_service)),
1638 .help = "time to live",
1639 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1640 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1643 [ITEM_IPV4_PROTO] = {
1645 .help = "next protocol ID",
1646 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1647 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1648 hdr.next_proto_id)),
1652 .help = "source address",
1653 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1654 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1659 .help = "destination address",
1660 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1661 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1666 .help = "match IPv6 header",
1667 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1668 .next = NEXT(item_ipv6),
1673 .help = "traffic class",
1674 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1675 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1677 "\x0f\xf0\x00\x00")),
1679 [ITEM_IPV6_FLOW] = {
1681 .help = "flow label",
1682 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1683 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1685 "\x00\x0f\xff\xff")),
1687 [ITEM_IPV6_PROTO] = {
1689 .help = "protocol (next header)",
1690 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1691 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1696 .help = "hop limit",
1697 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1698 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1703 .help = "source address",
1704 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1705 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1710 .help = "destination address",
1711 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1712 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1717 .help = "match ICMP header",
1718 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1719 .next = NEXT(item_icmp),
1722 [ITEM_ICMP_TYPE] = {
1724 .help = "ICMP packet type",
1725 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1726 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1729 [ITEM_ICMP_CODE] = {
1731 .help = "ICMP packet code",
1732 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1733 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1738 .help = "match UDP header",
1739 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1740 .next = NEXT(item_udp),
1745 .help = "UDP source port",
1746 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1747 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1752 .help = "UDP destination port",
1753 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1754 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1759 .help = "match TCP header",
1760 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1761 .next = NEXT(item_tcp),
1766 .help = "TCP source port",
1767 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1768 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1773 .help = "TCP destination port",
1774 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1775 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1778 [ITEM_TCP_FLAGS] = {
1780 .help = "TCP flags",
1781 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1782 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1787 .help = "match SCTP header",
1788 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1789 .next = NEXT(item_sctp),
1794 .help = "SCTP source port",
1795 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1796 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1801 .help = "SCTP destination port",
1802 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1803 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1808 .help = "validation tag",
1809 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1810 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1813 [ITEM_SCTP_CKSUM] = {
1816 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1817 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1822 .help = "match VXLAN header",
1823 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1824 .next = NEXT(item_vxlan),
1827 [ITEM_VXLAN_VNI] = {
1829 .help = "VXLAN identifier",
1830 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1831 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1835 .help = "match E-Tag header",
1836 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1837 .next = NEXT(item_e_tag),
1840 [ITEM_E_TAG_GRP_ECID_B] = {
1841 .name = "grp_ecid_b",
1842 .help = "GRP and E-CID base",
1843 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1844 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1850 .help = "match NVGRE header",
1851 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1852 .next = NEXT(item_nvgre),
1855 [ITEM_NVGRE_TNI] = {
1857 .help = "virtual subnet ID",
1858 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1859 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1863 .help = "match MPLS header",
1864 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1865 .next = NEXT(item_mpls),
1868 [ITEM_MPLS_LABEL] = {
1870 .help = "MPLS label",
1871 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1872 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1878 .help = "match GRE header",
1879 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1880 .next = NEXT(item_gre),
1883 [ITEM_GRE_PROTO] = {
1885 .help = "GRE protocol type",
1886 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1887 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1892 .help = "fuzzy pattern match, expect faster than default",
1893 .priv = PRIV_ITEM(FUZZY,
1894 sizeof(struct rte_flow_item_fuzzy)),
1895 .next = NEXT(item_fuzzy),
1898 [ITEM_FUZZY_THRESH] = {
1900 .help = "match accuracy threshold",
1901 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1902 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1907 .help = "match GTP header",
1908 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1909 .next = NEXT(item_gtp),
1914 .help = "tunnel endpoint identifier",
1915 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1916 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1920 .help = "match GTP header",
1921 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1922 .next = NEXT(item_gtp),
1927 .help = "match GTP header",
1928 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1929 .next = NEXT(item_gtp),
1934 .help = "match GENEVE header",
1935 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1936 .next = NEXT(item_geneve),
1939 [ITEM_GENEVE_VNI] = {
1941 .help = "virtual network identifier",
1942 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1943 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1945 [ITEM_GENEVE_PROTO] = {
1947 .help = "GENEVE protocol type",
1948 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1949 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1952 [ITEM_VXLAN_GPE] = {
1953 .name = "vxlan-gpe",
1954 .help = "match VXLAN-GPE header",
1955 .priv = PRIV_ITEM(VXLAN_GPE,
1956 sizeof(struct rte_flow_item_vxlan_gpe)),
1957 .next = NEXT(item_vxlan_gpe),
1960 [ITEM_VXLAN_GPE_VNI] = {
1962 .help = "VXLAN-GPE identifier",
1963 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1964 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1967 [ITEM_ARP_ETH_IPV4] = {
1968 .name = "arp_eth_ipv4",
1969 .help = "match ARP header for Ethernet/IPv4",
1970 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1971 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1972 .next = NEXT(item_arp_eth_ipv4),
1975 [ITEM_ARP_ETH_IPV4_SHA] = {
1977 .help = "sender hardware address",
1978 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1980 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1983 [ITEM_ARP_ETH_IPV4_SPA] = {
1985 .help = "sender IPv4 address",
1986 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1988 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1991 [ITEM_ARP_ETH_IPV4_THA] = {
1993 .help = "target hardware address",
1994 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1996 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1999 [ITEM_ARP_ETH_IPV4_TPA] = {
2001 .help = "target IPv4 address",
2002 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2004 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2009 .help = "match presence of any IPv6 extension header",
2010 .priv = PRIV_ITEM(IPV6_EXT,
2011 sizeof(struct rte_flow_item_ipv6_ext)),
2012 .next = NEXT(item_ipv6_ext),
2015 [ITEM_IPV6_EXT_NEXT_HDR] = {
2017 .help = "next header",
2018 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2019 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2024 .help = "match any ICMPv6 header",
2025 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2026 .next = NEXT(item_icmp6),
2029 [ITEM_ICMP6_TYPE] = {
2031 .help = "ICMPv6 type",
2032 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2033 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2036 [ITEM_ICMP6_CODE] = {
2038 .help = "ICMPv6 code",
2039 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2040 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2043 [ITEM_ICMP6_ND_NS] = {
2044 .name = "icmp6_nd_ns",
2045 .help = "match ICMPv6 neighbor discovery solicitation",
2046 .priv = PRIV_ITEM(ICMP6_ND_NS,
2047 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2048 .next = NEXT(item_icmp6_nd_ns),
2051 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2052 .name = "target_addr",
2053 .help = "target address",
2054 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2056 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2059 [ITEM_ICMP6_ND_NA] = {
2060 .name = "icmp6_nd_na",
2061 .help = "match ICMPv6 neighbor discovery advertisement",
2062 .priv = PRIV_ITEM(ICMP6_ND_NA,
2063 sizeof(struct rte_flow_item_icmp6_nd_na)),
2064 .next = NEXT(item_icmp6_nd_na),
2067 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2068 .name = "target_addr",
2069 .help = "target address",
2070 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2072 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2075 [ITEM_ICMP6_ND_OPT] = {
2076 .name = "icmp6_nd_opt",
2077 .help = "match presence of any ICMPv6 neighbor discovery"
2079 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2080 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2081 .next = NEXT(item_icmp6_nd_opt),
2084 [ITEM_ICMP6_ND_OPT_TYPE] = {
2086 .help = "ND option type",
2087 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2089 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2092 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2093 .name = "icmp6_nd_opt_sla_eth",
2094 .help = "match ICMPv6 neighbor discovery source Ethernet"
2095 " link-layer address option",
2097 (ICMP6_ND_OPT_SLA_ETH,
2098 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2099 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2102 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2104 .help = "source Ethernet LLA",
2105 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2107 .args = ARGS(ARGS_ENTRY_HTON
2108 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2110 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2111 .name = "icmp6_nd_opt_tla_eth",
2112 .help = "match ICMPv6 neighbor discovery target Ethernet"
2113 " link-layer address option",
2115 (ICMP6_ND_OPT_TLA_ETH,
2116 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2117 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2120 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2122 .help = "target Ethernet LLA",
2123 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2125 .args = ARGS(ARGS_ENTRY_HTON
2126 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2130 .help = "match metadata header",
2131 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2132 .next = NEXT(item_meta),
2135 [ITEM_META_DATA] = {
2137 .help = "metadata value",
2138 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2139 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2140 data, "\xff\xff\xff\xff")),
2143 /* Validate/create actions. */
2146 .help = "submit a list of associated actions",
2147 .next = NEXT(next_action),
2152 .help = "specify next action",
2153 .next = NEXT(next_action),
2157 .help = "end list of actions",
2158 .priv = PRIV_ACTION(END, 0),
2163 .help = "no-op action",
2164 .priv = PRIV_ACTION(VOID, 0),
2165 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2168 [ACTION_PASSTHRU] = {
2170 .help = "let subsequent rule process matched packets",
2171 .priv = PRIV_ACTION(PASSTHRU, 0),
2172 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2177 .help = "redirect traffic to a given group",
2178 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2179 .next = NEXT(action_jump),
2182 [ACTION_JUMP_GROUP] = {
2184 .help = "group to redirect traffic to",
2185 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2186 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2187 .call = parse_vc_conf,
2191 .help = "attach 32 bit value to packets",
2192 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2193 .next = NEXT(action_mark),
2196 [ACTION_MARK_ID] = {
2198 .help = "32 bit value to return with packets",
2199 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2200 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2201 .call = parse_vc_conf,
2205 .help = "flag packets",
2206 .priv = PRIV_ACTION(FLAG, 0),
2207 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2212 .help = "assign packets to a given queue index",
2213 .priv = PRIV_ACTION(QUEUE,
2214 sizeof(struct rte_flow_action_queue)),
2215 .next = NEXT(action_queue),
2218 [ACTION_QUEUE_INDEX] = {
2220 .help = "queue index to use",
2221 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2222 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2223 .call = parse_vc_conf,
2227 .help = "drop packets (note: passthru has priority)",
2228 .priv = PRIV_ACTION(DROP, 0),
2229 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2234 .help = "enable counters for this rule",
2235 .priv = PRIV_ACTION(COUNT,
2236 sizeof(struct rte_flow_action_count)),
2237 .next = NEXT(action_count),
2240 [ACTION_COUNT_ID] = {
2241 .name = "identifier",
2242 .help = "counter identifier to use",
2243 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2244 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2245 .call = parse_vc_conf,
2247 [ACTION_COUNT_SHARED] = {
2249 .help = "shared counter",
2250 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2251 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2253 .call = parse_vc_conf,
2257 .help = "spread packets among several queues",
2258 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2259 .next = NEXT(action_rss),
2260 .call = parse_vc_action_rss,
2262 [ACTION_RSS_FUNC] = {
2264 .help = "RSS hash function to apply",
2265 .next = NEXT(action_rss,
2266 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2267 ACTION_RSS_FUNC_TOEPLITZ,
2268 ACTION_RSS_FUNC_SIMPLE_XOR)),
2270 [ACTION_RSS_FUNC_DEFAULT] = {
2272 .help = "default hash function",
2273 .call = parse_vc_action_rss_func,
2275 [ACTION_RSS_FUNC_TOEPLITZ] = {
2277 .help = "Toeplitz hash function",
2278 .call = parse_vc_action_rss_func,
2280 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2281 .name = "simple_xor",
2282 .help = "simple XOR hash function",
2283 .call = parse_vc_action_rss_func,
2285 [ACTION_RSS_LEVEL] = {
2287 .help = "encapsulation level for \"types\"",
2288 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2289 .args = ARGS(ARGS_ENTRY_ARB
2290 (offsetof(struct action_rss_data, conf) +
2291 offsetof(struct rte_flow_action_rss, level),
2292 sizeof(((struct rte_flow_action_rss *)0)->
2295 [ACTION_RSS_TYPES] = {
2297 .help = "specific RSS hash types",
2298 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2300 [ACTION_RSS_TYPE] = {
2302 .help = "RSS hash type",
2303 .call = parse_vc_action_rss_type,
2304 .comp = comp_vc_action_rss_type,
2306 [ACTION_RSS_KEY] = {
2308 .help = "RSS hash key",
2309 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
2310 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2312 (offsetof(struct action_rss_data, conf) +
2313 offsetof(struct rte_flow_action_rss, key_len),
2314 sizeof(((struct rte_flow_action_rss *)0)->
2316 ARGS_ENTRY(struct action_rss_data, key)),
2318 [ACTION_RSS_KEY_LEN] = {
2320 .help = "RSS hash key length in bytes",
2321 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2322 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2323 (offsetof(struct action_rss_data, conf) +
2324 offsetof(struct rte_flow_action_rss, key_len),
2325 sizeof(((struct rte_flow_action_rss *)0)->
2328 RSS_HASH_KEY_LENGTH)),
2330 [ACTION_RSS_QUEUES] = {
2332 .help = "queue indices to use",
2333 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2334 .call = parse_vc_conf,
2336 [ACTION_RSS_QUEUE] = {
2338 .help = "queue index",
2339 .call = parse_vc_action_rss_queue,
2340 .comp = comp_vc_action_rss_queue,
2344 .help = "direct traffic to physical function",
2345 .priv = PRIV_ACTION(PF, 0),
2346 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2351 .help = "direct traffic to a virtual function ID",
2352 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2353 .next = NEXT(action_vf),
2356 [ACTION_VF_ORIGINAL] = {
2358 .help = "use original VF ID if possible",
2359 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2360 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2362 .call = parse_vc_conf,
2367 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2368 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2369 .call = parse_vc_conf,
2371 [ACTION_PHY_PORT] = {
2373 .help = "direct packets to physical port index",
2374 .priv = PRIV_ACTION(PHY_PORT,
2375 sizeof(struct rte_flow_action_phy_port)),
2376 .next = NEXT(action_phy_port),
2379 [ACTION_PHY_PORT_ORIGINAL] = {
2381 .help = "use original port index if possible",
2382 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2383 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2385 .call = parse_vc_conf,
2387 [ACTION_PHY_PORT_INDEX] = {
2389 .help = "physical port index",
2390 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2391 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2393 .call = parse_vc_conf,
2395 [ACTION_PORT_ID] = {
2397 .help = "direct matching traffic to a given DPDK port ID",
2398 .priv = PRIV_ACTION(PORT_ID,
2399 sizeof(struct rte_flow_action_port_id)),
2400 .next = NEXT(action_port_id),
2403 [ACTION_PORT_ID_ORIGINAL] = {
2405 .help = "use original DPDK port ID if possible",
2406 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2407 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2409 .call = parse_vc_conf,
2411 [ACTION_PORT_ID_ID] = {
2413 .help = "DPDK port ID",
2414 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2415 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2416 .call = parse_vc_conf,
2420 .help = "meter the directed packets at given id",
2421 .priv = PRIV_ACTION(METER,
2422 sizeof(struct rte_flow_action_meter)),
2423 .next = NEXT(action_meter),
2426 [ACTION_METER_ID] = {
2428 .help = "meter id to use",
2429 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2430 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2431 .call = parse_vc_conf,
2433 [ACTION_OF_SET_MPLS_TTL] = {
2434 .name = "of_set_mpls_ttl",
2435 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2438 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2439 .next = NEXT(action_of_set_mpls_ttl),
2442 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2445 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2446 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2448 .call = parse_vc_conf,
2450 [ACTION_OF_DEC_MPLS_TTL] = {
2451 .name = "of_dec_mpls_ttl",
2452 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2453 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2454 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2457 [ACTION_OF_SET_NW_TTL] = {
2458 .name = "of_set_nw_ttl",
2459 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2462 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2463 .next = NEXT(action_of_set_nw_ttl),
2466 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2469 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2470 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2472 .call = parse_vc_conf,
2474 [ACTION_OF_DEC_NW_TTL] = {
2475 .name = "of_dec_nw_ttl",
2476 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2477 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2478 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2481 [ACTION_OF_COPY_TTL_OUT] = {
2482 .name = "of_copy_ttl_out",
2483 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2484 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2485 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2488 [ACTION_OF_COPY_TTL_IN] = {
2489 .name = "of_copy_ttl_in",
2490 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2491 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2492 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2495 [ACTION_OF_POP_VLAN] = {
2496 .name = "of_pop_vlan",
2497 .help = "OpenFlow's OFPAT_POP_VLAN",
2498 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2499 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2502 [ACTION_OF_PUSH_VLAN] = {
2503 .name = "of_push_vlan",
2504 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2507 sizeof(struct rte_flow_action_of_push_vlan)),
2508 .next = NEXT(action_of_push_vlan),
2511 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2512 .name = "ethertype",
2513 .help = "EtherType",
2514 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2515 .args = ARGS(ARGS_ENTRY_HTON
2516 (struct rte_flow_action_of_push_vlan,
2518 .call = parse_vc_conf,
2520 [ACTION_OF_SET_VLAN_VID] = {
2521 .name = "of_set_vlan_vid",
2522 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2525 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2526 .next = NEXT(action_of_set_vlan_vid),
2529 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2532 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2533 .args = ARGS(ARGS_ENTRY_HTON
2534 (struct rte_flow_action_of_set_vlan_vid,
2536 .call = parse_vc_conf,
2538 [ACTION_OF_SET_VLAN_PCP] = {
2539 .name = "of_set_vlan_pcp",
2540 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2543 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2544 .next = NEXT(action_of_set_vlan_pcp),
2547 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2549 .help = "VLAN priority",
2550 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2551 .args = ARGS(ARGS_ENTRY_HTON
2552 (struct rte_flow_action_of_set_vlan_pcp,
2554 .call = parse_vc_conf,
2556 [ACTION_OF_POP_MPLS] = {
2557 .name = "of_pop_mpls",
2558 .help = "OpenFlow's OFPAT_POP_MPLS",
2559 .priv = PRIV_ACTION(OF_POP_MPLS,
2560 sizeof(struct rte_flow_action_of_pop_mpls)),
2561 .next = NEXT(action_of_pop_mpls),
2564 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2565 .name = "ethertype",
2566 .help = "EtherType",
2567 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2568 .args = ARGS(ARGS_ENTRY_HTON
2569 (struct rte_flow_action_of_pop_mpls,
2571 .call = parse_vc_conf,
2573 [ACTION_OF_PUSH_MPLS] = {
2574 .name = "of_push_mpls",
2575 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2578 sizeof(struct rte_flow_action_of_push_mpls)),
2579 .next = NEXT(action_of_push_mpls),
2582 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2583 .name = "ethertype",
2584 .help = "EtherType",
2585 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2586 .args = ARGS(ARGS_ENTRY_HTON
2587 (struct rte_flow_action_of_push_mpls,
2589 .call = parse_vc_conf,
2591 [ACTION_VXLAN_ENCAP] = {
2592 .name = "vxlan_encap",
2593 .help = "VXLAN encapsulation, uses configuration set by \"set"
2595 .priv = PRIV_ACTION(VXLAN_ENCAP,
2596 sizeof(struct action_vxlan_encap_data)),
2597 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2598 .call = parse_vc_action_vxlan_encap,
2600 [ACTION_VXLAN_DECAP] = {
2601 .name = "vxlan_decap",
2602 .help = "Performs a decapsulation action by stripping all"
2603 " headers of the VXLAN tunnel network overlay from the"
2605 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2606 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2609 [ACTION_NVGRE_ENCAP] = {
2610 .name = "nvgre_encap",
2611 .help = "NVGRE encapsulation, uses configuration set by \"set"
2613 .priv = PRIV_ACTION(NVGRE_ENCAP,
2614 sizeof(struct action_nvgre_encap_data)),
2615 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2616 .call = parse_vc_action_nvgre_encap,
2618 [ACTION_NVGRE_DECAP] = {
2619 .name = "nvgre_decap",
2620 .help = "Performs a decapsulation action by stripping all"
2621 " headers of the NVGRE tunnel network overlay from the"
2623 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2624 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2627 [ACTION_L2_ENCAP] = {
2629 .help = "l2 encap, uses configuration set by"
2630 " \"set l2_encap\"",
2631 .priv = PRIV_ACTION(RAW_ENCAP,
2632 sizeof(struct action_raw_encap_data)),
2633 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2634 .call = parse_vc_action_l2_encap,
2636 [ACTION_L2_DECAP] = {
2638 .help = "l2 decap, uses configuration set by"
2639 " \"set l2_decap\"",
2640 .priv = PRIV_ACTION(RAW_DECAP,
2641 sizeof(struct action_raw_decap_data)),
2642 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2643 .call = parse_vc_action_l2_decap,
2645 [ACTION_MPLSOGRE_ENCAP] = {
2646 .name = "mplsogre_encap",
2647 .help = "mplsogre encapsulation, uses configuration set by"
2648 " \"set mplsogre_encap\"",
2649 .priv = PRIV_ACTION(RAW_ENCAP,
2650 sizeof(struct action_raw_encap_data)),
2651 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2652 .call = parse_vc_action_mplsogre_encap,
2654 [ACTION_MPLSOGRE_DECAP] = {
2655 .name = "mplsogre_decap",
2656 .help = "mplsogre decapsulation, uses configuration set by"
2657 " \"set mplsogre_decap\"",
2658 .priv = PRIV_ACTION(RAW_DECAP,
2659 sizeof(struct action_raw_decap_data)),
2660 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2661 .call = parse_vc_action_mplsogre_decap,
2663 [ACTION_MPLSOUDP_ENCAP] = {
2664 .name = "mplsoudp_encap",
2665 .help = "mplsoudp encapsulation, uses configuration set by"
2666 " \"set mplsoudp_encap\"",
2667 .priv = PRIV_ACTION(RAW_ENCAP,
2668 sizeof(struct action_raw_encap_data)),
2669 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2670 .call = parse_vc_action_mplsoudp_encap,
2672 [ACTION_MPLSOUDP_DECAP] = {
2673 .name = "mplsoudp_decap",
2674 .help = "mplsoudp decapsulation, uses configuration set by"
2675 " \"set mplsoudp_decap\"",
2676 .priv = PRIV_ACTION(RAW_DECAP,
2677 sizeof(struct action_raw_decap_data)),
2678 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2679 .call = parse_vc_action_mplsoudp_decap,
2681 [ACTION_SET_IPV4_SRC] = {
2682 .name = "set_ipv4_src",
2683 .help = "Set a new IPv4 source address in the outermost"
2685 .priv = PRIV_ACTION(SET_IPV4_SRC,
2686 sizeof(struct rte_flow_action_set_ipv4)),
2687 .next = NEXT(action_set_ipv4_src),
2690 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2691 .name = "ipv4_addr",
2692 .help = "new IPv4 source address to set",
2693 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2694 .args = ARGS(ARGS_ENTRY_HTON
2695 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2696 .call = parse_vc_conf,
2698 [ACTION_SET_IPV4_DST] = {
2699 .name = "set_ipv4_dst",
2700 .help = "Set a new IPv4 destination address in the outermost"
2702 .priv = PRIV_ACTION(SET_IPV4_DST,
2703 sizeof(struct rte_flow_action_set_ipv4)),
2704 .next = NEXT(action_set_ipv4_dst),
2707 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2708 .name = "ipv4_addr",
2709 .help = "new IPv4 destination address to set",
2710 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2711 .args = ARGS(ARGS_ENTRY_HTON
2712 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2713 .call = parse_vc_conf,
2715 [ACTION_SET_IPV6_SRC] = {
2716 .name = "set_ipv6_src",
2717 .help = "Set a new IPv6 source address in the outermost"
2719 .priv = PRIV_ACTION(SET_IPV6_SRC,
2720 sizeof(struct rte_flow_action_set_ipv6)),
2721 .next = NEXT(action_set_ipv6_src),
2724 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2725 .name = "ipv6_addr",
2726 .help = "new IPv6 source address to set",
2727 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2728 .args = ARGS(ARGS_ENTRY_HTON
2729 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2730 .call = parse_vc_conf,
2732 [ACTION_SET_IPV6_DST] = {
2733 .name = "set_ipv6_dst",
2734 .help = "Set a new IPv6 destination address in the outermost"
2736 .priv = PRIV_ACTION(SET_IPV6_DST,
2737 sizeof(struct rte_flow_action_set_ipv6)),
2738 .next = NEXT(action_set_ipv6_dst),
2741 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2742 .name = "ipv6_addr",
2743 .help = "new IPv6 destination address to set",
2744 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2745 .args = ARGS(ARGS_ENTRY_HTON
2746 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2747 .call = parse_vc_conf,
2749 [ACTION_SET_TP_SRC] = {
2750 .name = "set_tp_src",
2751 .help = "set a new source port number in the outermost"
2753 .priv = PRIV_ACTION(SET_TP_SRC,
2754 sizeof(struct rte_flow_action_set_tp)),
2755 .next = NEXT(action_set_tp_src),
2758 [ACTION_SET_TP_SRC_TP_SRC] = {
2760 .help = "new source port number to set",
2761 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2762 .args = ARGS(ARGS_ENTRY_HTON
2763 (struct rte_flow_action_set_tp, port)),
2764 .call = parse_vc_conf,
2766 [ACTION_SET_TP_DST] = {
2767 .name = "set_tp_dst",
2768 .help = "set a new destination port number in the outermost"
2770 .priv = PRIV_ACTION(SET_TP_DST,
2771 sizeof(struct rte_flow_action_set_tp)),
2772 .next = NEXT(action_set_tp_dst),
2775 [ACTION_SET_TP_DST_TP_DST] = {
2777 .help = "new destination port number to set",
2778 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2779 .args = ARGS(ARGS_ENTRY_HTON
2780 (struct rte_flow_action_set_tp, port)),
2781 .call = parse_vc_conf,
2783 [ACTION_MAC_SWAP] = {
2785 .help = "Swap the source and destination MAC addresses"
2786 " in the outermost Ethernet header",
2787 .priv = PRIV_ACTION(MAC_SWAP, 0),
2788 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2791 [ACTION_DEC_TTL] = {
2793 .help = "decrease network TTL if available",
2794 .priv = PRIV_ACTION(DEC_TTL, 0),
2795 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2798 [ACTION_SET_TTL] = {
2800 .help = "set ttl value",
2801 .priv = PRIV_ACTION(SET_TTL,
2802 sizeof(struct rte_flow_action_set_ttl)),
2803 .next = NEXT(action_set_ttl),
2806 [ACTION_SET_TTL_TTL] = {
2807 .name = "ttl_value",
2808 .help = "new ttl value to set",
2809 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2810 .args = ARGS(ARGS_ENTRY_HTON
2811 (struct rte_flow_action_set_ttl, ttl_value)),
2812 .call = parse_vc_conf,
2814 [ACTION_SET_MAC_SRC] = {
2815 .name = "set_mac_src",
2816 .help = "set source mac address",
2817 .priv = PRIV_ACTION(SET_MAC_SRC,
2818 sizeof(struct rte_flow_action_set_mac)),
2819 .next = NEXT(action_set_mac_src),
2822 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2824 .help = "new source mac address",
2825 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2826 .args = ARGS(ARGS_ENTRY_HTON
2827 (struct rte_flow_action_set_mac, mac_addr)),
2828 .call = parse_vc_conf,
2830 [ACTION_SET_MAC_DST] = {
2831 .name = "set_mac_dst",
2832 .help = "set destination mac address",
2833 .priv = PRIV_ACTION(SET_MAC_DST,
2834 sizeof(struct rte_flow_action_set_mac)),
2835 .next = NEXT(action_set_mac_dst),
2838 [ACTION_SET_MAC_DST_MAC_DST] = {
2840 .help = "new destination mac address to set",
2841 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
2842 .args = ARGS(ARGS_ENTRY_HTON
2843 (struct rte_flow_action_set_mac, mac_addr)),
2844 .call = parse_vc_conf,
2848 /** Remove and return last entry from argument stack. */
2849 static const struct arg *
2850 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty; otherwise pops and returns the top. */
2852 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2855 /** Add entry on top of the argument stack. */
/* NOTE(review): the return-type line is elided in this chunk -- presumably an
 * int status (nonzero when the push is rejected); confirm against full file. */
2857 push_args(struct context *ctx, const struct arg *arg)
/* Refuse to push past the fixed-capacity stack (failure path elided here). */
2859 if (ctx->args_num == CTX_STACK_SIZE)
2861 ctx->args[ctx->args_num++] = arg;
2865 /** Spread value into buffer according to bit-mask. */
/* Scatters the low-order bits of "val" into destination bytes selected by
 * arg->mask, one mask byte at a time.  The NULL-dst call in parse_prefix()
 * suggests a dry-run mode that only measures the field -- TODO confirm; the
 * early part of this body is elided in this chunk. */
2867 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
2869 uint32_t i = arg->size;
/* On little-endian hosts the mask is walked from its end so bit placement
 * matches the network-byte-order layout used elsewhere in this parser. */
2877 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2886 unsigned int shift = 0;
2887 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Visit every bit position covered by the current mask byte. */
2889 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip positions the mask does not select. */
2890 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the next value bit. */
2895 *buf &= ~(1 << shift);
2896 *buf |= (val & 1) << shift;
2904 /** Compare a string with a partial one of a given length. */
/* Like strncmp(), but additionally reports a mismatch when "full" is longer
 * than the partial string -- so "eth" does not accidentally match "ethertype".
 * (Return-type line elided in this chunk; presumably static int.) */
2906 strcmp_partial(const char *full, const char *partial, size_t partial_len)
2908 int r = strncmp(full, partial, partial_len);
/* Equal so far: the match is exact only if "full" ends here as well. */
2912 if (strlen(full) <= partial_len)
/* Leftover character of "full" is nonzero, flagging a strict-prefix match. */
2914 return full[partial_len];
2918  * Parse a prefix length and generate a bit-mask.
2920  * Last argument (ctx->args) is retrieved to determine mask size, storage
2921  * location and whether the result must use network byte ordering.
2924 parse_prefix(struct context *ctx, const struct token *token,
2925 const char *str, unsigned int len,
2926 void *buf, unsigned int size)
2928 const struct arg *arg = pop_args(ctx);
/* conv[n] is a byte whose n most-significant bits are set (n in 0..8). */
2929 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
2936 /* Argument is expected. */
/* Parse the numeric prefix length; the whole token text must be consumed. */
2940 u = strtoumax(str, &end, 0);
2941 if (errno || (size_t)(end - str) != len)
/* Nonzero means a bit-field argument handled via arg_entry_bf_fill(). */
2946 extra = arg_entry_bf_fill(NULL, 0, arg);
2955 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
2956 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes wider than the destination field. */
2963 if (bytes > size || bytes + !!extra > size)
2967 buf = (uint8_t *)ctx->object + arg->offset;
/* Byte-order dependent fill: whole 0xff bytes first, then the partial byte. */
2968 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2970 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
2971 memset(buf, 0x00, size - bytes);
2973 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
2977 memset(buf, 0xff, bytes);
2978 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
2980 ((uint8_t *)buf)[bytes] = conv[extra];
2983 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Presumably restores the popped argument on an error path so the caller's
 * stack is unchanged -- NOTE(review): surrounding lines are elided here. */
2986 push_args(ctx, arg);
2990 /** Default parsing function for token name matching. */
2992 parse_default(struct context *ctx, const struct token *token,
2993 const char *str, unsigned int len,
2994 void *buf, unsigned int size)
/* Match succeeds only when "str" equals the full token name; strcmp_partial()
 * rejects strict prefixes. */
2999 if (strcmp_partial(token->name, str, len))
3004 /** Parse flow command, initialize output buffer for subsequent tokens. */
3006 parse_init(struct context *ctx, const struct token *token,
3007 const char *str, unsigned int len,
3008 void *buf, unsigned int size)
3010 struct buffer *out = buf;
3012 /* Token name must match. */
3013 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3015 /* Nothing else to do if there is no buffer. */
3018 /* Make sure buffer is large enough. */
3019 if (size < sizeof(*out))
3021 /* Initialize buffer. */
/* The 0x22 poison byte beyond the struct helps catch later reads of the
 * uninitialized tail of the caller-supplied buffer. */
3022 memset(out, 0x00, sizeof(*out));
3023 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3026 ctx->objmask = NULL;
3030 /** Parse tokens for validate/create commands. */
3032 parse_vc(struct context *ctx, const struct token *token,
3033 const char *str, unsigned int len,
3034 void *buf, unsigned int size)
3036 struct buffer *out = buf;
3040 /* Token name must match. */
3041 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3043 /* Nothing else to do if there is no buffer. */
/* First call for this command: record VALIDATE/CREATE in the buffer. */
3046 if (!out->command) {
3047 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3049 if (sizeof(*out) > size)
3051 out->command = ctx->curr;
3054 ctx->objmask = NULL;
/*
 * Scratch data grows downward from the end of the buffer while
 * item/action arrays grow upward after the header; the two must not meet.
 */
3055 out->args.vc.data = (uint8_t *)out + size;
3059 ctx->object = &out->args.vc.attr;
3060 ctx->objmask = NULL;
3061 switch (ctx->curr) {
3066 out->args.vc.attr.ingress = 1;
3069 out->args.vc.attr.egress = 1;
3072 out->args.vc.attr.transfer = 1;
/* "pattern" token: carve an aligned rte_flow_item array right after *out. */
3075 out->args.vc.pattern =
3076 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3078 ctx->object = out->args.vc.pattern;
3079 ctx->objmask = NULL;
/* "actions" token: action array starts right after the last pattern item. */
3082 out->args.vc.actions =
3083 (void *)RTE_ALIGN_CEIL((uintptr_t)
3084 (out->args.vc.pattern +
3085 out->args.vc.pattern_n),
3087 ctx->object = out->args.vc.actions;
3088 ctx->objmask = NULL;
/* Pattern item token (actions array not started yet). */
3095 if (!out->args.vc.actions) {
3096 const struct parse_item_priv *priv = token->priv;
3097 struct rte_flow_item *item =
3098 out->args.vc.pattern + out->args.vc.pattern_n;
/* Each item reserves three objects of priv->size: spec, last, mask. */
3100 data_size = priv->size * 3; /* spec, last, mask */
3101 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3102 (out->args.vc.data - data_size),
/* Reject when the upward-growing item array would overlap the data area. */
3104 if ((uint8_t *)item + sizeof(*item) > data)
3106 *item = (struct rte_flow_item){
3109 ++out->args.vc.pattern_n;
3111 ctx->objmask = NULL;
/* Otherwise this token is an action. */
3113 const struct parse_action_priv *priv = token->priv;
3114 struct rte_flow_action *action =
3115 out->args.vc.actions + out->args.vc.actions_n;
3117 data_size = priv->size; /* configuration */
3118 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3119 (out->args.vc.data - data_size),
3121 if ((uint8_t *)action + sizeof(*action) > data)
3123 *action = (struct rte_flow_action){
/* Zero-sized configurations get a NULL conf pointer. */
3125 .conf = data_size ? data : NULL,
3127 ++out->args.vc.actions_n;
3128 ctx->object = action;
3129 ctx->objmask = NULL;
/* Fresh scratch object for the item/action just added. */
3131 memset(data, 0, data_size);
3132 out->args.vc.data = data;
3133 ctx->objdata = data_size;
3137 /** Parse pattern item parameter type. */
3139 parse_vc_spec(struct context *ctx, const struct token *token,
3140 const char *str, unsigned int len,
3141 void *buf, unsigned int size)
3143 struct buffer *out = buf;
3144 struct rte_flow_item *item;
3150 /* Token name must match. */
3151 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3153 /* Parse parameter types. */
3154 switch (ctx->curr) {
3155 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
/* Each case selects which of the three objects (spec/last/mask) follows. */
3161 case ITEM_PARAM_SPEC:
3164 case ITEM_PARAM_LAST:
3167 case ITEM_PARAM_PREFIX:
3168 /* Modify next token to expect a prefix. */
3169 if (ctx->next_num < 2)
3171 ctx->next[ctx->next_num - 2] = prefix;
3173 case ITEM_PARAM_MASK:
3179 /* Nothing else to do if there is no buffer. */
/* An item must already have been started by parse_vc(). */
3182 if (!out->args.vc.pattern_n)
3184 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
/* parse_vc() stored 3 * priv->size in objdata; recover one object's size. */
3185 data_size = ctx->objdata / 3; /* spec, last, mask */
3186 /* Point to selected object. */
3187 ctx->object = out->args.vc.data + (data_size * index);
/* Third slot of the triple is the mask object. */
3189 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3190 item->mask = ctx->objmask;
3192 ctx->objmask = NULL;
3193 /* Update relevant item pointer. */
3194 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3199 /** Parse action configuration field. */
3201 parse_vc_conf(struct context *ctx, const struct token *token,
3202 const char *str, unsigned int len,
3203 void *buf, unsigned int size)
3205 struct buffer *out = buf;
3208 /* Token name must match. */
3209 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3211 /* Nothing else to do if there is no buffer. */
3214 /* Point to selected object. */
/* Subsequent argument tokens write into the current action's conf area. */
3215 ctx->object = out->args.vc.data;
3216 ctx->objmask = NULL;
3220 /** Parse RSS action. */
3222 parse_vc_action_rss(struct context *ctx, const struct token *token,
3223 const char *str, unsigned int len,
3224 void *buf, unsigned int size)
3226 struct buffer *out = buf;
3227 struct rte_flow_action *action;
3228 struct action_rss_data *action_rss_data;
/* Delegate generic action bookkeeping (entry allocation) to parse_vc(). */
3232 ret = parse_vc(ctx, token, str, len, buf, size);
3235 /* Nothing else to do if there is no buffer. */
3238 if (!out->args.vc.actions_n)
3240 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3241 /* Point to selected object. */
3242 ctx->object = out->args.vc.data;
3243 ctx->objmask = NULL;
3244 /* Set up default configuration. */
3245 action_rss_data = ctx->object;
3246 *action_rss_data = (struct action_rss_data){
3247 .conf = (struct rte_flow_action_rss){
3248 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3251 .key_len = sizeof(action_rss_data->key),
/* Default queue set is capped by ACTION_RSS_QUEUE_NUM. */
3252 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3253 .key = action_rss_data->key,
3254 .queue = action_rss_data->queue,
/* Well-known testpmd default key; intended to be overridden for balancing. */
3256 .key = "testpmd's default RSS hash key, "
3257 "override it for better balancing",
/* Identity queue mapping: queue[i] = i. */
3260 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3261 action_rss_data->queue[i] = i;
/* Clamp key length to the device's reported hash key size when known. */
3262 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3263 ctx->port != (portid_t)RTE_PORT_ALL) {
3264 struct rte_eth_dev_info info;
3266 rte_eth_dev_info_get(ctx->port, &info);
3267 action_rss_data->conf.key_len =
3268 RTE_MIN(sizeof(action_rss_data->key),
3269 info.hash_key_size);
3271 action->conf = &action_rss_data->conf;
3276 * Parse func field for RSS action.
3278 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3279 * ACTION_RSS_FUNC_* index that called this function.
3282 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3283 const char *str, unsigned int len,
3284 void *buf, unsigned int size)
3286 struct action_rss_data *action_rss_data;
3287 enum rte_eth_hash_function func;
3291 /* Token name must match. */
3292 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token index to the corresponding hash function enum. */
3294 switch (ctx->curr) {
3295 case ACTION_RSS_FUNC_DEFAULT:
3296 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3298 case ACTION_RSS_FUNC_TOEPLITZ:
3299 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3301 case ACTION_RSS_FUNC_SIMPLE_XOR:
3302 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object was pointed at the RSS conf by parse_vc_action_rss(). */
3309 action_rss_data = ctx->object;
3310 action_rss_data->conf.func = func;
3315 * Parse type field for RSS action.
3317 * Valid tokens are type field names and the "end" token.
3320 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3321 const char *str, unsigned int len,
3322 void *buf, unsigned int size)
3324 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3325 struct action_rss_data *action_rss_data;
3331 if (ctx->curr != ACTION_RSS_TYPE)
/*
 * High 16 bits of objdata act as a "list started" flag: on the first
 * type token, clear any default types before accumulating.
 */
3333 if (!(ctx->objdata >> 16) && ctx->object) {
3334 action_rss_data = ctx->object;
3335 action_rss_data->conf.types = 0;
/* "end" terminates the list; drop the flag bits. */
3337 if (!strcmp_partial("end", str, len)) {
3338 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type table. */
3341 for (i = 0; rss_type_table[i].str; ++i)
3342 if (!strcmp_partial(rss_type_table[i].str, str, len))
3344 if (!rss_type_table[i].str)
3346 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Repeat this token to allow more type names. */
3348 if (ctx->next_num == RTE_DIM(ctx->next))
3350 ctx->next[ctx->next_num++] = next;
3353 action_rss_data = ctx->object;
3354 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3359 * Parse queue field for RSS action.
3361 * Valid tokens are queue indices and the "end" token.
3364 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3365 const char *str, unsigned int len,
3366 void *buf, unsigned int size)
3368 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3369 struct action_rss_data *action_rss_data;
3376 if (ctx->curr != ACTION_RSS_QUEUE)
/* High 16 bits of objdata hold the number of queues parsed so far. */
3378 i = ctx->objdata >> 16;
3379 if (!strcmp_partial("end", str, len)) {
3380 ctx->objdata &= 0xffff;
/* Bound check against the fixed queue array. */
3383 if (i >= ACTION_RSS_QUEUE_NUM)
/* Push an arbitrary-offset arg so parse_int() stores into queue[i]. */
3386 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3387 i * sizeof(action_rss_data->queue[i]),
3388 sizeof(action_rss_data->queue[i]))))
3390 ret = parse_int(ctx, token, str, len, NULL, 0);
3396 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
/* Repeat this token to allow more queue indices. */
3398 if (ctx->next_num == RTE_DIM(ctx->next))
3400 ctx->next[ctx->next_num++] = next;
3404 action_rss_data = ctx->object;
3405 action_rss_data->conf.queue_num = i;
/* An empty queue list yields a NULL queue pointer. */
3406 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3410 /** Parse VXLAN encap action. */
3412 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3413 const char *str, unsigned int len,
3414 void *buf, unsigned int size)
3416 struct buffer *out = buf;
3417 struct rte_flow_action *action;
3418 struct action_vxlan_encap_data *action_vxlan_encap_data;
/* Generic action bookkeeping first. */
3421 ret = parse_vc(ctx, token, str, len, buf, size);
3424 /* Nothing else to do if there is no buffer. */
3427 if (!out->args.vc.actions_n)
3429 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3430 /* Point to selected object. */
3431 ctx->object = out->args.vc.data;
3432 ctx->objmask = NULL;
3433 /* Set up default configuration. */
/*
 * Build the encapsulation item list (eth/vlan/ipv4/udp/vxlan/end) from
 * the global vxlan_encap_conf set by "set vxlan" commands.
 */
3434 action_vxlan_encap_data = ctx->object;
3435 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3436 .conf = (struct rte_flow_action_vxlan_encap){
3437 .definition = action_vxlan_encap_data->items,
3441 .type = RTE_FLOW_ITEM_TYPE_ETH,
3442 .spec = &action_vxlan_encap_data->item_eth,
3443 .mask = &rte_flow_item_eth_mask,
3446 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3447 .spec = &action_vxlan_encap_data->item_vlan,
3448 .mask = &rte_flow_item_vlan_mask,
3451 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3452 .spec = &action_vxlan_encap_data->item_ipv4,
3453 .mask = &rte_flow_item_ipv4_mask,
3456 .type = RTE_FLOW_ITEM_TYPE_UDP,
3457 .spec = &action_vxlan_encap_data->item_udp,
3458 .mask = &rte_flow_item_udp_mask,
3461 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3462 .spec = &action_vxlan_encap_data->item_vxlan,
3463 .mask = &rte_flow_item_vxlan_mask,
3466 .type = RTE_FLOW_ITEM_TYPE_END,
3471 .tci = vxlan_encap_conf.vlan_tci,
3475 .src_addr = vxlan_encap_conf.ipv4_src,
3476 .dst_addr = vxlan_encap_conf.ipv4_dst,
3479 .src_port = vxlan_encap_conf.udp_src,
3480 .dst_port = vxlan_encap_conf.udp_dst,
3482 .item_vxlan.flags = 0,
3484 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3485 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3486 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3487 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 item (slot 2) with an IPv6 item. */
3488 if (!vxlan_encap_conf.select_ipv4) {
3489 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3490 &vxlan_encap_conf.ipv6_src,
3491 sizeof(vxlan_encap_conf.ipv6_src));
3492 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3493 &vxlan_encap_conf.ipv6_dst,
3494 sizeof(vxlan_encap_conf.ipv6_dst));
3495 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3496 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3497 .spec = &action_vxlan_encap_data->item_ipv6,
3498 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN tag requested: neutralize the VLAN slot. */
3501 if (!vxlan_encap_conf.select_vlan)
3502 action_vxlan_encap_data->items[1].type =
3503 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional TOS/TTL override uses widened static masks for slot 2. */
3504 if (vxlan_encap_conf.select_tos_ttl) {
3505 if (vxlan_encap_conf.select_ipv4) {
3506 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3508 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3509 sizeof(ipv4_mask_tos));
3510 ipv4_mask_tos.hdr.type_of_service = 0xff;
3511 ipv4_mask_tos.hdr.time_to_live = 0xff;
3512 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3513 vxlan_encap_conf.ip_tos;
3514 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3515 vxlan_encap_conf.ip_ttl;
3516 action_vxlan_encap_data->items[2].mask =
3519 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3521 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3522 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3523 ipv6_mask_tos.hdr.vtc_flow |=
3524 RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT);
3525 ipv6_mask_tos.hdr.hop_limits = 0xff;
3526 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3528 ((uint32_t)vxlan_encap_conf.ip_tos <<
3530 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3531 vxlan_encap_conf.ip_ttl;
3532 action_vxlan_encap_data->items[2].mask =
3536 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3537 RTE_DIM(vxlan_encap_conf.vni));
3538 action->conf = &action_vxlan_encap_data->conf;
3542 /** Parse NVGRE encap action. */
3544 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3545 const char *str, unsigned int len,
3546 void *buf, unsigned int size)
3548 struct buffer *out = buf;
3549 struct rte_flow_action *action;
3550 struct action_nvgre_encap_data *action_nvgre_encap_data;
/* Generic action bookkeeping first. */
3553 ret = parse_vc(ctx, token, str, len, buf, size);
3556 /* Nothing else to do if there is no buffer. */
3559 if (!out->args.vc.actions_n)
3561 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3562 /* Point to selected object. */
3563 ctx->object = out->args.vc.data;
3564 ctx->objmask = NULL;
3565 /* Set up default configuration. */
/*
 * Build the encapsulation item list (eth/vlan/ipv4/nvgre/end) from the
 * global nvgre_encap_conf set by "set nvgre" commands.
 */
3566 action_nvgre_encap_data = ctx->object;
3567 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3568 .conf = (struct rte_flow_action_nvgre_encap){
3569 .definition = action_nvgre_encap_data->items,
3573 .type = RTE_FLOW_ITEM_TYPE_ETH,
3574 .spec = &action_nvgre_encap_data->item_eth,
3575 .mask = &rte_flow_item_eth_mask,
3578 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3579 .spec = &action_nvgre_encap_data->item_vlan,
3580 .mask = &rte_flow_item_vlan_mask,
3583 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3584 .spec = &action_nvgre_encap_data->item_ipv4,
3585 .mask = &rte_flow_item_ipv4_mask,
3588 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3589 .spec = &action_nvgre_encap_data->item_nvgre,
3590 .mask = &rte_flow_item_nvgre_mask,
3593 .type = RTE_FLOW_ITEM_TYPE_END,
3598 .tci = nvgre_encap_conf.vlan_tci,
3602 .src_addr = nvgre_encap_conf.ipv4_src,
3603 .dst_addr = nvgre_encap_conf.ipv4_dst,
3605 .item_nvgre.flow_id = 0,
3607 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3608 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3609 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3610 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
/* IPv6 selected: replace the IPv4 item (slot 2) with an IPv6 item. */
3611 if (!nvgre_encap_conf.select_ipv4) {
3612 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3613 &nvgre_encap_conf.ipv6_src,
3614 sizeof(nvgre_encap_conf.ipv6_src));
3615 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3616 &nvgre_encap_conf.ipv6_dst,
3617 sizeof(nvgre_encap_conf.ipv6_dst));
3618 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3619 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3620 .spec = &action_nvgre_encap_data->item_ipv6,
3621 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN tag requested: neutralize the VLAN slot. */
3624 if (!nvgre_encap_conf.select_vlan)
3625 action_nvgre_encap_data->items[1].type =
3626 RTE_FLOW_ITEM_TYPE_VOID;
3627 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3628 RTE_DIM(nvgre_encap_conf.tni));
3629 action->conf = &action_nvgre_encap_data->conf;
3633 /** Parse l2 encap action. */
3635 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3636 const char *str, unsigned int len,
3637 void *buf, unsigned int size)
3639 struct buffer *out = buf;
3640 struct rte_flow_action *action;
3641 struct action_raw_encap_data *action_encap_data;
3642 struct rte_flow_item_eth eth = { .type = 0, };
/*
 * NOTE(review): TCI is seeded from mplsoudp_encap_conf while the rest of
 * this function reads l2_encap_conf — looks like a copy-paste; confirm
 * against the intended "set l2_encap" semantics.
 */
3643 struct rte_flow_item_vlan vlan = {
3644 .tci = mplsoudp_encap_conf.vlan_tci,
/* Generic action bookkeeping first. */
3650 ret = parse_vc(ctx, token, str, len, buf, size);
3653 /* Nothing else to do if there is no buffer. */
3656 if (!out->args.vc.actions_n)
3658 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3659 /* Point to selected object. */
3660 ctx->object = out->args.vc.data;
3661 ctx->objmask = NULL;
3662 /* Copy the headers to the buffer. */
3663 action_encap_data = ctx->object;
3664 *action_encap_data = (struct action_raw_encap_data) {
3665 .conf = (struct rte_flow_action_raw_encap){
3666 .data = action_encap_data->data,
/* Serialize Ethernet (+ optional VLAN) headers into the raw data buffer. */
3670 header = action_encap_data->data;
3671 if (l2_encap_conf.select_vlan)
3672 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3673 else if (l2_encap_conf.select_ipv4)
3674 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3676 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3677 memcpy(eth.dst.addr_bytes,
3678 l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
3679 memcpy(eth.src.addr_bytes,
3680 l2_encap_conf.eth_src, ETHER_ADDR_LEN);
3681 memcpy(header, &eth, sizeof(eth));
3682 header += sizeof(eth);
3683 if (l2_encap_conf.select_vlan) {
3684 if (l2_encap_conf.select_ipv4)
3685 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3687 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3688 memcpy(header, &vlan, sizeof(vlan));
3689 header += sizeof(vlan);
/* Total serialized header length. */
3691 action_encap_data->conf.size = header -
3692 action_encap_data->data;
3693 action->conf = &action_encap_data->conf;
3697 /** Parse l2 decap action. */
3699 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3700 const char *str, unsigned int len,
3701 void *buf, unsigned int size)
3703 struct buffer *out = buf;
3704 struct rte_flow_action *action;
3705 struct action_raw_decap_data *action_decap_data;
3706 struct rte_flow_item_eth eth = { .type = 0, };
/*
 * NOTE(review): TCI comes from mplsoudp_encap_conf although this is the
 * l2 decap path — likely copy-paste; confirm intended source config.
 */
3707 struct rte_flow_item_vlan vlan = {
3708 .tci = mplsoudp_encap_conf.vlan_tci,
/* Generic action bookkeeping first. */
3714 ret = parse_vc(ctx, token, str, len, buf, size);
3717 /* Nothing else to do if there is no buffer. */
3720 if (!out->args.vc.actions_n)
3722 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3723 /* Point to selected object. */
3724 ctx->object = out->args.vc.data;
3725 ctx->objmask = NULL;
3726 /* Copy the headers to the buffer. */
3727 action_decap_data = ctx->object;
3728 *action_decap_data = (struct action_raw_decap_data) {
3729 .conf = (struct rte_flow_action_raw_decap){
3730 .data = action_decap_data->data,
/* Serialize the header template describing what must be stripped. */
3734 header = action_decap_data->data;
3735 if (l2_decap_conf.select_vlan)
3736 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3737 memcpy(header, &eth, sizeof(eth));
3738 header += sizeof(eth);
3739 if (l2_decap_conf.select_vlan) {
3740 memcpy(header, &vlan, sizeof(vlan));
3741 header += sizeof(vlan);
3743 action_decap_data->conf.size = header -
3744 action_decap_data->data;
3745 action->conf = &action_decap_data->conf;
/* Ethertype for MPLS unicast frames (not provided by rte_ether.h here). */
3749 #define ETHER_TYPE_MPLS_UNICAST 0x8847
3751 /** Parse MPLSOGRE encap action. */
3753 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
3754 const char *str, unsigned int len,
3755 void *buf, unsigned int size)
3757 struct buffer *out = buf;
3758 struct rte_flow_action *action;
3759 struct action_raw_encap_data *action_encap_data;
3760 struct rte_flow_item_eth eth = { .type = 0, };
3761 struct rte_flow_item_vlan vlan = {
3762 .tci = mplsogre_encap_conf.vlan_tci,
3765 struct rte_flow_item_ipv4 ipv4 = {
3767 .src_addr = mplsogre_encap_conf.ipv4_src,
3768 .dst_addr = mplsogre_encap_conf.ipv4_dst,
3769 .next_proto_id = IPPROTO_GRE,
3772 struct rte_flow_item_ipv6 ipv6 = {
3774 .proto = IPPROTO_GRE,
3777 struct rte_flow_item_gre gre = {
3778 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3780 struct rte_flow_item_mpls mpls;
/* Generic action bookkeeping first. */
3784 ret = parse_vc(ctx, token, str, len, buf, size);
3787 /* Nothing else to do if there is no buffer. */
3790 if (!out->args.vc.actions_n)
3792 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3793 /* Point to selected object. */
3794 ctx->object = out->args.vc.data;
3795 ctx->objmask = NULL;
3796 /* Copy the headers to the buffer. */
3797 action_encap_data = ctx->object;
3798 *action_encap_data = (struct action_raw_encap_data) {
3799 .conf = (struct rte_flow_action_raw_encap){
3800 .data = action_encap_data->data,
/* Serialize eth [vlan] ip gre mpls headers into the raw data buffer. */
3805 header = action_encap_data->data;
3806 if (mplsogre_encap_conf.select_vlan)
3807 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3808 else if (mplsogre_encap_conf.select_ipv4)
3809 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3811 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3812 memcpy(eth.dst.addr_bytes,
3813 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3814 memcpy(eth.src.addr_bytes,
3815 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3816 memcpy(header, &eth, sizeof(eth));
3817 header += sizeof(eth);
3818 if (mplsogre_encap_conf.select_vlan) {
3819 if (mplsogre_encap_conf.select_ipv4)
3820 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3822 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3823 memcpy(header, &vlan, sizeof(vlan));
3824 header += sizeof(vlan);
3826 if (mplsogre_encap_conf.select_ipv4) {
3827 memcpy(header, &ipv4, sizeof(ipv4));
3828 header += sizeof(ipv4);
3830 memcpy(&ipv6.hdr.src_addr,
3831 &mplsogre_encap_conf.ipv6_src,
3832 sizeof(mplsogre_encap_conf.ipv6_src));
3833 memcpy(&ipv6.hdr.dst_addr,
3834 &mplsogre_encap_conf.ipv6_dst,
3835 sizeof(mplsogre_encap_conf.ipv6_dst));
3836 memcpy(header, &ipv6, sizeof(ipv6));
3837 header += sizeof(ipv6);
3839 memcpy(header, &gre, sizeof(gre));
3840 header += sizeof(gre);
3841 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
3842 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack bit on the single MPLS label. */
3843 mpls.label_tc_s[2] |= 0x1;
3844 memcpy(header, &mpls, sizeof(mpls));
3845 header += sizeof(mpls);
3846 action_encap_data->conf.size = header -
3847 action_encap_data->data;
3848 action->conf = &action_encap_data->conf;
3852 /** Parse MPLSOGRE decap action. */
3854 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
3855 const char *str, unsigned int len,
3856 void *buf, unsigned int size)
3858 struct buffer *out = buf;
3859 struct rte_flow_action *action;
3860 struct action_raw_decap_data *action_decap_data;
3861 struct rte_flow_item_eth eth = { .type = 0, };
3862 struct rte_flow_item_vlan vlan = {.tci = 0};
3863 struct rte_flow_item_ipv4 ipv4 = {
3865 .next_proto_id = IPPROTO_GRE,
3868 struct rte_flow_item_ipv6 ipv6 = {
3870 .proto = IPPROTO_GRE,
3873 struct rte_flow_item_gre gre = {
3874 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3876 struct rte_flow_item_mpls mpls;
/* Generic action bookkeeping first. */
3880 ret = parse_vc(ctx, token, str, len, buf, size);
3883 /* Nothing else to do if there is no buffer. */
3886 if (!out->args.vc.actions_n)
3888 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3889 /* Point to selected object. */
3890 ctx->object = out->args.vc.data;
3891 ctx->objmask = NULL;
3892 /* Copy the headers to the buffer. */
3893 action_decap_data = ctx->object;
3894 *action_decap_data = (struct action_raw_decap_data) {
3895 .conf = (struct rte_flow_action_raw_decap){
3896 .data = action_decap_data->data,
/*
 * NOTE(review): only select_vlan reads mplsogre_decap_conf; the rest of
 * this decap template is derived from mplsogre_encap_conf — verify this
 * mirroring is intentional.
 */
3900 header = action_decap_data->data;
3901 if (mplsogre_decap_conf.select_vlan)
3902 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3903 else if (mplsogre_encap_conf.select_ipv4)
3904 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3906 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3907 memcpy(eth.dst.addr_bytes,
3908 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3909 memcpy(eth.src.addr_bytes,
3910 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3911 memcpy(header, &eth, sizeof(eth));
3912 header += sizeof(eth);
3913 if (mplsogre_encap_conf.select_vlan) {
3914 if (mplsogre_encap_conf.select_ipv4)
3915 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3917 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3918 memcpy(header, &vlan, sizeof(vlan));
3919 header += sizeof(vlan);
3921 if (mplsogre_encap_conf.select_ipv4) {
3922 memcpy(header, &ipv4, sizeof(ipv4));
3923 header += sizeof(ipv4);
3925 memcpy(header, &ipv6, sizeof(ipv6));
3926 header += sizeof(ipv6);
3928 memcpy(header, &gre, sizeof(gre));
3929 header += sizeof(gre);
/* MPLS header content is irrelevant for decap; keep it zeroed. */
3930 memset(&mpls, 0, sizeof(mpls));
3931 memcpy(header, &mpls, sizeof(mpls));
3932 header += sizeof(mpls);
3933 action_decap_data->conf.size = header -
3934 action_decap_data->data;
3935 action->conf = &action_decap_data->conf;
3939 /** Parse MPLSOUDP encap action. */
3941 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
3942 const char *str, unsigned int len,
3943 void *buf, unsigned int size)
3945 struct buffer *out = buf;
3946 struct rte_flow_action *action;
3947 struct action_raw_encap_data *action_encap_data;
3948 struct rte_flow_item_eth eth = { .type = 0, };
3949 struct rte_flow_item_vlan vlan = {
3950 .tci = mplsoudp_encap_conf.vlan_tci,
3953 struct rte_flow_item_ipv4 ipv4 = {
3955 .src_addr = mplsoudp_encap_conf.ipv4_src,
3956 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
3957 .next_proto_id = IPPROTO_UDP,
3960 struct rte_flow_item_ipv6 ipv6 = {
3962 .proto = IPPROTO_UDP,
3965 struct rte_flow_item_udp udp = {
3967 .src_port = mplsoudp_encap_conf.udp_src,
3968 .dst_port = mplsoudp_encap_conf.udp_dst,
3971 struct rte_flow_item_mpls mpls;
/* Generic action bookkeeping first. */
3975 ret = parse_vc(ctx, token, str, len, buf, size);
3978 /* Nothing else to do if there is no buffer. */
3981 if (!out->args.vc.actions_n)
3983 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3984 /* Point to selected object. */
3985 ctx->object = out->args.vc.data;
3986 ctx->objmask = NULL;
3987 /* Copy the headers to the buffer. */
3988 action_encap_data = ctx->object;
3989 *action_encap_data = (struct action_raw_encap_data) {
3990 .conf = (struct rte_flow_action_raw_encap){
3991 .data = action_encap_data->data,
/* Serialize eth [vlan] ip udp mpls headers into the raw data buffer. */
3996 header = action_encap_data->data;
3997 if (mplsoudp_encap_conf.select_vlan)
3998 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3999 else if (mplsoudp_encap_conf.select_ipv4)
4000 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4002 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4003 memcpy(eth.dst.addr_bytes,
4004 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
4005 memcpy(eth.src.addr_bytes,
4006 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
4007 memcpy(header, &eth, sizeof(eth));
4008 header += sizeof(eth);
4009 if (mplsoudp_encap_conf.select_vlan) {
4010 if (mplsoudp_encap_conf.select_ipv4)
4011 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4013 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4014 memcpy(header, &vlan, sizeof(vlan));
4015 header += sizeof(vlan);
4017 if (mplsoudp_encap_conf.select_ipv4) {
4018 memcpy(header, &ipv4, sizeof(ipv4));
4019 header += sizeof(ipv4);
4021 memcpy(&ipv6.hdr.src_addr,
4022 &mplsoudp_encap_conf.ipv6_src,
4023 sizeof(mplsoudp_encap_conf.ipv6_src));
4024 memcpy(&ipv6.hdr.dst_addr,
4025 &mplsoudp_encap_conf.ipv6_dst,
4026 sizeof(mplsoudp_encap_conf.ipv6_dst));
4027 memcpy(header, &ipv6, sizeof(ipv6));
4028 header += sizeof(ipv6);
4030 memcpy(header, &udp, sizeof(udp));
4031 header += sizeof(udp);
4032 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4033 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack bit on the single MPLS label. */
4034 mpls.label_tc_s[2] |= 0x1;
4035 memcpy(header, &mpls, sizeof(mpls));
4036 header += sizeof(mpls);
4037 action_encap_data->conf.size = header -
4038 action_encap_data->data;
4039 action->conf = &action_encap_data->conf;
4043 /** Parse MPLSOUDP decap action. */
4045 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4046 const char *str, unsigned int len,
4047 void *buf, unsigned int size)
4049 struct buffer *out = buf;
4050 struct rte_flow_action *action;
4051 struct action_raw_decap_data *action_decap_data;
4052 struct rte_flow_item_eth eth = { .type = 0, };
4053 struct rte_flow_item_vlan vlan = {.tci = 0};
4054 struct rte_flow_item_ipv4 ipv4 = {
4056 .next_proto_id = IPPROTO_UDP,
4059 struct rte_flow_item_ipv6 ipv6 = {
4061 .proto = IPPROTO_UDP,
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4064 struct rte_flow_item_udp udp = {
4066 .dst_port = rte_cpu_to_be_16(6635),
4069 struct rte_flow_item_mpls mpls;
/* Generic action bookkeeping first. */
4073 ret = parse_vc(ctx, token, str, len, buf, size);
4076 /* Nothing else to do if there is no buffer. */
4079 if (!out->args.vc.actions_n)
4081 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4082 /* Point to selected object. */
4083 ctx->object = out->args.vc.data;
4084 ctx->objmask = NULL;
4085 /* Copy the headers to the buffer. */
4086 action_decap_data = ctx->object;
4087 *action_decap_data = (struct action_raw_decap_data) {
4088 .conf = (struct rte_flow_action_raw_decap){
4089 .data = action_decap_data->data,
/*
 * NOTE(review): only select_vlan reads mplsoudp_decap_conf; the rest of
 * this decap template mirrors mplsoudp_encap_conf — verify intentional.
 */
4093 header = action_decap_data->data;
4094 if (mplsoudp_decap_conf.select_vlan)
4095 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
4096 else if (mplsoudp_encap_conf.select_ipv4)
4097 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4099 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4100 memcpy(eth.dst.addr_bytes,
4101 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
4102 memcpy(eth.src.addr_bytes,
4103 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
4104 memcpy(header, &eth, sizeof(eth));
4105 header += sizeof(eth);
4106 if (mplsoudp_encap_conf.select_vlan) {
4107 if (mplsoudp_encap_conf.select_ipv4)
4108 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4110 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4111 memcpy(header, &vlan, sizeof(vlan));
4112 header += sizeof(vlan);
4114 if (mplsoudp_encap_conf.select_ipv4) {
4115 memcpy(header, &ipv4, sizeof(ipv4));
4116 header += sizeof(ipv4);
4118 memcpy(header, &ipv6, sizeof(ipv6));
4119 header += sizeof(ipv6);
4121 memcpy(header, &udp, sizeof(udp));
4122 header += sizeof(udp);
/* MPLS header content is irrelevant for decap; keep it zeroed. */
4123 memset(&mpls, 0, sizeof(mpls));
4124 memcpy(header, &mpls, sizeof(mpls));
4125 header += sizeof(mpls);
4126 action_decap_data->conf.size = header -
4127 action_decap_data->data;
4128 action->conf = &action_decap_data->conf;
4132 /** Parse tokens for destroy command. */
4134 parse_destroy(struct context *ctx, const struct token *token,
4135 const char *str, unsigned int len,
4136 void *buf, unsigned int size)
4138 struct buffer *out = buf;
4140 /* Token name must match. */
4141 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4143 /* Nothing else to do if there is no buffer. */
/* First call: record the command and place the rule-ID array after *out. */
4146 if (!out->command) {
4147 if (ctx->curr != DESTROY)
4149 if (sizeof(*out) > size)
4151 out->command = ctx->curr;
4154 ctx->objmask = NULL;
4155 out->args.destroy.rule =
4156 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent calls append one rule ID; reject when the buffer is full. */
4160 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4161 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4164 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4165 ctx->objmask = NULL;
4169 /** Parse tokens for flush command. */
4171 parse_flush(struct context *ctx, const struct token *token,
4172 const char *str, unsigned int len,
4173 void *buf, unsigned int size)
4175 struct buffer *out = buf;
4177 /* Token name must match. */
4178 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4180 /* Nothing else to do if there is no buffer. */
/* First call: record the command; flush takes no extra arguments here. */
4183 if (!out->command) {
4184 if (ctx->curr != FLUSH)
4186 if (sizeof(*out) > size)
4188 out->command = ctx->curr;
4191 ctx->objmask = NULL;
4196 /** Parse tokens for query command. */
4198 parse_query(struct context *ctx, const struct token *token,
4199 const char *str, unsigned int len,
4200 void *buf, unsigned int size)
4202 struct buffer *out = buf;
4204 /* Token name must match. */
4205 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4207 /* Nothing else to do if there is no buffer. */
/* First call: record the command for the query arguments that follow. */
4210 if (!out->command) {
4211 if (ctx->curr != QUERY)
4213 if (sizeof(*out) > size)
4215 out->command = ctx->curr;
4218 ctx->objmask = NULL;
4223 /** Parse action names. */
4225 parse_action(struct context *ctx, const struct token *token,
4226 const char *str, unsigned int len,
4227 void *buf, unsigned int size)
4229 struct buffer *out = buf;
4230 const struct arg *arg = pop_args(ctx);
4234 /* Argument is expected. */
4237 /* Parse action name. */
/* Scan the action token list for a (partial) name match. */
4238 for (i = 0; next_action[i]; ++i) {
4239 const struct parse_action_priv *priv;
4241 token = &token_list[next_action[i]];
4242 if (strcmp_partial(token->name, str, len))
/* Store the matched action's value at the arg-described offset. */
4248 memcpy((uint8_t *)ctx->object + arg->offset,
/* Restore the argument on failure so a retry can pop it again. */
4254 push_args(ctx, arg);
4258 /** Parse tokens for list command. */
4260 parse_list(struct context *ctx, const struct token *token,
4261 const char *str, unsigned int len,
4262 void *buf, unsigned int size)
4264 struct buffer *out = buf;
4266 /* Token name must match. */
4267 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4269 /* Nothing else to do if there is no buffer. */
/* First call: record the command and place the group array after *out. */
4272 if (!out->command) {
4273 if (ctx->curr != LIST)
4275 if (sizeof(*out) > size)
4277 out->command = ctx->curr;
4280 ctx->objmask = NULL;
4281 out->args.list.group =
4282 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Subsequent calls append one group ID; reject when the buffer is full. */
4286 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4287 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4290 ctx->object = out->args.list.group + out->args.list.group_n++;
4291 ctx->objmask = NULL;
4295 /** Parse tokens for isolate command. */
4297 parse_isolate(struct context *ctx, const struct token *token,
4298 const char *str, unsigned int len,
4299 void *buf, unsigned int size)
4301 struct buffer *out = buf;
4303 /* Token name must match. */
4304 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4306 /* Nothing else to do if there is no buffer. */
/* First call: record the command for the boolean argument that follows. */
4309 if (!out->command) {
4310 if (ctx->curr != ISOLATE)
4312 if (sizeof(*out) > size)
4314 out->command = ctx->curr;
4317 ctx->objmask = NULL;
4323 * Parse signed/unsigned integers 8 to 64-bit long.
4325 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * NOTE(review): error labels, returns and some branches are elided.
 * Visible behavior: parse via strtoimax()/strtoumax() depending on
 * arg->sign, range-check against arg->min/arg->max, then store into
 * ctx->object at arg->offset with optional host-to-network conversion
 * (arg->hton) for 16/24/32/64-bit fields; bit-fields go through
 * arg_entry_bf_fill(). If ctx->objmask is set, the same store is
 * repeated into the mask with all-ones semantics.
 */
4329 parse_int(struct context *ctx, const struct token *token,
4330 const char *str, unsigned int len,
4331 void *buf, unsigned int size)
4333 const struct arg *arg = pop_args(ctx);
4338 /* Argument is expected. */
/* Signedness is chosen by the argument descriptor, not the literal. */
4343 (uintmax_t)strtoimax(str, &end, 0) :
4344 strtoumax(str, &end, 0);
/* Whole token must be consumed and no conversion error raised. */
4345 if (errno || (size_t)(end - str) != len)
4348 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4349 (intmax_t)u > (intmax_t)arg->max)) ||
4350 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field storage path: value into object, all-ones into mask. */
4355 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4356 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4360 buf = (uint8_t *)ctx->object + arg->offset;
/* Reject values that do not fit the destination field width. */
4362 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4366 case sizeof(uint8_t):
4367 *(uint8_t *)buf = u;
4369 case sizeof(uint16_t):
4370 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields are stored byte by byte in the requested order. */
4372 case sizeof(uint8_t [3]):
4373 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4375 ((uint8_t *)buf)[0] = u;
4376 ((uint8_t *)buf)[1] = u >> 8;
4377 ((uint8_t *)buf)[2] = u >> 16;
4381 ((uint8_t *)buf)[0] = u >> 16;
4382 ((uint8_t *)buf)[1] = u >> 8;
4383 ((uint8_t *)buf)[2] = u;
4385 case sizeof(uint32_t):
4386 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4388 case sizeof(uint64_t):
4389 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the spread mask unless it aliases the object. */
4394 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4396 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path restores the argument stack — TODO confirm in full file. */
4401 push_args(ctx, arg);
4408 * Three arguments (ctx->args) are retrieved from the stack to store data,
4409 * its actual length and address (in that order).
/*
 * NOTE(review): error labels and returns are elided in this excerpt.
 * Visible behavior: pops data/length/address argument descriptors,
 * writes the string length through parse_int() first, copies the raw
 * (non-NUL-terminated) bytes into the data slot, zero-pads the tail,
 * fills the mask with 0xff over the copied length, and optionally saves
 * the data address when arg_addr->size is non-zero. All three args are
 * pushed back on failure.
 */
4412 parse_string(struct context *ctx, const struct token *token,
4413 const char *str, unsigned int len,
4414 void *buf, unsigned int size)
4416 const struct arg *arg_data = pop_args(ctx);
4417 const struct arg *arg_len = pop_args(ctx);
4418 const struct arg *arg_addr = pop_args(ctx);
4419 char tmp[16]; /* Ought to be enough. */
4422 /* Arguments are expected. */
/* Partial pops are undone before bailing out. */
4426 push_args(ctx, arg_data);
4430 push_args(ctx, arg_len);
4431 push_args(ctx, arg_data);
4434 size = arg_data->size;
4435 /* Bit-mask fill is not supported. */
4436 if (arg_data->mask || size < len)
4440 /* Let parse_int() fill length information first. */
4441 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4444 push_args(ctx, arg_len);
4445 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4450 buf = (uint8_t *)ctx->object + arg_data->offset;
4451 /* Output buffer is not necessarily NUL-terminated. */
4452 memcpy(buf, str, len);
/* Zero the unused tail so the field is deterministic. */
4453 memset((uint8_t *)buf + len, 0x00, size - len);
4455 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4456 /* Save address if requested. */
4457 if (arg_addr->size) {
4458 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4460 (uint8_t *)ctx->object + arg_data->offset
4464 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4466 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three argument descriptors in push order. */
4472 push_args(ctx, arg_addr);
4473 push_args(ctx, arg_len);
4474 push_args(ctx, arg_data);
4479 * Parse a MAC address.
4481 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): returns and error labels are elided here.
 * Visible behavior: parse the string with cmdline_parse_etheraddr()
 * into a stack temporary, require the whole token to be consumed, then
 * copy into ctx->object and fill objmask with 0xff. Destination size
 * must match sizeof(struct ether_addr) exactly.
 */
4485 parse_mac_addr(struct context *ctx, const struct token *token,
4486 const char *str, unsigned int len,
4487 void *buf, unsigned int size)
4489 const struct arg *arg = pop_args(ctx);
4490 struct ether_addr tmp;
4494 /* Argument is expected. */
4498 /* Bit-mask fill is not supported. */
4499 if (arg->mask || size != sizeof(tmp))
4501 /* Only network endian is supported. */
4504 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* Parser must consume exactly the token length. */
4505 if (ret < 0 || (unsigned int)ret != len)
4509 buf = (uint8_t *)ctx->object + arg->offset;
4510 memcpy(buf, &tmp, size);
4512 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4515 push_args(ctx, arg);
4520 * Parse an IPv4 address.
4522 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): the str2 declaration/termination lines are elided —
 * presumably a local copy is NUL-terminated before inet_pton(); verify
 * against the full file. On inet_pton() failure the token is retried
 * as a plain integer through parse_int().
 */
4526 parse_ipv4_addr(struct context *ctx, const struct token *token,
4527 const char *str, unsigned int len,
4528 void *buf, unsigned int size)
4530 const struct arg *arg = pop_args(ctx);
4535 /* Argument is expected. */
4539 /* Bit-mask fill is not supported. */
4540 if (arg->mask || size != sizeof(tmp))
4542 /* Only network endian is supported. */
/* inet_pton() needs a NUL-terminated string; work on a bounded copy. */
4545 memcpy(str2, str, len);
4547 ret = inet_pton(AF_INET, str2, &tmp);
4549 /* Attempt integer parsing. */
4550 push_args(ctx, arg);
4551 return parse_int(ctx, token, str, len, buf, size);
4555 buf = (uint8_t *)ctx->object + arg->offset;
4556 memcpy(buf, &tmp, size);
4558 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4561 push_args(ctx, arg);
4566 * Parse an IPv6 address.
4568 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * NOTE(review): same shape as parse_ipv4_addr() but with AF_INET6 and
 * no visible integer fallback. Error labels and returns are elided in
 * this excerpt.
 */
4572 parse_ipv6_addr(struct context *ctx, const struct token *token,
4573 const char *str, unsigned int len,
4574 void *buf, unsigned int size)
4576 const struct arg *arg = pop_args(ctx);
4578 struct in6_addr tmp;
4582 /* Argument is expected. */
4586 /* Bit-mask fill is not supported. */
4587 if (arg->mask || size != sizeof(tmp))
4589 /* Only network endian is supported. */
/* Bounded copy — presumably NUL-terminated on an elided line. */
4592 memcpy(str2, str, len);
4594 ret = inet_pton(AF_INET6, str2, &tmp);
4599 buf = (uint8_t *)ctx->object + arg->offset;
4600 memcpy(buf, &tmp, size);
4602 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4605 push_args(ctx, arg);
4609 /** Boolean values (even indices stand for false). */
/* NOTE(review): initializer entries are elided in this excerpt; the
 * array is NULL-terminated, as parse_boolean()'s scan loop relies on. */
4610 static const char *const boolean_name[] = {
4620 * Parse a boolean value.
4622 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Visible behavior: scan boolean_name[] for a (partial) match; since
 * even indices stand for false, a match at index i is rewritten to the
 * literal "1" or "0" based on i & 1, then delegated to parse_int().
 * Unmatched tokens fall through to parse_int() unchanged. Returns the
 * original token length on success so the caller's cursor advances
 * correctly even though strlen(str) may differ.
 */
4626 parse_boolean(struct context *ctx, const struct token *token,
4627 const char *str, unsigned int len,
4628 void *buf, unsigned int size)
4630 const struct arg *arg = pop_args(ctx);
4634 /* Argument is expected. */
4637 for (i = 0; boolean_name[i]; ++i)
4638 if (!strcmp_partial(boolean_name[i], str, len))
4640 /* Process token as integer. */
4641 if (boolean_name[i])
4642 str = i & 1 ? "1" : "0";
4643 push_args(ctx, arg);
4644 ret = parse_int(ctx, token, str, strlen(str), buf, size);
4645 return ret > 0 ? (int)len : ret;
4648 /** Parse port and update context. */
/*
 * Uses a zero-initialized compound literal as a throwaway buffer so
 * parse_int() can decode the port number, then publishes it into
 * ctx->port. NOTE(review): some setup/return lines are elided here.
 */
4650 parse_port(struct context *ctx, const struct token *token,
4651 const char *str, unsigned int len,
4652 void *buf, unsigned int size)
4654 struct buffer *out = &(struct buffer){ .port = 0 };
4662 ctx->objmask = NULL;
4663 size = sizeof(*out);
4665 ret = parse_int(ctx, token, str, len, out, size);
4667 ctx->port = out->port;
4673 /** No completion. */
/* Stub completion callback for tokens with nothing to suggest;
 * body (return value) is elided in this excerpt. */
4675 comp_none(struct context *ctx, const struct token *token,
4676 unsigned int ent, char *buf, unsigned int size)
4686 /** Complete boolean values. */
/*
 * When buf is NULL the loop merely counts candidates; otherwise the
 * ent-th boolean_name[] entry is written into buf via snprintf().
 */
4688 comp_boolean(struct context *ctx, const struct token *token,
4689 unsigned int ent, char *buf, unsigned int size)
4695 for (i = 0; boolean_name[i]; ++i)
4696 if (buf && i == ent)
4697 return snprintf(buf, size, "%s", boolean_name[i]);
4703 /** Complete action names. */
/*
 * Same count-or-emit pattern as comp_boolean(), iterating the
 * next_action[] token-index list and resolving names via token_list[].
 */
4705 comp_action(struct context *ctx, const struct token *token,
4706 unsigned int ent, char *buf, unsigned int size)
4712 for (i = 0; next_action[i]; ++i)
4713 if (buf && i == ent)
4714 return snprintf(buf, size, "%s",
4715 token_list[next_action[i]].name);
4721 /** Complete available ports. */
/*
 * Iterates attached ethdev ports; emits the ent-th port id when buf is
 * provided, otherwise counts (count/return lines elided here).
 */
4723 comp_port(struct context *ctx, const struct token *token,
4724 unsigned int ent, char *buf, unsigned int size)
4731 RTE_ETH_FOREACH_DEV(p) {
4732 if (buf && i == ent)
4733 return snprintf(buf, size, "%u", p);
4741 /** Complete available rule IDs. */
/*
 * Walks the flow-rule list of the port currently selected in the
 * context (ctx->port); the ent-th rule id is emitted when buf is
 * provided. Invalid or wildcard ports yield no completions.
 */
4743 comp_rule_id(struct context *ctx, const struct token *token,
4744 unsigned int ent, char *buf, unsigned int size)
4747 struct rte_port *port;
4748 struct port_flow *pf;
4751 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4752 ctx->port == (portid_t)RTE_PORT_ALL)
4754 port = &ports[ctx->port];
4755 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4756 if (buf && i == ent)
4757 return snprintf(buf, size, "%u", pf->id);
4765 /** Complete type field for RSS action. */
/*
 * Counts rss_type_table[] entries, emits the ent-th name, and offers
 * the terminating "end" keyword as the final completion entry.
 */
4767 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4768 unsigned int ent, char *buf, unsigned int size)
4774 for (i = 0; rss_type_table[i].str; ++i)
4779 return snprintf(buf, size, "%s", rss_type_table[ent].str);
4781 return snprintf(buf, size, "end");
4785 /** Complete queue field for RSS action. */
/*
 * Suggests queue indices as plain integers (bound check elided in this
 * excerpt) followed by the terminating "end" keyword.
 */
4787 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4788 unsigned int ent, char *buf, unsigned int size)
4795 return snprintf(buf, size, "%u", ent);
4797 return snprintf(buf, size, "end");
4801 /** Internal context. */
/* Single shared parser state — this parser is not reentrant. */
4802 static struct context cmd_flow_context;
4804 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of the file. */
4805 cmdline_parse_inst_t cmd_flow;
4807 /** Initialize context. */
/*
 * Resets the shared parser state field by field (most assignments are
 * elided in this excerpt) rather than memset()-ing the whole struct.
 */
4809 cmd_flow_context_init(struct context *ctx)
4811 /* A full memset() is not necessary. */
4821 ctx->objmask = NULL;
4824 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by librte_cmdline for each whitespace-separated
 * token of the "flow" command. Measures the token, detects end-of-line
 * (handling '#' comments and CR/LF), then tries every candidate token
 * from the top of ctx->next[]; the first whose callback (or
 * parse_default()) consumes exactly `len` bytes wins. The winner's own
 * next-token and argument lists are then pushed for the following call.
 * NOTE(review): many returns/braces are elided in this excerpt.
 */
4826 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4829 struct context *ctx = &cmd_flow_context;
4830 const struct token *token;
4831 const enum index *list;
4836 token = &token_list[ctx->curr];
4837 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
4840 for (len = 0; src[len]; ++len)
4841 if (src[len] == '#' || isspace(src[len]))
4845 /* Last argument and EOL detection. */
4846 for (i = len; src[i]; ++i)
4847 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4849 else if (!isspace(src[i])) {
4854 if (src[i] == '\r' || src[i] == '\n') {
4858 /* Initialize context if necessary. */
4859 if (!ctx->next_num) {
4862 ctx->next[ctx->next_num++] = token->next[0];
4864 /* Process argument through candidates. */
4865 ctx->prev = ctx->curr;
4866 list = ctx->next[ctx->next_num - 1];
4867 for (i = 0; list[i]; ++i) {
4868 const struct token *next = &token_list[list[i]];
4871 ctx->curr = list[i];
/* Tokens without a callback fall back to exact-name matching. */
4873 tmp = next->call(ctx, next, src, len, result, size);
4875 tmp = parse_default(ctx, next, src, len, result, size);
4876 if (tmp == -1 || tmp != len)
4884 /* Push subsequent tokens if any. */
4886 for (i = 0; token->next[i]; ++i) {
4887 if (ctx->next_num == RTE_DIM(ctx->next))
4889 ctx->next[ctx->next_num++] = token->next[i];
4891 /* Push arguments if any. */
4893 for (i = 0; token->args[i]; ++i) {
4894 if (ctx->args_num == RTE_DIM(ctx->args))
4896 ctx->args[ctx->args_num++] = token->args[i];
4901 /** Return number of completion entries (cmdline API). */
/*
 * Counts the candidate tokens at the top of the context's next-token
 * stack (falling back to the current token's first next-list when the
 * stack is empty). When exactly one candidate exists and it has its own
 * completion callback, the count is delegated to that callback.
 */
4903 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
4905 struct context *ctx = &cmd_flow_context;
4906 const struct token *token = &token_list[ctx->curr];
4907 const enum index *list;
4911 /* Count number of tokens in current list. */
4913 list = ctx->next[ctx->next_num - 1];
4915 list = token->next[0];
4916 for (i = 0; list[i]; ++i)
4921 * If there is a single token, use its completion callback, otherwise
4922 * return the number of entries.
4924 token = &token_list[list[0]];
4925 if (i == 1 && token->comp) {
4926 /* Save index for cmd_flow_get_help(). */
4927 ctx->prev = list[0];
4928 return token->comp(ctx, token, 0, NULL, 0);
4933 /** Return a completion entry (cmdline API). */
/*
 * Mirrors cmd_flow_complete_get_nb(): resolves the current candidate
 * list the same way, then either delegates to the single candidate's
 * completion callback or copies the index-th candidate's name into dst.
 * ctx->prev is updated so cmd_flow_get_help() describes this entry.
 */
4935 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
4936 char *dst, unsigned int size)
4938 struct context *ctx = &cmd_flow_context;
4939 const struct token *token = &token_list[ctx->curr];
4940 const enum index *list;
4944 /* Count number of tokens in current list. */
4946 list = ctx->next[ctx->next_num - 1];
4948 list = token->next[0];
4949 for (i = 0; list[i]; ++i)
4953 /* If there is a single token, use its completion callback. */
4954 token = &token_list[list[0]];
4955 if (i == 1 && token->comp) {
4956 /* Save index for cmd_flow_get_help(). */
4957 ctx->prev = list[0];
4958 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
4960 /* Otherwise make sure the index is valid and use defaults. */
4963 token = &token_list[list[index]];
4964 snprintf(dst, size, "%s", token->name);
4965 /* Save index for cmd_flow_get_help(). */
4966 ctx->prev = list[index];
4970 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the token's type string into dst and points the global
 * cmd_flow.help_str at the token's help text (or its name when no help
 * is defined); selection between the two is on elided lines.
 */
4972 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
4974 struct context *ctx = &cmd_flow_context;
4975 const struct token *token = &token_list[ctx->prev];
4980 /* Set token type and update global help with details. */
4981 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
4983 cmd_flow.help_str = token->help;
4985 cmd_flow.help_str = token->name;
4989 /** Token definition template (cmdline API). */
/* Every dynamic token handed out by cmd_flow_tok() points at this
 * single header, routing all cmdline callbacks to the functions above. */
4990 static struct cmdline_token_hdr cmd_flow_token_hdr = {
4991 .ops = &(struct cmdline_token_ops){
4992 .parse = cmd_flow_parse,
4993 .complete_get_nb = cmd_flow_complete_get_nb,
4994 .complete_get_elt = cmd_flow_complete_get_elt,
4995 .get_help = cmd_flow_get_help,
5000 /** Populate the next dynamic token. */
/*
 * Called by librte_cmdline to fetch tokens one by one. Reinitializes
 * the shared context on the first token, stops producing tokens once
 * nothing more is expected, and — when EOL was seen and the current
 * candidate list allows termination — lets the command end early.
 * NOTE(review): several branches/returns are elided in this excerpt.
 */
5002 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5003 cmdline_parse_token_hdr_t **hdr_inst)
5005 struct context *ctx = &cmd_flow_context;
5007 /* Always reinitialize context before requesting the first token. */
5008 if (!(hdr_inst - cmd_flow.tokens))
5009 cmd_flow_context_init(ctx)
5010 /* Return NULL when no more tokens are expected. */
5011 if (!ctx->next_num && ctx->curr) {
5015 /* Determine if command should end here. */
5016 if (ctx->eol && ctx->last && ctx->next_num) {
5017 const enum index *list = ctx->next[ctx->next_num - 1];
5020 for (i = 0; list[i]; ++i) {
5027 *hdr = &cmd_flow_token_hdr;
5030 /** Dispatch parsed buffer to function calls. */
/*
 * Final dispatch: once a complete command has been assembled in *in,
 * route it to the matching port_flow_*() implementation based on
 * in->command (case labels are on elided lines in this excerpt).
 */
5032 cmd_flow_parsed(const struct buffer *in)
5034 switch (in->command) {
5036 port_flow_validate(in->port, &in->args.vc.attr,
5037 in->args.vc.pattern, in->args.vc.actions);
5040 port_flow_create(in->port, &in->args.vc.attr,
5041 in->args.vc.pattern, in->args.vc.actions);
5044 port_flow_destroy(in->port, in->args.destroy.rule_n,
5045 in->args.destroy.rule);
5048 port_flow_flush(in->port);
5051 port_flow_query(in->port, in->args.query.rule,
5052 &in->args.query.action);
5055 port_flow_list(in->port, in->args.list.group_n,
5056 in->args.list.group);
5059 port_flow_isolate(in->port, in->args.isolate.set);
5066 /** Token generator and output processing callback (cmdline API). */
/*
 * Double-duty callback: depending on how librte_cmdline invokes it
 * (selection logic on elided lines), either generates the next dynamic
 * token via cmd_flow_tok() or dispatches the completed buffer through
 * cmd_flow_parsed().
 */
5068 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5071 cmd_flow_tok(arg0, arg2);
5073 cmd_flow_parsed(arg0);
5076 /** Global parser instance (cmdline API). */
5077 cmdline_parse_inst_t cmd_flow = {
5079 .data = NULL, /**< Unused. */
5080 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5083 }, /**< Tokens are returned by cmd_flow_tok(). */