1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_common.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
46 /* Top-level command. */
49 /* Sub-level commands. */
58 /* Destroy arguments. */
61 /* Query arguments. */
67 /* Validate/create arguments. */
74 /* Validate/create pattern. */
111 ITEM_VLAN_INNER_TYPE,
143 ITEM_E_TAG_GRP_ECID_B,
162 ITEM_ARP_ETH_IPV4_SHA,
163 ITEM_ARP_ETH_IPV4_SPA,
164 ITEM_ARP_ETH_IPV4_THA,
165 ITEM_ARP_ETH_IPV4_TPA,
167 ITEM_IPV6_EXT_NEXT_HDR,
172 ITEM_ICMP6_ND_NS_TARGET_ADDR,
174 ITEM_ICMP6_ND_NA_TARGET_ADDR,
176 ITEM_ICMP6_ND_OPT_TYPE,
177 ITEM_ICMP6_ND_OPT_SLA_ETH,
178 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
179 ITEM_ICMP6_ND_OPT_TLA_ETH,
180 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
184 /* Validate/create actions. */
204 ACTION_RSS_FUNC_DEFAULT,
205 ACTION_RSS_FUNC_TOEPLITZ,
206 ACTION_RSS_FUNC_SIMPLE_XOR,
218 ACTION_PHY_PORT_ORIGINAL,
219 ACTION_PHY_PORT_INDEX,
221 ACTION_PORT_ID_ORIGINAL,
225 ACTION_OF_SET_MPLS_TTL,
226 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
227 ACTION_OF_DEC_MPLS_TTL,
228 ACTION_OF_SET_NW_TTL,
229 ACTION_OF_SET_NW_TTL_NW_TTL,
230 ACTION_OF_DEC_NW_TTL,
231 ACTION_OF_COPY_TTL_OUT,
232 ACTION_OF_COPY_TTL_IN,
235 ACTION_OF_PUSH_VLAN_ETHERTYPE,
236 ACTION_OF_SET_VLAN_VID,
237 ACTION_OF_SET_VLAN_VID_VLAN_VID,
238 ACTION_OF_SET_VLAN_PCP,
239 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
241 ACTION_OF_POP_MPLS_ETHERTYPE,
243 ACTION_OF_PUSH_MPLS_ETHERTYPE,
250 ACTION_MPLSOGRE_ENCAP,
251 ACTION_MPLSOGRE_DECAP,
252 ACTION_MPLSOUDP_ENCAP,
253 ACTION_MPLSOUDP_DECAP,
255 ACTION_SET_IPV4_SRC_IPV4_SRC,
257 ACTION_SET_IPV4_DST_IPV4_DST,
259 ACTION_SET_IPV6_SRC_IPV6_SRC,
261 ACTION_SET_IPV6_DST_IPV6_DST,
263 ACTION_SET_TP_SRC_TP_SRC,
265 ACTION_SET_TP_DST_TP_DST,
271 ACTION_SET_MAC_SRC_MAC_SRC,
273 ACTION_SET_MAC_DST_MAC_DST,
276 /** Maximum size for pattern in struct rte_flow_item_raw. */
277 #define ITEM_RAW_PATTERN_SIZE 40
279 /** Storage size for struct rte_flow_item_raw including pattern. */
280 #define ITEM_RAW_SIZE \
281 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
283 /** Maximum number of queue indices in struct rte_flow_action_rss. */
284 #define ACTION_RSS_QUEUE_NUM 32
286 /** Storage for struct rte_flow_action_rss including external data. */
287 struct action_rss_data {
288 struct rte_flow_action_rss conf; /* RSS action configuration; presumably its key/queue pointers target the arrays below -- TODO confirm against the initializer (not visible in this chunk) */
289 uint8_t key[RSS_HASH_KEY_LENGTH]; /* backing storage for the hash key */
290 uint16_t queue[ACTION_RSS_QUEUE_NUM]; /* backing storage for queue indices, at most ACTION_RSS_QUEUE_NUM entries */
293 /** Maximum number of items in struct rte_flow_action_vxlan_encap. */
294 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
296 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
297 struct action_vxlan_encap_data {
298 struct rte_flow_action_vxlan_encap conf;
299 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
300 struct rte_flow_item_eth item_eth;
301 struct rte_flow_item_vlan item_vlan;
303 struct rte_flow_item_ipv4 item_ipv4;
304 struct rte_flow_item_ipv6 item_ipv6;
306 struct rte_flow_item_udp item_udp;
307 struct rte_flow_item_vxlan item_vxlan;
310 /** Maximum number of items in struct rte_flow_action_nvgre_encap. */
311 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
313 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
314 struct action_nvgre_encap_data {
315 struct rte_flow_action_nvgre_encap conf;
316 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
317 struct rte_flow_item_eth item_eth;
318 struct rte_flow_item_vlan item_vlan;
320 struct rte_flow_item_ipv4 item_ipv4;
321 struct rte_flow_item_ipv6 item_ipv6;
323 struct rte_flow_item_nvgre item_nvgre;
326 /** Maximum data size in struct rte_flow_action_raw_encap. */
327 #define ACTION_RAW_ENCAP_MAX_DATA 128
329 /** Storage for struct rte_flow_action_raw_encap including external data. */
330 struct action_raw_encap_data {
331 struct rte_flow_action_raw_encap conf;
332 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
333 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
336 /** Storage for struct rte_flow_action_raw_decap including external data. */
337 struct action_raw_decap_data {
338 struct rte_flow_action_raw_decap conf; /* raw decap configuration */
339 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; /* decap data buffer; reuses the encap size limit (no separate decap limit is defined) */
342 /** Maximum number of subsequent tokens and arguments on the stack. */
343 #define CTX_STACK_SIZE 16
345 /** Parser context. */
347 /** Stack of subsequent token lists to process. */
348 const enum index *next[CTX_STACK_SIZE];
349 /** Arguments for stacked tokens. */
350 const void *args[CTX_STACK_SIZE];
351 enum index curr; /**< Current token index. */
352 enum index prev; /**< Index of the last token seen. */
353 int next_num; /**< Number of entries in next[]. */
354 int args_num; /**< Number of entries in args[]. */
355 uint32_t eol:1; /**< EOL has been detected. */
356 uint32_t last:1; /**< No more arguments. */
357 portid_t port; /**< Current port ID (for completions). */
358 uint32_t objdata; /**< Object-specific data. */
359 void *object; /**< Address of current object for relative offsets. */
360 void *objmask; /**< Object a full mask must be written to. */
363 /** Token argument. */
365 uint32_t hton:1; /**< Use network byte ordering. */
366 uint32_t sign:1; /**< Value is signed. */
367 uint32_t bounded:1; /**< Value is bounded. */
368 uintmax_t min; /**< Minimum value if bounded. */
369 uintmax_t max; /**< Maximum value if bounded. */
370 uint32_t offset; /**< Relative offset from ctx->object. */
371 uint32_t size; /**< Field size. */
372 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
375 /** Parser token definition. */
377 /** Type displayed during completion (defaults to "TOKEN"). */
379 /** Help displayed during completion (defaults to token name). */
381 /** Private data used by parser functions. */
384 * Lists of subsequent tokens to push on the stack. Each call to the
385 * parser consumes the last entry of that stack.
387 const enum index *const *next;
388 /** Arguments stack for subsequent tokens that need them. */
389 const struct arg *const *args;
391 * Token-processing callback, returns -1 in case of error, the
392 * length of the matched string otherwise. If NULL, attempts to
393 * match the token name.
395 * If buf is not NULL, the result should be stored in it according
396 * to context. An error is returned if not large enough.
398 int (*call)(struct context *ctx, const struct token *token,
399 const char *str, unsigned int len,
400 void *buf, unsigned int size);
402 * Callback that provides possible values for this token, used for
403 * completion. Returns -1 in case of error, the number of possible
404 * values otherwise. If NULL, the token name is used.
406 * If buf is not NULL, entry index ent is written to buf and the
407 * full length of the entry is returned (same behavior as
410 int (*comp)(struct context *ctx, const struct token *token,
411 unsigned int ent, char *buf, unsigned int size);
412 /** Mandatory token name, no default value. */
416 /** Static initializer for the next field; the list is NULL-terminated. */
417 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
419 /** Static initializer for a NEXT() entry; the index list is ZERO-terminated. */
420 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
422 /** Static initializer for the args field; the list is NULL-terminated. */
423 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
425 /** Static initializer for ARGS() to target a field. */
426 #define ARGS_ENTRY(s, f) \
427 (&(const struct arg){ \
428 .offset = offsetof(s, f), \
429 .size = sizeof(((s *)0)->f), \
432 /** Static initializer for ARGS() to target a bit-field. */
433 #define ARGS_ENTRY_BF(s, f, b) \
434 (&(const struct arg){ \
436 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
439 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
440 #define ARGS_ENTRY_MASK(s, f, m) \
441 (&(const struct arg){ \
442 .offset = offsetof(s, f), \
443 .size = sizeof(((s *)0)->f), \
444 .mask = (const void *)(m), \
447 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
448 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
449 (&(const struct arg){ \
451 .offset = offsetof(s, f), \
452 .size = sizeof(((s *)0)->f), \
453 .mask = (const void *)(m), \
456 /** Static initializer for ARGS() to target a pointer. */
457 #define ARGS_ENTRY_PTR(s, f) \
458 (&(const struct arg){ \
459 .size = sizeof(*((s *)0)->f), \
462 /** Static initializer for ARGS() with arbitrary offset and size. */
463 #define ARGS_ENTRY_ARB(o, s) \
464 (&(const struct arg){ \
469 /** Same as ARGS_ENTRY_ARB() with bounded values. */
470 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
471 (&(const struct arg){ \
479 /** Same as ARGS_ENTRY() using network byte ordering. */
480 #define ARGS_ENTRY_HTON(s, f) \
481 (&(const struct arg){ \
483 .offset = offsetof(s, f), \
484 .size = sizeof(((s *)0)->f), \
487 /** Parser output buffer layout expected by cmd_flow_parsed(). */
489 enum index command; /**< Flow command. */
490 portid_t port; /**< Affected port ID. */
493 struct rte_flow_attr attr;
494 struct rte_flow_item *pattern;
495 struct rte_flow_action *actions;
499 } vc; /**< Validate/create arguments. */
503 } destroy; /**< Destroy arguments. */
506 struct rte_flow_action action;
507 } query; /**< Query arguments. */
511 } list; /**< List arguments. */
514 } isolate; /**< Isolated mode arguments. */
515 } args; /**< Command arguments. */
518 /** Private data for pattern items. */
519 struct parse_item_priv {
520 enum rte_flow_item_type type; /**< Item type. */
521 uint32_t size; /**< Size of item specification structure. */
524 #define PRIV_ITEM(t, s) \
525 (&(const struct parse_item_priv){ \
526 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
530 /** Private data for actions. */
531 struct parse_action_priv {
532 enum rte_flow_action_type type; /**< Action type. */
533 uint32_t size; /**< Size of action configuration structure. */
536 #define PRIV_ACTION(t, s) \
537 (&(const struct parse_action_priv){ \
538 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
542 static const enum index next_vc_attr[] = {
552 static const enum index next_destroy_attr[] = {
558 static const enum index next_list_attr[] = {
564 static const enum index item_param[] = {
573 static const enum index item_param_is[] = {
578 static const enum index next_item[] = {
614 ITEM_ICMP6_ND_OPT_SLA_ETH,
615 ITEM_ICMP6_ND_OPT_TLA_ETH,
620 static const enum index item_fuzzy[] = {
626 static const enum index item_any[] = {
632 static const enum index item_vf[] = {
638 static const enum index item_phy_port[] = {
644 static const enum index item_port_id[] = {
650 static const enum index item_mark[] = {
656 static const enum index item_raw[] = {
666 static const enum index item_eth[] = {
674 static const enum index item_vlan[] = {
679 ITEM_VLAN_INNER_TYPE,
684 static const enum index item_ipv4[] = {
694 static const enum index item_ipv6[] = {
705 static const enum index item_icmp[] = {
712 static const enum index item_udp[] = {
719 static const enum index item_tcp[] = {
727 static const enum index item_sctp[] = {
736 static const enum index item_vxlan[] = {
742 static const enum index item_e_tag[] = {
743 ITEM_E_TAG_GRP_ECID_B,
748 static const enum index item_nvgre[] = {
754 static const enum index item_mpls[] = {
760 static const enum index item_gre[] = {
766 static const enum index item_gtp[] = {
772 static const enum index item_geneve[] = {
779 static const enum index item_vxlan_gpe[] = {
785 static const enum index item_arp_eth_ipv4[] = {
786 ITEM_ARP_ETH_IPV4_SHA,
787 ITEM_ARP_ETH_IPV4_SPA,
788 ITEM_ARP_ETH_IPV4_THA,
789 ITEM_ARP_ETH_IPV4_TPA,
794 static const enum index item_ipv6_ext[] = {
795 ITEM_IPV6_EXT_NEXT_HDR,
800 static const enum index item_icmp6[] = {
807 static const enum index item_icmp6_nd_ns[] = {
808 ITEM_ICMP6_ND_NS_TARGET_ADDR,
813 static const enum index item_icmp6_nd_na[] = {
814 ITEM_ICMP6_ND_NA_TARGET_ADDR,
819 static const enum index item_icmp6_nd_opt[] = {
820 ITEM_ICMP6_ND_OPT_TYPE,
825 static const enum index item_icmp6_nd_opt_sla_eth[] = {
826 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
831 static const enum index item_icmp6_nd_opt_tla_eth[] = {
832 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
837 static const enum index item_meta[] = {
843 static const enum index next_action[] = {
859 ACTION_OF_SET_MPLS_TTL,
860 ACTION_OF_DEC_MPLS_TTL,
861 ACTION_OF_SET_NW_TTL,
862 ACTION_OF_DEC_NW_TTL,
863 ACTION_OF_COPY_TTL_OUT,
864 ACTION_OF_COPY_TTL_IN,
867 ACTION_OF_SET_VLAN_VID,
868 ACTION_OF_SET_VLAN_PCP,
877 ACTION_MPLSOGRE_ENCAP,
878 ACTION_MPLSOGRE_DECAP,
879 ACTION_MPLSOUDP_ENCAP,
880 ACTION_MPLSOUDP_DECAP,
895 static const enum index action_mark[] = {
901 static const enum index action_queue[] = {
907 static const enum index action_count[] = {
914 static const enum index action_rss[] = {
925 static const enum index action_vf[] = {
932 static const enum index action_phy_port[] = {
933 ACTION_PHY_PORT_ORIGINAL,
934 ACTION_PHY_PORT_INDEX,
939 static const enum index action_port_id[] = {
940 ACTION_PORT_ID_ORIGINAL,
946 static const enum index action_meter[] = {
952 static const enum index action_of_set_mpls_ttl[] = {
953 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
958 static const enum index action_of_set_nw_ttl[] = {
959 ACTION_OF_SET_NW_TTL_NW_TTL,
964 static const enum index action_of_push_vlan[] = {
965 ACTION_OF_PUSH_VLAN_ETHERTYPE,
970 static const enum index action_of_set_vlan_vid[] = {
971 ACTION_OF_SET_VLAN_VID_VLAN_VID,
976 static const enum index action_of_set_vlan_pcp[] = {
977 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
982 static const enum index action_of_pop_mpls[] = {
983 ACTION_OF_POP_MPLS_ETHERTYPE,
988 static const enum index action_of_push_mpls[] = {
989 ACTION_OF_PUSH_MPLS_ETHERTYPE,
994 static const enum index action_set_ipv4_src[] = {
995 ACTION_SET_IPV4_SRC_IPV4_SRC,
1000 static const enum index action_set_mac_src[] = {
1001 ACTION_SET_MAC_SRC_MAC_SRC,
1006 static const enum index action_set_ipv4_dst[] = {
1007 ACTION_SET_IPV4_DST_IPV4_DST,
1012 static const enum index action_set_ipv6_src[] = {
1013 ACTION_SET_IPV6_SRC_IPV6_SRC,
1018 static const enum index action_set_ipv6_dst[] = {
1019 ACTION_SET_IPV6_DST_IPV6_DST,
1024 static const enum index action_set_tp_src[] = {
1025 ACTION_SET_TP_SRC_TP_SRC,
1030 static const enum index action_set_tp_dst[] = {
1031 ACTION_SET_TP_DST_TP_DST,
1036 static const enum index action_set_ttl[] = {
1042 static const enum index action_jump[] = {
1048 static const enum index action_set_mac_dst[] = {
1049 ACTION_SET_MAC_DST_MAC_DST,
/*
 * Forward declarations for token-processing callbacks (struct token.call):
 * each returns -1 on error or the length of the matched string, and stores
 * its result in buf when buf is non-NULL.
 */
1054 static int parse_init(struct context *, const struct token *,
1055 const char *, unsigned int,
1056 void *, unsigned int);
1057 static int parse_vc(struct context *, const struct token *,
1058 const char *, unsigned int,
1059 void *, unsigned int);
1060 static int parse_vc_spec(struct context *, const struct token *,
1061 const char *, unsigned int, void *, unsigned int);
1062 static int parse_vc_conf(struct context *, const struct token *,
1063 const char *, unsigned int, void *, unsigned int);
1064 static int parse_vc_action_rss(struct context *, const struct token *,
1065 const char *, unsigned int, void *,
1067 static int parse_vc_action_rss_func(struct context *, const struct token *,
1068 const char *, unsigned int, void *,
1070 static int parse_vc_action_rss_type(struct context *, const struct token *,
1071 const char *, unsigned int, void *,
1073 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1074 const char *, unsigned int, void *,
1076 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1077 const char *, unsigned int, void *,
1079 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1080 const char *, unsigned int, void *,
1082 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1083 const char *, unsigned int, void *,
1085 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1086 const char *, unsigned int, void *,
1088 static int parse_vc_action_mplsogre_encap(struct context *,
1089 const struct token *, const char *,
1090 unsigned int, void *, unsigned int);
1091 static int parse_vc_action_mplsogre_decap(struct context *,
1092 const struct token *, const char *,
1093 unsigned int, void *, unsigned int);
1094 static int parse_vc_action_mplsoudp_encap(struct context *,
1095 const struct token *, const char *,
1096 unsigned int, void *, unsigned int);
1097 static int parse_vc_action_mplsoudp_decap(struct context *,
1098 const struct token *, const char *,
1099 unsigned int, void *, unsigned int);
1100 static int parse_destroy(struct context *, const struct token *,
1101 const char *, unsigned int,
1102 void *, unsigned int);
1103 static int parse_flush(struct context *, const struct token *,
1104 const char *, unsigned int,
1105 void *, unsigned int);
1106 static int parse_query(struct context *, const struct token *,
1107 const char *, unsigned int,
1108 void *, unsigned int);
1109 static int parse_action(struct context *, const struct token *,
1110 const char *, unsigned int,
1111 void *, unsigned int);
1112 static int parse_list(struct context *, const struct token *,
1113 const char *, unsigned int,
1114 void *, unsigned int);
1115 static int parse_isolate(struct context *, const struct token *,
1116 const char *, unsigned int,
1117 void *, unsigned int);
/* Generic value parsers shared by many tokens (integers, prefixes, strings, addresses). */
1118 static int parse_int(struct context *, const struct token *,
1119 const char *, unsigned int,
1120 void *, unsigned int);
1121 static int parse_prefix(struct context *, const struct token *,
1122 const char *, unsigned int,
1123 void *, unsigned int);
1124 static int parse_boolean(struct context *, const struct token *,
1125 const char *, unsigned int,
1126 void *, unsigned int);
1127 static int parse_string(struct context *, const struct token *,
1128 const char *, unsigned int,
1129 void *, unsigned int);
1130 static int parse_mac_addr(struct context *, const struct token *,
1131 const char *, unsigned int,
1132 void *, unsigned int);
1133 static int parse_ipv4_addr(struct context *, const struct token *,
1134 const char *, unsigned int,
1135 void *, unsigned int);
1136 static int parse_ipv6_addr(struct context *, const struct token *,
1137 const char *, unsigned int,
1138 void *, unsigned int);
1139 static int parse_port(struct context *, const struct token *,
1140 const char *, unsigned int,
1141 void *, unsigned int);
/*
 * Completion callbacks (struct token.comp): each returns -1 on error or the
 * number of possible values; when buf is non-NULL, entry index ent is
 * written to it.
 */
1142 static int comp_none(struct context *, const struct token *,
1143 unsigned int, char *, unsigned int);
1144 static int comp_boolean(struct context *, const struct token *,
1145 unsigned int, char *, unsigned int);
1146 static int comp_action(struct context *, const struct token *,
1147 unsigned int, char *, unsigned int);
1148 static int comp_port(struct context *, const struct token *,
1149 unsigned int, char *, unsigned int);
1150 static int comp_rule_id(struct context *, const struct token *,
1151 unsigned int, char *, unsigned int);
1152 static int comp_vc_action_rss_type(struct context *, const struct token *,
1153 unsigned int, char *, unsigned int);
1154 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1155 unsigned int, char *, unsigned int);
1157 /** Token definitions. */
1158 static const struct token token_list[] = {
1159 /* Special tokens. */
1162 .help = "null entry, abused as the entry point",
1163 .next = NEXT(NEXT_ENTRY(FLOW)),
1168 .help = "command may end here",
1170 /* Common tokens. */
1174 .help = "integer value",
1179 .name = "{unsigned}",
1181 .help = "unsigned integer value",
1188 .help = "prefix length for bit-mask",
1189 .call = parse_prefix,
1193 .name = "{boolean}",
1195 .help = "any boolean value",
1196 .call = parse_boolean,
1197 .comp = comp_boolean,
1202 .help = "fixed string",
1203 .call = parse_string,
1207 .name = "{MAC address}",
1209 .help = "standard MAC address notation",
1210 .call = parse_mac_addr,
1214 .name = "{IPv4 address}",
1215 .type = "IPV4 ADDRESS",
1216 .help = "standard IPv4 address notation",
1217 .call = parse_ipv4_addr,
1221 .name = "{IPv6 address}",
1222 .type = "IPV6 ADDRESS",
1223 .help = "standard IPv6 address notation",
1224 .call = parse_ipv6_addr,
1228 .name = "{rule id}",
1230 .help = "rule identifier",
1232 .comp = comp_rule_id,
1235 .name = "{port_id}",
1237 .help = "port identifier",
1242 .name = "{group_id}",
1244 .help = "group identifier",
1248 [PRIORITY_LEVEL] = {
1251 .help = "priority level",
1255 /* Top-level command. */
1258 .type = "{command} {port_id} [{arg} [...]]",
1259 .help = "manage ingress/egress flow rules",
1260 .next = NEXT(NEXT_ENTRY
1270 /* Sub-level commands. */
1273 .help = "check whether a flow rule can be created",
1274 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1275 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1280 .help = "create a flow rule",
1281 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1282 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1287 .help = "destroy specific flow rules",
1288 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1289 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1290 .call = parse_destroy,
1294 .help = "destroy all flow rules",
1295 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1296 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1297 .call = parse_flush,
1301 .help = "query an existing flow rule",
1302 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1303 NEXT_ENTRY(RULE_ID),
1304 NEXT_ENTRY(PORT_ID)),
1305 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1306 ARGS_ENTRY(struct buffer, args.query.rule),
1307 ARGS_ENTRY(struct buffer, port)),
1308 .call = parse_query,
1312 .help = "list existing flow rules",
1313 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1314 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1319 .help = "restrict ingress traffic to the defined flow rules",
1320 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1321 NEXT_ENTRY(PORT_ID)),
1322 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1323 ARGS_ENTRY(struct buffer, port)),
1324 .call = parse_isolate,
1326 /* Destroy arguments. */
1329 .help = "specify a rule identifier",
1330 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1331 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1332 .call = parse_destroy,
1334 /* Query arguments. */
1338 .help = "action to query, must be part of the rule",
1339 .call = parse_action,
1340 .comp = comp_action,
1342 /* List arguments. */
1345 .help = "specify a group",
1346 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1347 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1350 /* Validate/create attributes. */
1353 .help = "specify a group",
1354 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1355 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1360 .help = "specify a priority level",
1361 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1362 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1367 .help = "affect rule to ingress",
1368 .next = NEXT(next_vc_attr),
1373 .help = "affect rule to egress",
1374 .next = NEXT(next_vc_attr),
1379 .help = "apply rule directly to endpoints found in pattern",
1380 .next = NEXT(next_vc_attr),
1383 /* Validate/create pattern. */
1386 .help = "submit a list of pattern items",
1387 .next = NEXT(next_item),
1392 .help = "match value perfectly (with full bit-mask)",
1393 .call = parse_vc_spec,
1395 [ITEM_PARAM_SPEC] = {
1397 .help = "match value according to configured bit-mask",
1398 .call = parse_vc_spec,
1400 [ITEM_PARAM_LAST] = {
1402 .help = "specify upper bound to establish a range",
1403 .call = parse_vc_spec,
1405 [ITEM_PARAM_MASK] = {
1407 .help = "specify bit-mask with relevant bits set to one",
1408 .call = parse_vc_spec,
1410 [ITEM_PARAM_PREFIX] = {
1412 .help = "generate bit-mask from a prefix length",
1413 .call = parse_vc_spec,
1417 .help = "specify next pattern item",
1418 .next = NEXT(next_item),
1422 .help = "end list of pattern items",
1423 .priv = PRIV_ITEM(END, 0),
1424 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1429 .help = "no-op pattern item",
1430 .priv = PRIV_ITEM(VOID, 0),
1431 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1436 .help = "perform actions when pattern does not match",
1437 .priv = PRIV_ITEM(INVERT, 0),
1438 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1443 .help = "match any protocol for the current layer",
1444 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1445 .next = NEXT(item_any),
1450 .help = "number of layers covered",
1451 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1452 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1456 .help = "match traffic from/to the physical function",
1457 .priv = PRIV_ITEM(PF, 0),
1458 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1463 .help = "match traffic from/to a virtual function ID",
1464 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1465 .next = NEXT(item_vf),
1471 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1472 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1476 .help = "match traffic from/to a specific physical port",
1477 .priv = PRIV_ITEM(PHY_PORT,
1478 sizeof(struct rte_flow_item_phy_port)),
1479 .next = NEXT(item_phy_port),
1482 [ITEM_PHY_PORT_INDEX] = {
1484 .help = "physical port index",
1485 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1486 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1490 .help = "match traffic from/to a given DPDK port ID",
1491 .priv = PRIV_ITEM(PORT_ID,
1492 sizeof(struct rte_flow_item_port_id)),
1493 .next = NEXT(item_port_id),
1496 [ITEM_PORT_ID_ID] = {
1498 .help = "DPDK port ID",
1499 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1500 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1504 .help = "match traffic against value set in previously matched rule",
1505 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1506 .next = NEXT(item_mark),
1511 .help = "Integer value to match against",
1512 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1513 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1517 .help = "match an arbitrary byte string",
1518 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1519 .next = NEXT(item_raw),
1522 [ITEM_RAW_RELATIVE] = {
1524 .help = "look for pattern after the previous item",
1525 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1526 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1529 [ITEM_RAW_SEARCH] = {
1531 .help = "search pattern from offset (see also limit)",
1532 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1533 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1536 [ITEM_RAW_OFFSET] = {
1538 .help = "absolute or relative offset for pattern",
1539 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1540 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1542 [ITEM_RAW_LIMIT] = {
1544 .help = "search area limit for start of pattern",
1545 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1546 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1548 [ITEM_RAW_PATTERN] = {
1550 .help = "byte string to look for",
1551 .next = NEXT(item_raw,
1553 NEXT_ENTRY(ITEM_PARAM_IS,
1556 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1557 ARGS_ENTRY(struct rte_flow_item_raw, length),
1558 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1559 ITEM_RAW_PATTERN_SIZE)),
1563 .help = "match Ethernet header",
1564 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1565 .next = NEXT(item_eth),
1570 .help = "destination MAC",
1571 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1572 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1576 .help = "source MAC",
1577 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1578 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1582 .help = "EtherType",
1583 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1584 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1588 .help = "match 802.1Q/ad VLAN tag",
1589 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1590 .next = NEXT(item_vlan),
1595 .help = "tag control information",
1596 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1597 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1601 .help = "priority code point",
1602 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1603 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1608 .help = "drop eligible indicator",
1609 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1610 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1615 .help = "VLAN identifier",
1616 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1617 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1620 [ITEM_VLAN_INNER_TYPE] = {
1621 .name = "inner_type",
1622 .help = "inner EtherType",
1623 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1624 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1629 .help = "match IPv4 header",
1630 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1631 .next = NEXT(item_ipv4),
1636 .help = "type of service",
1637 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1638 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1639 hdr.type_of_service)),
1643 .help = "time to live",
1644 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1645 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1648 [ITEM_IPV4_PROTO] = {
1650 .help = "next protocol ID",
1651 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1652 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1653 hdr.next_proto_id)),
1657 .help = "source address",
1658 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1659 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1664 .help = "destination address",
1665 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1666 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1671 .help = "match IPv6 header",
1672 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1673 .next = NEXT(item_ipv6),
1678 .help = "traffic class",
1679 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1680 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1682 "\x0f\xf0\x00\x00")),
1684 [ITEM_IPV6_FLOW] = {
1686 .help = "flow label",
1687 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1688 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1690 "\x00\x0f\xff\xff")),
1692 [ITEM_IPV6_PROTO] = {
1694 .help = "protocol (next header)",
1695 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1696 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1701 .help = "hop limit",
1702 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1703 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1708 .help = "source address",
1709 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1710 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1715 .help = "destination address",
1716 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1717 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1722 .help = "match ICMP header",
1723 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1724 .next = NEXT(item_icmp),
1727 [ITEM_ICMP_TYPE] = {
1729 .help = "ICMP packet type",
1730 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1731 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1734 [ITEM_ICMP_CODE] = {
1736 .help = "ICMP packet code",
1737 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1738 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1743 .help = "match UDP header",
1744 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1745 .next = NEXT(item_udp),
1750 .help = "UDP source port",
1751 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1752 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1757 .help = "UDP destination port",
1758 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1759 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1764 .help = "match TCP header",
1765 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1766 .next = NEXT(item_tcp),
1771 .help = "TCP source port",
1772 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1773 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1778 .help = "TCP destination port",
1779 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1780 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1783 [ITEM_TCP_FLAGS] = {
1785 .help = "TCP flags",
1786 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1787 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1792 .help = "match SCTP header",
1793 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1794 .next = NEXT(item_sctp),
1799 .help = "SCTP source port",
1800 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1801 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1806 .help = "SCTP destination port",
1807 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1808 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1813 .help = "validation tag",
1814 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1815 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1818 [ITEM_SCTP_CKSUM] = {
1821 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1822 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1827 .help = "match VXLAN header",
1828 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1829 .next = NEXT(item_vxlan),
1832 [ITEM_VXLAN_VNI] = {
1834 .help = "VXLAN identifier",
1835 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1836 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1840 .help = "match E-Tag header",
1841 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1842 .next = NEXT(item_e_tag),
1845 [ITEM_E_TAG_GRP_ECID_B] = {
1846 .name = "grp_ecid_b",
1847 .help = "GRP and E-CID base",
1848 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1849 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1855 .help = "match NVGRE header",
1856 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1857 .next = NEXT(item_nvgre),
1860 [ITEM_NVGRE_TNI] = {
1862 .help = "virtual subnet ID",
1863 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1864 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1868 .help = "match MPLS header",
1869 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1870 .next = NEXT(item_mpls),
1873 [ITEM_MPLS_LABEL] = {
1875 .help = "MPLS label",
1876 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1877 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1883 .help = "match GRE header",
1884 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1885 .next = NEXT(item_gre),
1888 [ITEM_GRE_PROTO] = {
1890 .help = "GRE protocol type",
1891 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1892 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1897 .help = "fuzzy pattern match, expect faster than default",
1898 .priv = PRIV_ITEM(FUZZY,
1899 sizeof(struct rte_flow_item_fuzzy)),
1900 .next = NEXT(item_fuzzy),
1903 [ITEM_FUZZY_THRESH] = {
1905 .help = "match accuracy threshold",
1906 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1907 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1912 .help = "match GTP header",
1913 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1914 .next = NEXT(item_gtp),
1919 .help = "tunnel endpoint identifier",
1920 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1921 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1925 .help = "match GTP header",
1926 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1927 .next = NEXT(item_gtp),
1932 .help = "match GTP header",
1933 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1934 .next = NEXT(item_gtp),
1939 .help = "match GENEVE header",
1940 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1941 .next = NEXT(item_geneve),
1944 [ITEM_GENEVE_VNI] = {
1946 .help = "virtual network identifier",
1947 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1948 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
1950 [ITEM_GENEVE_PROTO] = {
1952 .help = "GENEVE protocol type",
1953 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1954 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
1957 [ITEM_VXLAN_GPE] = {
1958 .name = "vxlan-gpe",
1959 .help = "match VXLAN-GPE header",
1960 .priv = PRIV_ITEM(VXLAN_GPE,
1961 sizeof(struct rte_flow_item_vxlan_gpe)),
1962 .next = NEXT(item_vxlan_gpe),
1965 [ITEM_VXLAN_GPE_VNI] = {
1967 .help = "VXLAN-GPE identifier",
1968 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
1969 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
1972 [ITEM_ARP_ETH_IPV4] = {
1973 .name = "arp_eth_ipv4",
1974 .help = "match ARP header for Ethernet/IPv4",
1975 .priv = PRIV_ITEM(ARP_ETH_IPV4,
1976 sizeof(struct rte_flow_item_arp_eth_ipv4)),
1977 .next = NEXT(item_arp_eth_ipv4),
1980 [ITEM_ARP_ETH_IPV4_SHA] = {
1982 .help = "sender hardware address",
1983 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
1985 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1988 [ITEM_ARP_ETH_IPV4_SPA] = {
1990 .help = "sender IPv4 address",
1991 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
1993 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
1996 [ITEM_ARP_ETH_IPV4_THA] = {
1998 .help = "target hardware address",
1999 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2001 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2004 [ITEM_ARP_ETH_IPV4_TPA] = {
2006 .help = "target IPv4 address",
2007 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2009 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2014 .help = "match presence of any IPv6 extension header",
2015 .priv = PRIV_ITEM(IPV6_EXT,
2016 sizeof(struct rte_flow_item_ipv6_ext)),
2017 .next = NEXT(item_ipv6_ext),
2020 [ITEM_IPV6_EXT_NEXT_HDR] = {
2022 .help = "next header",
2023 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2024 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2029 .help = "match any ICMPv6 header",
2030 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2031 .next = NEXT(item_icmp6),
2034 [ITEM_ICMP6_TYPE] = {
2036 .help = "ICMPv6 type",
2037 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2038 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2041 [ITEM_ICMP6_CODE] = {
2043 .help = "ICMPv6 code",
2044 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2045 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2048 [ITEM_ICMP6_ND_NS] = {
2049 .name = "icmp6_nd_ns",
2050 .help = "match ICMPv6 neighbor discovery solicitation",
2051 .priv = PRIV_ITEM(ICMP6_ND_NS,
2052 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2053 .next = NEXT(item_icmp6_nd_ns),
2056 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2057 .name = "target_addr",
2058 .help = "target address",
2059 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2061 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2064 [ITEM_ICMP6_ND_NA] = {
2065 .name = "icmp6_nd_na",
2066 .help = "match ICMPv6 neighbor discovery advertisement",
2067 .priv = PRIV_ITEM(ICMP6_ND_NA,
2068 sizeof(struct rte_flow_item_icmp6_nd_na)),
2069 .next = NEXT(item_icmp6_nd_na),
2072 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2073 .name = "target_addr",
2074 .help = "target address",
2075 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2077 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2080 [ITEM_ICMP6_ND_OPT] = {
2081 .name = "icmp6_nd_opt",
2082 .help = "match presence of any ICMPv6 neighbor discovery"
2084 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2085 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2086 .next = NEXT(item_icmp6_nd_opt),
2089 [ITEM_ICMP6_ND_OPT_TYPE] = {
2091 .help = "ND option type",
2092 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2094 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2097 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2098 .name = "icmp6_nd_opt_sla_eth",
2099 .help = "match ICMPv6 neighbor discovery source Ethernet"
2100 " link-layer address option",
2102 (ICMP6_ND_OPT_SLA_ETH,
2103 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2104 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2107 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2109 .help = "source Ethernet LLA",
2110 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2112 .args = ARGS(ARGS_ENTRY_HTON
2113 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2115 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2116 .name = "icmp6_nd_opt_tla_eth",
2117 .help = "match ICMPv6 neighbor discovery target Ethernet"
2118 " link-layer address option",
2120 (ICMP6_ND_OPT_TLA_ETH,
2121 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2122 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2125 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2127 .help = "target Ethernet LLA",
2128 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2130 .args = ARGS(ARGS_ENTRY_HTON
2131 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2135 .help = "match metadata header",
2136 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2137 .next = NEXT(item_meta),
2140 [ITEM_META_DATA] = {
2142 .help = "metadata value",
2143 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param_is),
2144 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2145 data, "\xff\xff\xff\xff")),
2148 /* Validate/create actions. */
2151 .help = "submit a list of associated actions",
2152 .next = NEXT(next_action),
2157 .help = "specify next action",
2158 .next = NEXT(next_action),
2162 .help = "end list of actions",
2163 .priv = PRIV_ACTION(END, 0),
2168 .help = "no-op action",
2169 .priv = PRIV_ACTION(VOID, 0),
2170 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2173 [ACTION_PASSTHRU] = {
2175 .help = "let subsequent rule process matched packets",
2176 .priv = PRIV_ACTION(PASSTHRU, 0),
2177 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2182 .help = "redirect traffic to a given group",
2183 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2184 .next = NEXT(action_jump),
2187 [ACTION_JUMP_GROUP] = {
2189 .help = "group to redirect traffic to",
2190 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2191 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2192 .call = parse_vc_conf,
2196 .help = "attach 32 bit value to packets",
2197 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2198 .next = NEXT(action_mark),
2201 [ACTION_MARK_ID] = {
2203 .help = "32 bit value to return with packets",
2204 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2205 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2206 .call = parse_vc_conf,
2210 .help = "flag packets",
2211 .priv = PRIV_ACTION(FLAG, 0),
2212 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2217 .help = "assign packets to a given queue index",
2218 .priv = PRIV_ACTION(QUEUE,
2219 sizeof(struct rte_flow_action_queue)),
2220 .next = NEXT(action_queue),
2223 [ACTION_QUEUE_INDEX] = {
2225 .help = "queue index to use",
2226 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2227 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2228 .call = parse_vc_conf,
2232 .help = "drop packets (note: passthru has priority)",
2233 .priv = PRIV_ACTION(DROP, 0),
2234 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2239 .help = "enable counters for this rule",
2240 .priv = PRIV_ACTION(COUNT,
2241 sizeof(struct rte_flow_action_count)),
2242 .next = NEXT(action_count),
2245 [ACTION_COUNT_ID] = {
2246 .name = "identifier",
2247 .help = "counter identifier to use",
2248 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2249 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2250 .call = parse_vc_conf,
2252 [ACTION_COUNT_SHARED] = {
2254 .help = "shared counter",
2255 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2256 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2258 .call = parse_vc_conf,
2262 .help = "spread packets among several queues",
2263 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2264 .next = NEXT(action_rss),
2265 .call = parse_vc_action_rss,
2267 [ACTION_RSS_FUNC] = {
2269 .help = "RSS hash function to apply",
2270 .next = NEXT(action_rss,
2271 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2272 ACTION_RSS_FUNC_TOEPLITZ,
2273 ACTION_RSS_FUNC_SIMPLE_XOR)),
2275 [ACTION_RSS_FUNC_DEFAULT] = {
2277 .help = "default hash function",
2278 .call = parse_vc_action_rss_func,
2280 [ACTION_RSS_FUNC_TOEPLITZ] = {
2282 .help = "Toeplitz hash function",
2283 .call = parse_vc_action_rss_func,
2285 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2286 .name = "simple_xor",
2287 .help = "simple XOR hash function",
2288 .call = parse_vc_action_rss_func,
2290 [ACTION_RSS_LEVEL] = {
2292 .help = "encapsulation level for \"types\"",
2293 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2294 .args = ARGS(ARGS_ENTRY_ARB
2295 (offsetof(struct action_rss_data, conf) +
2296 offsetof(struct rte_flow_action_rss, level),
2297 sizeof(((struct rte_flow_action_rss *)0)->
2300 [ACTION_RSS_TYPES] = {
2302 .help = "specific RSS hash types",
2303 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2305 [ACTION_RSS_TYPE] = {
2307 .help = "RSS hash type",
2308 .call = parse_vc_action_rss_type,
2309 .comp = comp_vc_action_rss_type,
2311 [ACTION_RSS_KEY] = {
2313 .help = "RSS hash key",
2314 .next = NEXT(action_rss, NEXT_ENTRY(STRING)),
2315 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2317 (offsetof(struct action_rss_data, conf) +
2318 offsetof(struct rte_flow_action_rss, key_len),
2319 sizeof(((struct rte_flow_action_rss *)0)->
2321 ARGS_ENTRY(struct action_rss_data, key)),
2323 [ACTION_RSS_KEY_LEN] = {
2325 .help = "RSS hash key length in bytes",
2326 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2327 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2328 (offsetof(struct action_rss_data, conf) +
2329 offsetof(struct rte_flow_action_rss, key_len),
2330 sizeof(((struct rte_flow_action_rss *)0)->
2333 RSS_HASH_KEY_LENGTH)),
2335 [ACTION_RSS_QUEUES] = {
2337 .help = "queue indices to use",
2338 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2339 .call = parse_vc_conf,
2341 [ACTION_RSS_QUEUE] = {
2343 .help = "queue index",
2344 .call = parse_vc_action_rss_queue,
2345 .comp = comp_vc_action_rss_queue,
2349 .help = "direct traffic to physical function",
2350 .priv = PRIV_ACTION(PF, 0),
2351 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2356 .help = "direct traffic to a virtual function ID",
2357 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2358 .next = NEXT(action_vf),
2361 [ACTION_VF_ORIGINAL] = {
2363 .help = "use original VF ID if possible",
2364 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2365 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2367 .call = parse_vc_conf,
2372 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2373 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2374 .call = parse_vc_conf,
2376 [ACTION_PHY_PORT] = {
2378 .help = "direct packets to physical port index",
2379 .priv = PRIV_ACTION(PHY_PORT,
2380 sizeof(struct rte_flow_action_phy_port)),
2381 .next = NEXT(action_phy_port),
2384 [ACTION_PHY_PORT_ORIGINAL] = {
2386 .help = "use original port index if possible",
2387 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2388 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2390 .call = parse_vc_conf,
2392 [ACTION_PHY_PORT_INDEX] = {
2394 .help = "physical port index",
2395 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2396 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2398 .call = parse_vc_conf,
2400 [ACTION_PORT_ID] = {
2402 .help = "direct matching traffic to a given DPDK port ID",
2403 .priv = PRIV_ACTION(PORT_ID,
2404 sizeof(struct rte_flow_action_port_id)),
2405 .next = NEXT(action_port_id),
2408 [ACTION_PORT_ID_ORIGINAL] = {
2410 .help = "use original DPDK port ID if possible",
2411 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2412 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2414 .call = parse_vc_conf,
2416 [ACTION_PORT_ID_ID] = {
2418 .help = "DPDK port ID",
2419 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2420 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2421 .call = parse_vc_conf,
2425 .help = "meter the directed packets at given id",
2426 .priv = PRIV_ACTION(METER,
2427 sizeof(struct rte_flow_action_meter)),
2428 .next = NEXT(action_meter),
2431 [ACTION_METER_ID] = {
2433 .help = "meter id to use",
2434 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2435 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2436 .call = parse_vc_conf,
2438 [ACTION_OF_SET_MPLS_TTL] = {
2439 .name = "of_set_mpls_ttl",
2440 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2443 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2444 .next = NEXT(action_of_set_mpls_ttl),
2447 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2450 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2451 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2453 .call = parse_vc_conf,
2455 [ACTION_OF_DEC_MPLS_TTL] = {
2456 .name = "of_dec_mpls_ttl",
2457 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2458 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2459 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2462 [ACTION_OF_SET_NW_TTL] = {
2463 .name = "of_set_nw_ttl",
2464 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2467 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2468 .next = NEXT(action_of_set_nw_ttl),
2471 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2474 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2475 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2477 .call = parse_vc_conf,
2479 [ACTION_OF_DEC_NW_TTL] = {
2480 .name = "of_dec_nw_ttl",
2481 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2482 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2483 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2486 [ACTION_OF_COPY_TTL_OUT] = {
2487 .name = "of_copy_ttl_out",
2488 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2489 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2490 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2493 [ACTION_OF_COPY_TTL_IN] = {
2494 .name = "of_copy_ttl_in",
2495 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2496 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2497 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2500 [ACTION_OF_POP_VLAN] = {
2501 .name = "of_pop_vlan",
2502 .help = "OpenFlow's OFPAT_POP_VLAN",
2503 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2504 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2507 [ACTION_OF_PUSH_VLAN] = {
2508 .name = "of_push_vlan",
2509 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2512 sizeof(struct rte_flow_action_of_push_vlan)),
2513 .next = NEXT(action_of_push_vlan),
2516 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2517 .name = "ethertype",
2518 .help = "EtherType",
2519 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2520 .args = ARGS(ARGS_ENTRY_HTON
2521 (struct rte_flow_action_of_push_vlan,
2523 .call = parse_vc_conf,
2525 [ACTION_OF_SET_VLAN_VID] = {
2526 .name = "of_set_vlan_vid",
2527 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2530 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2531 .next = NEXT(action_of_set_vlan_vid),
2534 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2537 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2538 .args = ARGS(ARGS_ENTRY_HTON
2539 (struct rte_flow_action_of_set_vlan_vid,
2541 .call = parse_vc_conf,
2543 [ACTION_OF_SET_VLAN_PCP] = {
2544 .name = "of_set_vlan_pcp",
2545 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2548 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2549 .next = NEXT(action_of_set_vlan_pcp),
2552 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2554 .help = "VLAN priority",
2555 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2556 .args = ARGS(ARGS_ENTRY_HTON
2557 (struct rte_flow_action_of_set_vlan_pcp,
2559 .call = parse_vc_conf,
2561 [ACTION_OF_POP_MPLS] = {
2562 .name = "of_pop_mpls",
2563 .help = "OpenFlow's OFPAT_POP_MPLS",
2564 .priv = PRIV_ACTION(OF_POP_MPLS,
2565 sizeof(struct rte_flow_action_of_pop_mpls)),
2566 .next = NEXT(action_of_pop_mpls),
2569 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2570 .name = "ethertype",
2571 .help = "EtherType",
2572 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2573 .args = ARGS(ARGS_ENTRY_HTON
2574 (struct rte_flow_action_of_pop_mpls,
2576 .call = parse_vc_conf,
2578 [ACTION_OF_PUSH_MPLS] = {
2579 .name = "of_push_mpls",
2580 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2583 sizeof(struct rte_flow_action_of_push_mpls)),
2584 .next = NEXT(action_of_push_mpls),
2587 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2588 .name = "ethertype",
2589 .help = "EtherType",
2590 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2591 .args = ARGS(ARGS_ENTRY_HTON
2592 (struct rte_flow_action_of_push_mpls,
2594 .call = parse_vc_conf,
2596 [ACTION_VXLAN_ENCAP] = {
2597 .name = "vxlan_encap",
2598 .help = "VXLAN encapsulation, uses configuration set by \"set"
2600 .priv = PRIV_ACTION(VXLAN_ENCAP,
2601 sizeof(struct action_vxlan_encap_data)),
2602 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2603 .call = parse_vc_action_vxlan_encap,
2605 [ACTION_VXLAN_DECAP] = {
2606 .name = "vxlan_decap",
2607 .help = "Performs a decapsulation action by stripping all"
2608 " headers of the VXLAN tunnel network overlay from the"
2610 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2611 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2614 [ACTION_NVGRE_ENCAP] = {
2615 .name = "nvgre_encap",
2616 .help = "NVGRE encapsulation, uses configuration set by \"set"
2618 .priv = PRIV_ACTION(NVGRE_ENCAP,
2619 sizeof(struct action_nvgre_encap_data)),
2620 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2621 .call = parse_vc_action_nvgre_encap,
2623 [ACTION_NVGRE_DECAP] = {
2624 .name = "nvgre_decap",
2625 .help = "Performs a decapsulation action by stripping all"
2626 " headers of the NVGRE tunnel network overlay from the"
2628 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2629 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2632 [ACTION_L2_ENCAP] = {
2634 .help = "l2 encap, uses configuration set by"
2635 " \"set l2_encap\"",
2636 .priv = PRIV_ACTION(RAW_ENCAP,
2637 sizeof(struct action_raw_encap_data)),
2638 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2639 .call = parse_vc_action_l2_encap,
2641 [ACTION_L2_DECAP] = {
2643 .help = "l2 decap, uses configuration set by"
2644 " \"set l2_decap\"",
2645 .priv = PRIV_ACTION(RAW_DECAP,
2646 sizeof(struct action_raw_decap_data)),
2647 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2648 .call = parse_vc_action_l2_decap,
2650 [ACTION_MPLSOGRE_ENCAP] = {
2651 .name = "mplsogre_encap",
2652 .help = "mplsogre encapsulation, uses configuration set by"
2653 " \"set mplsogre_encap\"",
2654 .priv = PRIV_ACTION(RAW_ENCAP,
2655 sizeof(struct action_raw_encap_data)),
2656 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2657 .call = parse_vc_action_mplsogre_encap,
2659 [ACTION_MPLSOGRE_DECAP] = {
2660 .name = "mplsogre_decap",
2661 .help = "mplsogre decapsulation, uses configuration set by"
2662 " \"set mplsogre_decap\"",
2663 .priv = PRIV_ACTION(RAW_DECAP,
2664 sizeof(struct action_raw_decap_data)),
2665 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2666 .call = parse_vc_action_mplsogre_decap,
2668 [ACTION_MPLSOUDP_ENCAP] = {
2669 .name = "mplsoudp_encap",
2670 .help = "mplsoudp encapsulation, uses configuration set by"
2671 " \"set mplsoudp_encap\"",
2672 .priv = PRIV_ACTION(RAW_ENCAP,
2673 sizeof(struct action_raw_encap_data)),
2674 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2675 .call = parse_vc_action_mplsoudp_encap,
2677 [ACTION_MPLSOUDP_DECAP] = {
2678 .name = "mplsoudp_decap",
2679 .help = "mplsoudp decapsulation, uses configuration set by"
2680 " \"set mplsoudp_decap\"",
2681 .priv = PRIV_ACTION(RAW_DECAP,
2682 sizeof(struct action_raw_decap_data)),
2683 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2684 .call = parse_vc_action_mplsoudp_decap,
2686 [ACTION_SET_IPV4_SRC] = {
2687 .name = "set_ipv4_src",
2688 .help = "Set a new IPv4 source address in the outermost"
2690 .priv = PRIV_ACTION(SET_IPV4_SRC,
2691 sizeof(struct rte_flow_action_set_ipv4)),
2692 .next = NEXT(action_set_ipv4_src),
2695 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2696 .name = "ipv4_addr",
2697 .help = "new IPv4 source address to set",
2698 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2699 .args = ARGS(ARGS_ENTRY_HTON
2700 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2701 .call = parse_vc_conf,
2703 [ACTION_SET_IPV4_DST] = {
2704 .name = "set_ipv4_dst",
2705 .help = "Set a new IPv4 destination address in the outermost"
2707 .priv = PRIV_ACTION(SET_IPV4_DST,
2708 sizeof(struct rte_flow_action_set_ipv4)),
2709 .next = NEXT(action_set_ipv4_dst),
2712 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2713 .name = "ipv4_addr",
2714 .help = "new IPv4 destination address to set",
2715 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2716 .args = ARGS(ARGS_ENTRY_HTON
2717 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2718 .call = parse_vc_conf,
2720 [ACTION_SET_IPV6_SRC] = {
2721 .name = "set_ipv6_src",
2722 .help = "Set a new IPv6 source address in the outermost"
2724 .priv = PRIV_ACTION(SET_IPV6_SRC,
2725 sizeof(struct rte_flow_action_set_ipv6)),
2726 .next = NEXT(action_set_ipv6_src),
2729 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2730 .name = "ipv6_addr",
2731 .help = "new IPv6 source address to set",
2732 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2733 .args = ARGS(ARGS_ENTRY_HTON
2734 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2735 .call = parse_vc_conf,
2737 [ACTION_SET_IPV6_DST] = {
2738 .name = "set_ipv6_dst",
2739 .help = "Set a new IPv6 destination address in the outermost"
2741 .priv = PRIV_ACTION(SET_IPV6_DST,
2742 sizeof(struct rte_flow_action_set_ipv6)),
2743 .next = NEXT(action_set_ipv6_dst),
2746 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2747 .name = "ipv6_addr",
2748 .help = "new IPv6 destination address to set",
2749 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2750 .args = ARGS(ARGS_ENTRY_HTON
2751 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2752 .call = parse_vc_conf,
2754 [ACTION_SET_TP_SRC] = {
2755 .name = "set_tp_src",
2756 .help = "set a new source port number in the outermost"
2758 .priv = PRIV_ACTION(SET_TP_SRC,
2759 sizeof(struct rte_flow_action_set_tp)),
2760 .next = NEXT(action_set_tp_src),
2763 [ACTION_SET_TP_SRC_TP_SRC] = {
2765 .help = "new source port number to set",
2766 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2767 .args = ARGS(ARGS_ENTRY_HTON
2768 (struct rte_flow_action_set_tp, port)),
2769 .call = parse_vc_conf,
2771 [ACTION_SET_TP_DST] = {
2772 .name = "set_tp_dst",
2773 .help = "set a new destination port number in the outermost"
2775 .priv = PRIV_ACTION(SET_TP_DST,
2776 sizeof(struct rte_flow_action_set_tp)),
2777 .next = NEXT(action_set_tp_dst),
2780 [ACTION_SET_TP_DST_TP_DST] = {
2782 .help = "new destination port number to set",
2783 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2784 .args = ARGS(ARGS_ENTRY_HTON
2785 (struct rte_flow_action_set_tp, port)),
2786 .call = parse_vc_conf,
2788 [ACTION_MAC_SWAP] = {
2790 .help = "Swap the source and destination MAC addresses"
2791 " in the outermost Ethernet header",
2792 .priv = PRIV_ACTION(MAC_SWAP, 0),
2793 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2796 [ACTION_DEC_TTL] = {
2798 .help = "decrease network TTL if available",
2799 .priv = PRIV_ACTION(DEC_TTL, 0),
2800 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2803 [ACTION_SET_TTL] = {
2805 .help = "set ttl value",
2806 .priv = PRIV_ACTION(SET_TTL,
2807 sizeof(struct rte_flow_action_set_ttl)),
2808 .next = NEXT(action_set_ttl),
2811 [ACTION_SET_TTL_TTL] = {
2812 .name = "ttl_value",
2813 .help = "new ttl value to set",
2814 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2815 .args = ARGS(ARGS_ENTRY_HTON
2816 (struct rte_flow_action_set_ttl, ttl_value)),
2817 .call = parse_vc_conf,
2819 [ACTION_SET_MAC_SRC] = {
2820 .name = "set_mac_src",
2821 .help = "set source mac address",
2822 .priv = PRIV_ACTION(SET_MAC_SRC,
2823 sizeof(struct rte_flow_action_set_mac)),
2824 .next = NEXT(action_set_mac_src),
2827 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2829 .help = "new source mac address",
2830 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2831 .args = ARGS(ARGS_ENTRY_HTON
2832 (struct rte_flow_action_set_mac, mac_addr)),
2833 .call = parse_vc_conf,
2835 [ACTION_SET_MAC_DST] = {
2836 .name = "set_mac_dst",
2837 .help = "set destination mac address",
2838 .priv = PRIV_ACTION(SET_MAC_DST,
2839 sizeof(struct rte_flow_action_set_mac)),
2840 .next = NEXT(action_set_mac_dst),
2843 [ACTION_SET_MAC_DST_MAC_DST] = {
2845 .help = "new destination mac address to set",
2846 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
2847 .args = ARGS(ARGS_ENTRY_HTON
2848 (struct rte_flow_action_set_mac, mac_addr)),
2849 .call = parse_vc_conf,
2853 /** Remove and return last entry from argument stack. */
2854 static const struct arg *
2855 pop_args(struct context *ctx)
2857 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2860 /** Add entry on top of the argument stack. */
2862 push_args(struct context *ctx, const struct arg *arg)
2864 if (ctx->args_num == CTX_STACK_SIZE)
2866 ctx->args[ctx->args_num++] = arg;
/**
 * Spread value into buffer according to bit-mask.
 *
 * Copies the low-order bits of @p val into the bit positions selected by
 * arg->mask inside the destination object at arg->offset.
 *
 * NOTE(review): several lines of this function (endianness setup of the
 * iteration direction, the enclosing loop bounds, the handling of a NULL
 * destination and the return value) are not visible in this excerpt;
 * comments below describe only what the visible code establishes.
 */
arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
	/* Byte index into the mask; presumably walked high-to-low so the
	 * value lands in network order — TODO confirm against full source. */
	uint32_t i = arg->size;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
		unsigned int shift = 0;
		/* Target byte: destination object + field offset + current
		 * mask byte index (i is pre-decremented by sub here). */
		uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);

		/* Scan every set bit of the current mask byte, lowest first;
		 * the loop stops once no higher mask bits remain. */
		for (shift = 0; arg->mask[i] >> shift; ++shift) {
			if (!(arg->mask[i] & (1 << shift)))
				/* Clear the masked bit, then copy in the next
				 * low-order bit of val. */
				*buf &= ~(1 << shift);
				*buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * @param full
 *   Complete NUL-terminated reference string.
 * @param partial
 *   Possibly shorter candidate string.
 * @param partial_len
 *   Number of bytes of @p partial to compare.
 * @return
 *   0 when @p partial of length @p partial_len matches @p full exactly,
 *   a nonzero strncmp()-style difference when the compared prefixes
 *   differ, or the first unmatched character of @p full (positive) when
 *   @p full is longer than the matched prefix.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	/* Prefix matched; equal only if "full" does not extend past it. */
	if (strlen(full) <= partial_len)
		return 0;
	return full[partial_len];
}
/**
 * Parse a prefix length and generate a bit-mask.
 *
 * Last argument (ctx->args) is retrieved to determine mask size, storage
 * location and whether the result must use network byte ordering.
 *
 * NOTE(review): this excerpt is missing several lines of the function
 * (local variable declarations such as u/end/v/bytes/extra, the early
 * error paths, the #else branch label and the return statements);
 * comments below describe only what the visible code establishes.
 */
parse_prefix(struct context *ctx, const struct token *token,
	     const char *str, unsigned int len,
	     void *buf, unsigned int size)
{
	const struct arg *arg = pop_args(ctx);
	/* conv[n] is a byte with its n highest bits set (0..8). */
	static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";

	/* Argument is expected. */
	/* Parse the prefix length as an unsigned integer covering the whole
	 * token; any trailing garbage or range error is rejected. */
	u = strtoumax(str, &end, 0);
	if (errno || (size_t)(end - str) != len)
	/* A NULL destination makes arg_entry_bf_fill() only count the mask
	 * bits; nonzero means a bit-field argument is being filled. */
	extra = arg_entry_bf_fill(NULL, 0, arg);
	/* Bit-field case: spread the prefix value into the object and set
	 * every masked bit in the object mask. */
	if (!arg_entry_bf_fill(ctx->object, v, arg) ||
	    !arg_entry_bf_fill(ctx->objmask, -1, arg))
	/* Plain byte-array case: the prefix must fit the field size. */
	if (bytes > size || bytes + !!extra > size)
	buf = (uint8_t *)ctx->object + arg->offset;
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
	/* Host-order storage on little-endian: the mask grows from the most
	 * significant (highest-address) bytes downward. */
	memset((uint8_t *)buf + size - bytes, 0xff, bytes);
	memset(buf, 0x00, size - bytes);
		/* Partial byte holds the remaining high bits of the mask. */
		((uint8_t *)buf)[size - bytes - 1] = conv[extra];
	/* Network-order storage: the mask grows from byte 0 upward. */
	memset(buf, 0xff, bytes);
	memset((uint8_t *)buf + bytes, 0x00, size - bytes);
		((uint8_t *)buf)[bytes] = conv[extra];
	/* Object mask: all bits of this field are significant. */
	memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
	/* Error path: restore the argument for the caller. */
	push_args(ctx, arg);
2995 /** Default parsing function for token name matching. */
2997 parse_default(struct context *ctx, const struct token *token,
2998 const char *str, unsigned int len,
2999 void *buf, unsigned int size)
3004 if (strcmp_partial(token->name, str, len))
/**
 * Parse flow command, initialize output buffer for subsequent tokens.
 *
 * NOTE(review): this excerpt is missing the function signature line, the
 * early "return" statements of the guard clauses and the ctx->object
 * assignment; comments below describe only what the visible code
 * establishes.
 */
parse_init(struct context *ctx, const struct token *token,
	   const char *str, unsigned int len,
	   void *buf, unsigned int size)
	struct buffer *out = buf;

	/* Token name must match. */
	if (parse_default(ctx, token, str, len, NULL, 0) < 0)
	/* Nothing else to do if there is no buffer. */
	/* Make sure buffer is large enough. */
	if (size < sizeof(*out))
	/* Initialize buffer. */
	memset(out, 0x00, sizeof(*out));
	/* 0x22 poison marks the unused tail of the buffer, presumably to
	 * make stale data visible while debugging — TODO confirm. */
	memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
	ctx->objmask = NULL;
3035 /** Parse tokens for validate/create commands. */
/*
 * The output buffer is used as a two-ended arena: pattern items and
 * actions grow upward from just past *out, while each entry's spec
 * data grows downward from out->args.vc.data (initially out + size).
 * The overlap checks below detect exhaustion of the buffer.
 * NOTE(review): interior lines (returns, case labels, braces) are
 * elided in this excerpt.
 */
3037 parse_vc(struct context *ctx, const struct token *token,
3038 const char *str, unsigned int len,
3039 void *buf, unsigned int size)
3041 struct buffer *out = buf;
3045 /* Token name must match. */
3046 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3048 /* Nothing else to do if there is no buffer. */
3051 if (!out->command) {
3052 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3054 if (sizeof(*out) > size)
3056 out->command = ctx->curr;
3059 ctx->objmask = NULL;
/* Data region starts at the very end of the buffer and grows down. */
3060 out->args.vc.data = (uint8_t *)out + size;
3064 ctx->object = &out->args.vc.attr;
3065 ctx->objmask = NULL;
3066 switch (ctx->curr) {
3071 out->args.vc.attr.ingress = 1;
3074 out->args.vc.attr.egress = 1;
3077 out->args.vc.attr.transfer = 1;
/* Pattern array starts aligned immediately after the buffer header. */
3080 out->args.vc.pattern =
3081 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3083 ctx->object = out->args.vc.pattern;
3084 ctx->objmask = NULL;
/* Action array starts aligned right after the last pattern item. */
3087 out->args.vc.actions =
3088 (void *)RTE_ALIGN_CEIL((uintptr_t)
3089 (out->args.vc.pattern +
3090 out->args.vc.pattern_n),
3092 ctx->object = out->args.vc.actions;
3093 ctx->objmask = NULL;
/* Before the ACTIONS keyword is seen, tokens are pattern items. */
3100 if (!out->args.vc.actions) {
3101 const struct parse_item_priv *priv = token->priv;
3102 struct rte_flow_item *item =
3103 out->args.vc.pattern + out->args.vc.pattern_n;
3105 data_size = priv->size * 3; /* spec, last, mask */
3106 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3107 (out->args.vc.data - data_size),
/* Fail when the growing item array meets the descending data region. */
3109 if ((uint8_t *)item + sizeof(*item) > data)
3111 *item = (struct rte_flow_item){
3114 ++out->args.vc.pattern_n;
3116 ctx->objmask = NULL;
3118 const struct parse_action_priv *priv = token->priv;
3119 struct rte_flow_action *action =
3120 out->args.vc.actions + out->args.vc.actions_n;
3122 data_size = priv->size; /* configuration */
3123 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3124 (out->args.vc.data - data_size),
3126 if ((uint8_t *)action + sizeof(*action) > data)
3128 *action = (struct rte_flow_action){
3130 .conf = data_size ? data : NULL,
3132 ++out->args.vc.actions_n;
3133 ctx->object = action;
3134 ctx->objmask = NULL;
3136 memset(data, 0, data_size);
3137 out->args.vc.data = data;
3138 ctx->objdata = data_size;
3142 /** Parse pattern item parameter type. */
/*
 * Selects which of the three per-item buffers (spec, last, mask) the
 * following field tokens write into, and wires the chosen buffer into
 * the current rte_flow_item.  ctx->objdata holds the combined size of
 * all three regions (see parse_vc), hence the division by 3 below.
 */
3144 parse_vc_spec(struct context *ctx, const struct token *token,
3145 const char *str, unsigned int len,
3146 void *buf, unsigned int size)
3148 struct buffer *out = buf;
3149 struct rte_flow_item *item;
3155 /* Token name must match. */
3156 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3158 /* Parse parameter types. */
3159 switch (ctx->curr) {
3160 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3166 case ITEM_PARAM_SPEC:
3169 case ITEM_PARAM_LAST:
3172 case ITEM_PARAM_PREFIX:
3173 /* Modify next token to expect a prefix. */
3174 if (ctx->next_num < 2)
3176 ctx->next[ctx->next_num - 2] = prefix;
3178 case ITEM_PARAM_MASK:
3184 /* Nothing else to do if there is no buffer. */
/* A pattern item must already exist to attach the parameter to. */
3187 if (!out->args.vc.pattern_n)
3189 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3190 data_size = ctx->objdata / 3; /* spec, last, mask */
3191 /* Point to selected object. */
3192 ctx->object = out->args.vc.data + (data_size * index);
3194 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3195 item->mask = ctx->objmask;
3197 ctx->objmask = NULL;
3198 /* Update relevant item pointer. */
3199 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3204 /** Parse action configuration field. */
/* Directs subsequent field tokens at the current action's conf data. */
3206 parse_vc_conf(struct context *ctx, const struct token *token,
3207 const char *str, unsigned int len,
3208 void *buf, unsigned int size)
3210 struct buffer *out = buf;
3213 /* Token name must match. */
3214 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3216 /* Nothing else to do if there is no buffer. */
3219 /* Point to selected object. */
3220 ctx->object = out->args.vc.data;
/* Actions have no mask concept; only the object itself is written. */
3221 ctx->objmask = NULL;
3225 /** Parse RSS action. */
/*
 * Registers the action through parse_vc() then fills in a default RSS
 * configuration: default hash function, testpmd's default key, and one
 * entry per Rx queue (capped at ACTION_RSS_QUEUE_NUM).  The key length
 * is clamped to the device's reported hash_key_size when a valid port
 * is targeted.
 */
3227 parse_vc_action_rss(struct context *ctx, const struct token *token,
3228 const char *str, unsigned int len,
3229 void *buf, unsigned int size)
3231 struct buffer *out = buf;
3232 struct rte_flow_action *action;
3233 struct action_rss_data *action_rss_data;
3237 ret = parse_vc(ctx, token, str, len, buf, size);
3240 /* Nothing else to do if there is no buffer. */
3243 if (!out->args.vc.actions_n)
3245 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3246 /* Point to selected object. */
3247 ctx->object = out->args.vc.data;
3248 ctx->objmask = NULL;
3249 /* Set up default configuration. */
3250 action_rss_data = ctx->object;
3251 *action_rss_data = (struct action_rss_data){
3252 .conf = (struct rte_flow_action_rss){
3253 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3256 .key_len = sizeof(action_rss_data->key),
3257 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
/* conf points into this same object's storage for key and queues. */
3258 .key = action_rss_data->key,
3259 .queue = action_rss_data->queue,
3261 .key = "testpmd's default RSS hash key, "
3262 "override it for better balancing",
/* Identity queue mapping 0..queue_num-1 by default. */
3265 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3266 action_rss_data->queue[i] = i;
3267 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3268 ctx->port != (portid_t)RTE_PORT_ALL) {
3269 struct rte_eth_dev_info info;
3271 rte_eth_dev_info_get(ctx->port, &info);
3272 action_rss_data->conf.key_len =
3273 RTE_MIN(sizeof(action_rss_data->key),
3274 info.hash_key_size);
3276 action->conf = &action_rss_data->conf;
3281 * Parse func field for RSS action.
3283 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3284 * ACTION_RSS_FUNC_* index that called this function.
3287 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3288 const char *str, unsigned int len,
3289 void *buf, unsigned int size)
3291 struct action_rss_data *action_rss_data;
3292 enum rte_eth_hash_function func;
3296 /* Token name must match. */
3297 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the parser token to the corresponding hash function enum. */
3299 switch (ctx->curr) {
3300 case ACTION_RSS_FUNC_DEFAULT:
3301 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3303 case ACTION_RSS_FUNC_TOEPLITZ:
3304 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3306 case ACTION_RSS_FUNC_SIMPLE_XOR:
3307 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object points at the action_rss_data set up by the RSS token. */
3314 action_rss_data = ctx->object;
3315 action_rss_data->conf.func = func;
3320 * Parse type field for RSS action.
3322 * Valid tokens are type field names and the "end" token.
/*
 * The upper 16 bits of ctx->objdata track whether at least one type
 * token has been consumed: on the first call the accumulated types are
 * reset, and every accepted token re-queues this state so more type
 * names can follow until "end".
 */
3325 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3326 const char *str, unsigned int len,
3327 void *buf, unsigned int size)
3329 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3330 struct action_rss_data *action_rss_data;
3336 if (ctx->curr != ACTION_RSS_TYPE)
3338 if (!(ctx->objdata >> 16) && ctx->object) {
3339 action_rss_data = ctx->object;
3340 action_rss_data->conf.types = 0;
3342 if (!strcmp_partial("end", str, len)) {
3343 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3346 for (i = 0; rss_type_table[i].str; ++i)
3347 if (!strcmp_partial(rss_type_table[i].str, str, len))
3349 if (!rss_type_table[i].str)
3351 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
3353 if (ctx->next_num == RTE_DIM(ctx->next))
3355 ctx->next[ctx->next_num++] = next;
3358 action_rss_data = ctx->object;
3359 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3364 * Parse queue field for RSS action.
3366 * Valid tokens are queue indices and the "end" token.
/*
 * The upper 16 bits of ctx->objdata count queues consumed so far; each
 * index is parsed into queue[i] via parse_int() with an ad-hoc
 * argument descriptor, then this token is re-queued for the next one.
 */
3369 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3370 const char *str, unsigned int len,
3371 void *buf, unsigned int size)
3373 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3374 struct action_rss_data *action_rss_data;
3381 if (ctx->curr != ACTION_RSS_QUEUE)
3383 i = ctx->objdata >> 16;
3384 if (!strcmp_partial("end", str, len)) {
3385 ctx->objdata &= 0xffff;
3388 if (i >= ACTION_RSS_QUEUE_NUM)
3391 ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3392 i * sizeof(action_rss_data->queue[i]),
3393 sizeof(action_rss_data->queue[i]))))
3395 ret = parse_int(ctx, token, str, len, NULL, 0);
3401 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3403 if (ctx->next_num == RTE_DIM(ctx->next))
3405 ctx->next[ctx->next_num++] = next;
3409 action_rss_data = ctx->object;
3410 action_rss_data->conf.queue_num = i;
/* An empty queue list yields a NULL queue pointer. */
3411 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3415 /** Parse VXLAN encap action. */
/*
 * Builds a default VXLAN encapsulation item list (ETH / [VLAN] /
 * IPv4-or-IPv6 / UDP / VXLAN / END) from the global vxlan_encap_conf
 * settings and attaches it as the action's configuration.
 */
3417 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3418 const char *str, unsigned int len,
3419 void *buf, unsigned int size)
3421 struct buffer *out = buf;
3422 struct rte_flow_action *action;
3423 struct action_vxlan_encap_data *action_vxlan_encap_data;
3426 ret = parse_vc(ctx, token, str, len, buf, size);
3429 /* Nothing else to do if there is no buffer. */
3432 if (!out->args.vc.actions_n)
3434 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3435 /* Point to selected object. */
3436 ctx->object = out->args.vc.data;
3437 ctx->objmask = NULL;
3438 /* Set up default configuration. */
3439 action_vxlan_encap_data = ctx->object;
3440 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3441 .conf = (struct rte_flow_action_vxlan_encap){
3442 .definition = action_vxlan_encap_data->items,
3446 .type = RTE_FLOW_ITEM_TYPE_ETH,
3447 .spec = &action_vxlan_encap_data->item_eth,
3448 .mask = &rte_flow_item_eth_mask,
3451 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3452 .spec = &action_vxlan_encap_data->item_vlan,
3453 .mask = &rte_flow_item_vlan_mask,
3456 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3457 .spec = &action_vxlan_encap_data->item_ipv4,
3458 .mask = &rte_flow_item_ipv4_mask,
3461 .type = RTE_FLOW_ITEM_TYPE_UDP,
3462 .spec = &action_vxlan_encap_data->item_udp,
3463 .mask = &rte_flow_item_udp_mask,
3466 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3467 .spec = &action_vxlan_encap_data->item_vxlan,
3468 .mask = &rte_flow_item_vxlan_mask,
3471 .type = RTE_FLOW_ITEM_TYPE_END,
3476 .tci = vxlan_encap_conf.vlan_tci,
3480 .src_addr = vxlan_encap_conf.ipv4_src,
3481 .dst_addr = vxlan_encap_conf.ipv4_dst,
3484 .src_port = vxlan_encap_conf.udp_src,
3485 .dst_port = vxlan_encap_conf.udp_dst,
3487 .item_vxlan.flags = 0,
3489 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3490 vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN);
3491 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3492 vxlan_encap_conf.eth_src, ETHER_ADDR_LEN);
/* Replace the IPv4 item (index 2) with IPv6 when configured. */
3493 if (!vxlan_encap_conf.select_ipv4) {
3494 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3495 &vxlan_encap_conf.ipv6_src,
3496 sizeof(vxlan_encap_conf.ipv6_src));
3497 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3498 &vxlan_encap_conf.ipv6_dst,
3499 sizeof(vxlan_encap_conf.ipv6_dst));
3500 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3501 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3502 .spec = &action_vxlan_encap_data->item_ipv6,
3503 .mask = &rte_flow_item_ipv6_mask,
/* Without VLAN, item 1 is turned into a VOID placeholder. */
3506 if (!vxlan_encap_conf.select_vlan)
3507 action_vxlan_encap_data->items[1].type =
3508 RTE_FLOW_ITEM_TYPE_VOID;
3509 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3510 RTE_DIM(vxlan_encap_conf.vni));
3511 action->conf = &action_vxlan_encap_data->conf;
3515 /** Parse NVGRE encap action. */
/*
 * Builds a default NVGRE encapsulation item list (ETH / [VLAN] /
 * IPv4-or-IPv6 / NVGRE / END) from the global nvgre_encap_conf
 * settings; mirrors parse_vc_action_vxlan_encap().
 */
3517 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3518 const char *str, unsigned int len,
3519 void *buf, unsigned int size)
3521 struct buffer *out = buf;
3522 struct rte_flow_action *action;
3523 struct action_nvgre_encap_data *action_nvgre_encap_data;
3526 ret = parse_vc(ctx, token, str, len, buf, size);
3529 /* Nothing else to do if there is no buffer. */
3532 if (!out->args.vc.actions_n)
3534 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3535 /* Point to selected object. */
3536 ctx->object = out->args.vc.data;
3537 ctx->objmask = NULL;
3538 /* Set up default configuration. */
3539 action_nvgre_encap_data = ctx->object;
3540 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3541 .conf = (struct rte_flow_action_nvgre_encap){
3542 .definition = action_nvgre_encap_data->items,
3546 .type = RTE_FLOW_ITEM_TYPE_ETH,
3547 .spec = &action_nvgre_encap_data->item_eth,
3548 .mask = &rte_flow_item_eth_mask,
3551 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3552 .spec = &action_nvgre_encap_data->item_vlan,
3553 .mask = &rte_flow_item_vlan_mask,
3556 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3557 .spec = &action_nvgre_encap_data->item_ipv4,
3558 .mask = &rte_flow_item_ipv4_mask,
3561 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3562 .spec = &action_nvgre_encap_data->item_nvgre,
3563 .mask = &rte_flow_item_nvgre_mask,
3566 .type = RTE_FLOW_ITEM_TYPE_END,
3571 .tci = nvgre_encap_conf.vlan_tci,
3575 .src_addr = nvgre_encap_conf.ipv4_src,
3576 .dst_addr = nvgre_encap_conf.ipv4_dst,
3578 .item_nvgre.flow_id = 0,
3580 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3581 nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3582 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3583 nvgre_encap_conf.eth_src, ETHER_ADDR_LEN);
/* Replace the IPv4 item (index 2) with IPv6 when configured. */
3584 if (!nvgre_encap_conf.select_ipv4) {
3585 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3586 &nvgre_encap_conf.ipv6_src,
3587 sizeof(nvgre_encap_conf.ipv6_src));
3588 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3589 &nvgre_encap_conf.ipv6_dst,
3590 sizeof(nvgre_encap_conf.ipv6_dst));
3591 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3592 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3593 .spec = &action_nvgre_encap_data->item_ipv6,
3594 .mask = &rte_flow_item_ipv6_mask,
/* Without VLAN, item 1 is turned into a VOID placeholder. */
3597 if (!nvgre_encap_conf.select_vlan)
3598 action_nvgre_encap_data->items[1].type =
3599 RTE_FLOW_ITEM_TYPE_VOID;
3600 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3601 RTE_DIM(nvgre_encap_conf.tni));
3602 action->conf = &action_nvgre_encap_data->conf;
3606 /** Parse l2 encap action. */
/*
 * Serializes an Ethernet (+ optional VLAN) header into a raw-encap
 * buffer from the global l2_encap_conf settings.
 */
3608 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3609 const char *str, unsigned int len,
3610 void *buf, unsigned int size)
3612 struct buffer *out = buf;
3613 struct rte_flow_action *action;
3614 struct action_raw_encap_data *action_encap_data;
3615 struct rte_flow_item_eth eth = { .type = 0, };
3616 struct rte_flow_item_vlan vlan = {
/* NOTE(review): borrows mplsoudp_encap_conf.vlan_tci while the rest of
 * this function uses l2_encap_conf -- looks like l2_encap_conf carries
 * no TCI of its own; confirm this cross-config reuse is intentional. */
3617 .tci = mplsoudp_encap_conf.vlan_tci,
3623 ret = parse_vc(ctx, token, str, len, buf, size);
3626 /* Nothing else to do if there is no buffer. */
3629 if (!out->args.vc.actions_n)
3631 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3632 /* Point to selected object. */
3633 ctx->object = out->args.vc.data;
3634 ctx->objmask = NULL;
3635 /* Copy the headers to the buffer. */
3636 action_encap_data = ctx->object;
3637 *action_encap_data = (struct action_raw_encap_data) {
3638 .conf = (struct rte_flow_action_raw_encap){
3639 .data = action_encap_data->data,
3643 header = action_encap_data->data;
/* Pick the outer EtherType: VLAN tag, else IPv4 or IPv6 payload. */
3644 if (l2_encap_conf.select_vlan)
3645 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3646 else if (l2_encap_conf.select_ipv4)
3647 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3649 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3650 memcpy(eth.dst.addr_bytes,
3651 l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
3652 memcpy(eth.src.addr_bytes,
3653 l2_encap_conf.eth_src, ETHER_ADDR_LEN);
3654 memcpy(header, &eth, sizeof(eth));
3655 header += sizeof(eth);
3656 if (l2_encap_conf.select_vlan) {
3657 if (l2_encap_conf.select_ipv4)
3658 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3660 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3661 memcpy(header, &vlan, sizeof(vlan));
3662 header += sizeof(vlan);
/* Final size is however many header bytes were appended. */
3664 action_encap_data->conf.size = header -
3665 action_encap_data->data;
3666 action->conf = &action_encap_data->conf;
3670 /** Parse l2 decap action. */
/*
 * Serializes the Ethernet (+ optional VLAN) header to strip into a
 * raw-decap buffer, driven by the global l2_decap_conf settings.
 */
3672 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3673 const char *str, unsigned int len,
3674 void *buf, unsigned int size)
3676 struct buffer *out = buf;
3677 struct rte_flow_action *action;
3678 struct action_raw_decap_data *action_decap_data;
3679 struct rte_flow_item_eth eth = { .type = 0, };
3680 struct rte_flow_item_vlan vlan = {
/* NOTE(review): TCI taken from mplsoudp_encap_conf, not a dedicated
 * l2_decap setting -- confirm this cross-config reuse is intentional. */
3681 .tci = mplsoudp_encap_conf.vlan_tci,
3687 ret = parse_vc(ctx, token, str, len, buf, size);
3690 /* Nothing else to do if there is no buffer. */
3693 if (!out->args.vc.actions_n)
3695 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3696 /* Point to selected object. */
3697 ctx->object = out->args.vc.data;
3698 ctx->objmask = NULL;
3699 /* Copy the headers to the buffer. */
3700 action_decap_data = ctx->object;
3701 *action_decap_data = (struct action_raw_decap_data) {
3702 .conf = (struct rte_flow_action_raw_decap){
3703 .data = action_decap_data->data,
3707 header = action_decap_data->data;
3708 if (l2_decap_conf.select_vlan)
3709 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3710 memcpy(header, &eth, sizeof(eth));
3711 header += sizeof(eth);
3712 if (l2_decap_conf.select_vlan) {
3713 memcpy(header, &vlan, sizeof(vlan));
3714 header += sizeof(vlan);
3716 action_decap_data->conf.size = header -
3717 action_decap_data->data;
3718 action->conf = &action_decap_data->conf;
/* EtherType for MPLS unicast, carried in the GRE protocol field. */
3722 #define ETHER_TYPE_MPLS_UNICAST 0x8847
3724 /** Parse MPLSOGRE encap action. */
/*
 * Serializes an ETH / [VLAN] / IPv4-or-IPv6 / GRE / MPLS header stack
 * into a raw-encap buffer from the global mplsogre_encap_conf.
 */
3726 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
3727 const char *str, unsigned int len,
3728 void *buf, unsigned int size)
3730 struct buffer *out = buf;
3731 struct rte_flow_action *action;
3732 struct action_raw_encap_data *action_encap_data;
3733 struct rte_flow_item_eth eth = { .type = 0, };
3734 struct rte_flow_item_vlan vlan = {
3735 .tci = mplsogre_encap_conf.vlan_tci,
3738 struct rte_flow_item_ipv4 ipv4 = {
3740 .src_addr = mplsogre_encap_conf.ipv4_src,
3741 .dst_addr = mplsogre_encap_conf.ipv4_dst,
3742 .next_proto_id = IPPROTO_GRE,
3745 struct rte_flow_item_ipv6 ipv6 = {
3747 .proto = IPPROTO_GRE,
3750 struct rte_flow_item_gre gre = {
3751 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3753 struct rte_flow_item_mpls mpls;
3757 ret = parse_vc(ctx, token, str, len, buf, size);
3760 /* Nothing else to do if there is no buffer. */
3763 if (!out->args.vc.actions_n)
3765 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3766 /* Point to selected object. */
3767 ctx->object = out->args.vc.data;
3768 ctx->objmask = NULL;
3769 /* Copy the headers to the buffer. */
3770 action_encap_data = ctx->object;
3771 *action_encap_data = (struct action_raw_encap_data) {
3772 .conf = (struct rte_flow_action_raw_encap){
3773 .data = action_encap_data->data,
3778 header = action_encap_data->data;
/* Outer EtherType: VLAN tag, else IPv4 or IPv6 payload. */
3779 if (mplsogre_encap_conf.select_vlan)
3780 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3781 else if (mplsogre_encap_conf.select_ipv4)
3782 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3784 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3785 memcpy(eth.dst.addr_bytes,
3786 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3787 memcpy(eth.src.addr_bytes,
3788 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3789 memcpy(header, &eth, sizeof(eth));
3790 header += sizeof(eth);
3791 if (mplsogre_encap_conf.select_vlan) {
3792 if (mplsogre_encap_conf.select_ipv4)
3793 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3795 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3796 memcpy(header, &vlan, sizeof(vlan));
3797 header += sizeof(vlan);
3799 if (mplsogre_encap_conf.select_ipv4) {
3800 memcpy(header, &ipv4, sizeof(ipv4));
3801 header += sizeof(ipv4);
3803 memcpy(&ipv6.hdr.src_addr,
3804 &mplsogre_encap_conf.ipv6_src,
3805 sizeof(mplsogre_encap_conf.ipv6_src));
3806 memcpy(&ipv6.hdr.dst_addr,
3807 &mplsogre_encap_conf.ipv6_dst,
3808 sizeof(mplsogre_encap_conf.ipv6_dst));
3809 memcpy(header, &ipv6, sizeof(ipv6));
3810 header += sizeof(ipv6);
3812 memcpy(header, &gre, sizeof(gre));
3813 header += sizeof(gre);
3814 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
3815 RTE_DIM(mplsogre_encap_conf.label));
3816 memcpy(header, &mpls, sizeof(mpls));
3817 header += sizeof(mpls);
3818 action_encap_data->conf.size = header -
3819 action_encap_data->data;
3820 action->conf = &action_encap_data->conf;
3824 /** Parse MPLSOGRE decap action. */
/*
 * Serializes the ETH / [VLAN] / IP / GRE / MPLS stack to strip into a
 * raw-decap buffer.
 * NOTE(review): mixes mplsogre_decap_conf (select_vlan below) with
 * mplsogre_encap_conf (addresses, select_ipv4, later select_vlan) --
 * possibly deliberate mirroring of the encap side, but verify.
 */
3826 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
3827 const char *str, unsigned int len,
3828 void *buf, unsigned int size)
3830 struct buffer *out = buf;
3831 struct rte_flow_action *action;
3832 struct action_raw_decap_data *action_decap_data;
3833 struct rte_flow_item_eth eth = { .type = 0, };
3834 struct rte_flow_item_vlan vlan = {.tci = 0};
3835 struct rte_flow_item_ipv4 ipv4 = {
3837 .next_proto_id = IPPROTO_GRE,
3840 struct rte_flow_item_ipv6 ipv6 = {
3842 .proto = IPPROTO_GRE,
3845 struct rte_flow_item_gre gre = {
3846 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3848 struct rte_flow_item_mpls mpls;
3852 ret = parse_vc(ctx, token, str, len, buf, size);
3855 /* Nothing else to do if there is no buffer. */
3858 if (!out->args.vc.actions_n)
3860 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3861 /* Point to selected object. */
3862 ctx->object = out->args.vc.data;
3863 ctx->objmask = NULL;
3864 /* Copy the headers to the buffer. */
3865 action_decap_data = ctx->object;
3866 *action_decap_data = (struct action_raw_decap_data) {
3867 .conf = (struct rte_flow_action_raw_decap){
3868 .data = action_decap_data->data,
3872 header = action_decap_data->data;
3873 if (mplsogre_decap_conf.select_vlan)
3874 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3875 else if (mplsogre_encap_conf.select_ipv4)
3876 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3878 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3879 memcpy(eth.dst.addr_bytes,
3880 mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
3881 memcpy(eth.src.addr_bytes,
3882 mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
3883 memcpy(header, &eth, sizeof(eth));
3884 header += sizeof(eth);
3885 if (mplsogre_encap_conf.select_vlan) {
3886 if (mplsogre_encap_conf.select_ipv4)
3887 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3889 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3890 memcpy(header, &vlan, sizeof(vlan));
3891 header += sizeof(vlan);
3893 if (mplsogre_encap_conf.select_ipv4) {
3894 memcpy(header, &ipv4, sizeof(ipv4));
3895 header += sizeof(ipv4);
3897 memcpy(header, &ipv6, sizeof(ipv6));
3898 header += sizeof(ipv6);
3900 memcpy(header, &gre, sizeof(gre));
3901 header += sizeof(gre);
/* MPLS label content is irrelevant for decap; zero it. */
3902 memset(&mpls, 0, sizeof(mpls));
3903 memcpy(header, &mpls, sizeof(mpls));
3904 header += sizeof(mpls);
3905 action_decap_data->conf.size = header -
3906 action_decap_data->data;
3907 action->conf = &action_decap_data->conf;
3911 /** Parse MPLSOUDP encap action. */
/*
 * Serializes an ETH / [VLAN] / IPv4-or-IPv6 / UDP / MPLS header stack
 * into a raw-encap buffer from the global mplsoudp_encap_conf.
 */
3913 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
3914 const char *str, unsigned int len,
3915 void *buf, unsigned int size)
3917 struct buffer *out = buf;
3918 struct rte_flow_action *action;
3919 struct action_raw_encap_data *action_encap_data;
3920 struct rte_flow_item_eth eth = { .type = 0, };
3921 struct rte_flow_item_vlan vlan = {
3922 .tci = mplsoudp_encap_conf.vlan_tci,
3925 struct rte_flow_item_ipv4 ipv4 = {
3927 .src_addr = mplsoudp_encap_conf.ipv4_src,
3928 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
3929 .next_proto_id = IPPROTO_UDP,
3932 struct rte_flow_item_ipv6 ipv6 = {
3934 .proto = IPPROTO_UDP,
3937 struct rte_flow_item_udp udp = {
3939 .src_port = mplsoudp_encap_conf.udp_src,
3940 .dst_port = mplsoudp_encap_conf.udp_dst,
3943 struct rte_flow_item_mpls mpls;
3947 ret = parse_vc(ctx, token, str, len, buf, size);
3950 /* Nothing else to do if there is no buffer. */
3953 if (!out->args.vc.actions_n)
3955 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3956 /* Point to selected object. */
3957 ctx->object = out->args.vc.data;
3958 ctx->objmask = NULL;
3959 /* Copy the headers to the buffer. */
3960 action_encap_data = ctx->object;
3961 *action_encap_data = (struct action_raw_encap_data) {
3962 .conf = (struct rte_flow_action_raw_encap){
3963 .data = action_encap_data->data,
3968 header = action_encap_data->data;
/* Outer EtherType: VLAN tag, else IPv4 or IPv6 payload. */
3969 if (mplsoudp_encap_conf.select_vlan)
3970 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
3971 else if (mplsoudp_encap_conf.select_ipv4)
3972 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3974 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3975 memcpy(eth.dst.addr_bytes,
3976 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
3977 memcpy(eth.src.addr_bytes,
3978 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
3979 memcpy(header, &eth, sizeof(eth));
3980 header += sizeof(eth);
3981 if (mplsoudp_encap_conf.select_vlan) {
3982 if (mplsoudp_encap_conf.select_ipv4)
3983 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
3985 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
3986 memcpy(header, &vlan, sizeof(vlan));
3987 header += sizeof(vlan);
3989 if (mplsoudp_encap_conf.select_ipv4) {
3990 memcpy(header, &ipv4, sizeof(ipv4));
3991 header += sizeof(ipv4);
3993 memcpy(&ipv6.hdr.src_addr,
3994 &mplsoudp_encap_conf.ipv6_src,
3995 sizeof(mplsoudp_encap_conf.ipv6_src));
3996 memcpy(&ipv6.hdr.dst_addr,
3997 &mplsoudp_encap_conf.ipv6_dst,
3998 sizeof(mplsoudp_encap_conf.ipv6_dst));
3999 memcpy(header, &ipv6, sizeof(ipv6));
4000 header += sizeof(ipv6);
4002 memcpy(header, &udp, sizeof(udp));
4003 header += sizeof(udp);
4004 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4005 RTE_DIM(mplsoudp_encap_conf.label));
4006 memcpy(header, &mpls, sizeof(mpls));
4007 header += sizeof(mpls);
4008 action_encap_data->conf.size = header -
4009 action_encap_data->data;
4010 action->conf = &action_encap_data->conf;
4014 /** Parse MPLSOUDP decap action. */
/*
 * Serializes the ETH / [VLAN] / IP / UDP / MPLS stack to strip into a
 * raw-decap buffer.  UDP dst port 6635 is the IANA-assigned MPLS-in-UDP
 * port.
 * NOTE(review): mixes mplsoudp_decap_conf (select_vlan below) with
 * mplsoudp_encap_conf (addresses, select_ipv4, later select_vlan) --
 * possibly deliberate mirroring of the encap side, but verify.
 */
4016 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4017 const char *str, unsigned int len,
4018 void *buf, unsigned int size)
4020 struct buffer *out = buf;
4021 struct rte_flow_action *action;
4022 struct action_raw_decap_data *action_decap_data;
4023 struct rte_flow_item_eth eth = { .type = 0, };
4024 struct rte_flow_item_vlan vlan = {.tci = 0};
4025 struct rte_flow_item_ipv4 ipv4 = {
4027 .next_proto_id = IPPROTO_UDP,
4030 struct rte_flow_item_ipv6 ipv6 = {
4032 .proto = IPPROTO_UDP,
4035 struct rte_flow_item_udp udp = {
4037 .dst_port = rte_cpu_to_be_16(6635),
4040 struct rte_flow_item_mpls mpls;
4044 ret = parse_vc(ctx, token, str, len, buf, size);
4047 /* Nothing else to do if there is no buffer. */
4050 if (!out->args.vc.actions_n)
4052 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4053 /* Point to selected object. */
4054 ctx->object = out->args.vc.data;
4055 ctx->objmask = NULL;
4056 /* Copy the headers to the buffer. */
4057 action_decap_data = ctx->object;
4058 *action_decap_data = (struct action_raw_decap_data) {
4059 .conf = (struct rte_flow_action_raw_decap){
4060 .data = action_decap_data->data,
4064 header = action_decap_data->data;
4065 if (mplsoudp_decap_conf.select_vlan)
4066 eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
4067 else if (mplsoudp_encap_conf.select_ipv4)
4068 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4070 eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4071 memcpy(eth.dst.addr_bytes,
4072 mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
4073 memcpy(eth.src.addr_bytes,
4074 mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
4075 memcpy(header, &eth, sizeof(eth));
4076 header += sizeof(eth);
4077 if (mplsoudp_encap_conf.select_vlan) {
4078 if (mplsoudp_encap_conf.select_ipv4)
4079 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
4081 vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
4082 memcpy(header, &vlan, sizeof(vlan));
4083 header += sizeof(vlan);
4085 if (mplsoudp_encap_conf.select_ipv4) {
4086 memcpy(header, &ipv4, sizeof(ipv4));
4087 header += sizeof(ipv4);
4089 memcpy(header, &ipv6, sizeof(ipv6));
4090 header += sizeof(ipv6);
4092 memcpy(header, &udp, sizeof(udp));
4093 header += sizeof(udp);
/* MPLS label content is irrelevant for decap; zero it. */
4094 memset(&mpls, 0, sizeof(mpls));
4095 memcpy(header, &mpls, sizeof(mpls));
4096 header += sizeof(mpls);
4097 action_decap_data->conf.size = header -
4098 action_decap_data->data;
4099 action->conf = &action_decap_data->conf;
4103 /** Parse tokens for destroy command. */
/*
 * First call records the DESTROY command; subsequent calls append one
 * rule ID slot each to a growing array placed after *out, bounded by
 * the buffer size.
 */
4105 parse_destroy(struct context *ctx, const struct token *token,
4106 const char *str, unsigned int len,
4107 void *buf, unsigned int size)
4109 struct buffer *out = buf;
4111 /* Token name must match. */
4112 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4114 /* Nothing else to do if there is no buffer. */
4117 if (!out->command) {
4118 if (ctx->curr != DESTROY)
4120 if (sizeof(*out) > size)
4122 out->command = ctx->curr;
4125 ctx->objmask = NULL;
4126 out->args.destroy.rule =
4127 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another entry if it would run past the end of the buffer. */
4131 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4132 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4135 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4136 ctx->objmask = NULL;
4140 /** Parse tokens for flush command. */
/* FLUSH takes no per-rule arguments; only the command is recorded. */
4142 parse_flush(struct context *ctx, const struct token *token,
4143 const char *str, unsigned int len,
4144 void *buf, unsigned int size)
4146 struct buffer *out = buf;
4148 /* Token name must match. */
4149 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4151 /* Nothing else to do if there is no buffer. */
4154 if (!out->command) {
4155 if (ctx->curr != FLUSH)
4157 if (sizeof(*out) > size)
4159 out->command = ctx->curr;
4162 ctx->objmask = NULL;
4167 /** Parse tokens for query command. */
/* Records the QUERY command; its arguments follow via other tokens. */
4169 parse_query(struct context *ctx, const struct token *token,
4170 const char *str, unsigned int len,
4171 void *buf, unsigned int size)
4173 struct buffer *out = buf;
4175 /* Token name must match. */
4176 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4178 /* Nothing else to do if there is no buffer. */
4181 if (!out->command) {
4182 if (ctx->curr != QUERY)
4184 if (sizeof(*out) > size)
4186 out->command = ctx->curr;
4189 ctx->objmask = NULL;
4194 /** Parse action names. */
/*
 * Matches str against the names of all known actions (next_action
 * table) and stores the match at the popped argument's offset.
 */
4196 parse_action(struct context *ctx, const struct token *token,
4197 const char *str, unsigned int len,
4198 void *buf, unsigned int size)
4200 struct buffer *out = buf;
4201 const struct arg *arg = pop_args(ctx);
4205 /* Argument is expected. */
4208 /* Parse action name. */
4209 for (i = 0; next_action[i]; ++i) {
4210 const struct parse_action_priv *priv;
4212 token = &token_list[next_action[i]];
4213 if (strcmp_partial(token->name, str, len))
4219 memcpy((uint8_t *)ctx->object + arg->offset,
/* Error path: restore the argument for the caller before failing. */
4225 push_args(ctx, arg);
4229 /** Parse tokens for list command. */
/*
 * First call records the LIST command; subsequent calls append one
 * group ID slot each to a growing array after *out, bounded by the
 * buffer size (same scheme as parse_destroy).
 */
4231 parse_list(struct context *ctx, const struct token *token,
4232 const char *str, unsigned int len,
4233 void *buf, unsigned int size)
4235 struct buffer *out = buf;
4237 /* Token name must match. */
4238 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4240 /* Nothing else to do if there is no buffer. */
4243 if (!out->command) {
4244 if (ctx->curr != LIST)
4246 if (sizeof(*out) > size)
4248 out->command = ctx->curr;
4251 ctx->objmask = NULL;
4252 out->args.list.group =
4253 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse another entry if it would run past the end of the buffer. */
4257 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4258 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4261 ctx->object = out->args.list.group + out->args.list.group_n++;
4262 ctx->objmask = NULL;
4266 /** Parse tokens for isolate command. */
/* Records the ISOLATE command; the on/off flag follows as a token. */
4268 parse_isolate(struct context *ctx, const struct token *token,
4269 const char *str, unsigned int len,
4270 void *buf, unsigned int size)
4272 struct buffer *out = buf;
4274 /* Token name must match. */
4275 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4277 /* Nothing else to do if there is no buffer. */
4280 if (!out->command) {
4281 if (ctx->curr != ISOLATE)
4283 if (sizeof(*out) > size)
4285 out->command = ctx->curr;
4288 ctx->objmask = NULL;
4294 * Parse signed/unsigned integers 8 to 64-bit long.
4296 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Converts str with strtoimax/strtoumax depending on arg->sign, range
 * checks against arg->min/max, then stores the value at arg->offset in
 * ctx->object, honoring the field width and arg->hton byte order.  The
 * same value is also written to ctx->objmask when one is active.
 */
4300 parse_int(struct context *ctx, const struct token *token,
4301 const char *str, unsigned int len,
4302 void *buf, unsigned int size)
4304 const struct arg *arg = pop_args(ctx);
4309 /* Argument is expected. */
4314 (uintmax_t)strtoimax(str, &end, 0) :
4315 strtoumax(str, &end, 0);
/* Reject conversion errors and trailing characters within the token. */
4316 if (errno || (size_t)(end - str) != len)
4319 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4320 (intmax_t)u > (intmax_t)arg->max)) ||
4321 (!arg->sign && (u < arg->min || u > arg->max))))
4326 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4327 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4331 buf = (uint8_t *)ctx->object + arg->offset;
/* Dispatch on destination field width. */
4335 case sizeof(uint8_t):
4336 *(uint8_t *)buf = u;
4338 case sizeof(uint16_t):
4339 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
/* 24-bit fields (e.g. VNI) are stored byte by byte. */
4341 case sizeof(uint8_t [3]):
4342 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
4344 ((uint8_t *)buf)[0] = u;
4345 ((uint8_t *)buf)[1] = u >> 8;
4346 ((uint8_t *)buf)[2] = u >> 16;
4350 ((uint8_t *)buf)[0] = u >> 16;
4351 ((uint8_t *)buf)[1] = u >> 8;
4352 ((uint8_t *)buf)[2] = u;
4354 case sizeof(uint32_t):
4355 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4357 case sizeof(uint64_t):
4358 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is active. */
4363 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4365 buf = (uint8_t *)ctx->objmask + arg->offset;
/* Error path: restore the argument for the caller before failing. */
4370 push_args(ctx, arg);
4377 * Three arguments (ctx->args) are retrieved from the stack to store data,
4378 * its actual length and address (in that order).
4381 parse_string(struct context *ctx, const struct token *token,
4382 const char *str, unsigned int len,
4383 void *buf, unsigned int size)
4385 const struct arg *arg_data = pop_args(ctx);
4386 const struct arg *arg_len = pop_args(ctx);
4387 const struct arg *arg_addr = pop_args(ctx);
4388 char tmp[16]; /* Ought to be enough. */
4391 /* Arguments are expected. */
/* Partial-pop error paths: re-push whatever was already taken. */
4395 push_args(ctx, arg_data);
4399 push_args(ctx, arg_len);
4400 push_args(ctx, arg_data);
4403 size = arg_data->size;
4404 /* Bit-mask fill is not supported. */
4405 if (arg_data->mask || size < len)
4409 /* Let parse_int() fill length information first. */
/* Render len as decimal text so parse_int() can store it via arg_len. */
4410 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4413 push_args(ctx, arg_len);
4414 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4419 buf = (uint8_t *)ctx->object + arg_data->offset;
4420 /* Output buffer is not necessarily NUL-terminated. */
/* Copy the string and zero-pad the rest of the field. */
4421 memcpy(buf, str, len);
4422 memset((uint8_t *)buf + len, 0x00, size - len);
/* Mask covers exactly the bytes actually written. */
4424 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4425 /* Save address if requested. */
4426 if (arg_addr->size) {
/* NOTE(review): the value stored appears to be the data field's own
 * address within the object/mask — confirm against the full file. */
4427 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4429 (uint8_t *)ctx->object + arg_data->offset
4433 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4435 (uint8_t *)ctx->objmask + arg_data->offset
/* Error path: restore all three args in original stack order. */
4441 push_args(ctx, arg_addr);
4442 push_args(ctx, arg_len);
4443 push_args(ctx, arg_data);
4448 * Parse a MAC address.
4450 * Last argument (ctx->args) is retrieved to determine storage size and
4454 parse_mac_addr(struct context *ctx, const struct token *token,
4455 const char *str, unsigned int len,
4456 void *buf, unsigned int size)
4458 const struct arg *arg = pop_args(ctx);
/* Parse into a local first so a failure leaves the object untouched. */
4459 struct ether_addr tmp;
4463 /* Argument is expected. */
4467 /* Bit-mask fill is not supported. */
4468 if (arg->mask || size != sizeof(tmp))
4470 /* Only network endian is supported. */
/* cmdline helper accepts the usual aa:bb:cc:dd:ee:ff forms. */
4473 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* The entire token must have been consumed. */
4474 if (ret < 0 || (unsigned int)ret != len)
4478 buf = (uint8_t *)ctx->object + arg->offset;
4479 memcpy(buf, &tmp, size);
/* Full-coverage mask when a mask object is active. */
4481 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size)
/* Error path: restore the popped arg. */;
4484 push_args(ctx, arg);
4489 * Parse an IPv4 address.
4491 * Last argument (ctx->args) is retrieved to determine storage size and
4495 parse_ipv4_addr(struct context *ctx, const struct token *token,
4496 const char *str, unsigned int len,
4497 void *buf, unsigned int size)
4499 const struct arg *arg = pop_args(ctx);
4504 /* Argument is expected. */
4508 /* Bit-mask fill is not supported. */
4509 if (arg->mask || size != sizeof(tmp))
4511 /* Only network endian is supported. */
/* Copy into a local NUL-terminated buffer: inet_pton() needs one and
 * str is only length-delimited.  (str2 declaration elided here.) */
4514 memcpy(str2, str, len);
4516 ret = inet_pton(AF_INET, str2, &tmp);
4518 /* Attempt integer parsing. */
/* Not dotted-quad: fall back to parse_int() (e.g. raw 32-bit value). */
4519 push_args(ctx, arg);
4520 return parse_int(ctx, token, str, len, buf, size);
4524 buf = (uint8_t *)ctx->object + arg->offset;
4525 memcpy(buf, &tmp, size);
4527 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the popped arg. */
4530 push_args(ctx, arg);
4535 * Parse an IPv6 address.
4537 * Last argument (ctx->args) is retrieved to determine storage size and
4541 parse_ipv6_addr(struct context *ctx, const struct token *token,
4542 const char *str, unsigned int len,
4543 void *buf, unsigned int size)
4545 const struct arg *arg = pop_args(ctx);
/* Parse into a local first so a failure leaves the object untouched. */
4547 struct in6_addr tmp;
4551 /* Argument is expected. */
4555 /* Bit-mask fill is not supported. */
4556 if (arg->mask || size != sizeof(tmp))
4558 /* Only network endian is supported. */
/* NUL-terminated working copy for inet_pton(); str2 declaration elided. */
4561 memcpy(str2, str, len);
4563 ret = inet_pton(AF_INET6, str2, &tmp);
4568 buf = (uint8_t *)ctx->object + arg->offset;
4569 memcpy(buf, &tmp, size);
4571 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the popped arg. */
4574 push_args(ctx, arg);
4578 /** Boolean values (even indices stand for false). */
/* NULL-terminated table scanned by parse_boolean(); parity of the match
 * index gives the value (i & 1).  Entries elided in this excerpt. */
4579 static const char *const boolean_name[] = {
4589 * Parse a boolean value.
4591 * Last argument (ctx->args) is retrieved to determine storage size and
4595 parse_boolean(struct context *ctx, const struct token *token,
4596 const char *str, unsigned int len,
4597 void *buf, unsigned int size)
4599 const struct arg *arg = pop_args(ctx);
4603 /* Argument is expected. */
/* Look for a named boolean; table parity encodes the value. */
4606 for (i = 0; boolean_name[i]; ++i)
4607 if (!strcmp_partial(boolean_name[i], str, len))
4609 /* Process token as integer. */
/* Matched a name: substitute "0"/"1" so parse_int() stores it. */
4610 if (boolean_name[i])
4611 str = i & 1 ? "1" : "0";
4612 push_args(ctx, arg);
4613 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length, not the substituted string's. */
4614 return ret > 0 ? (int)len : ret;
4617 /** Parse port and update context. */
4619 parse_port(struct context *ctx, const struct token *token,
4620 const char *str, unsigned int len,
4621 void *buf, unsigned int size)
/* Fallback scratch buffer; NOTE(review): the branch choosing between it
 * and the caller's buf is elided in this excerpt. */
4623 struct buffer *out = &(struct buffer){ .port = 0 };
4631 ctx->objmask = NULL;
4632 size = sizeof(*out);
/* Delegate the numeric parsing, then remember the port in the context. */
4634 ret = parse_int(ctx, token, str, len, out, size);
4636 ctx->port = out->port;
4642 /** No completion. */
/* Completion callback stub for tokens with nothing to suggest. */
4644 comp_none(struct context *ctx, const struct token *token,
4645 unsigned int ent, char *buf, unsigned int size)
4655 /** Complete boolean values. */
/* Returns entry `ent` of boolean_name[] when buf is set; the count of
 * entries otherwise (return of i elided in this excerpt). */
4657 comp_boolean(struct context *ctx, const struct token *token,
4658 unsigned int ent, char *buf, unsigned int size)
4664 for (i = 0; boolean_name[i]; ++i)
4665 if (buf && i == ent)
4666 return snprintf(buf, size, "%s", boolean_name[i]);
4672 /** Complete action names. */
/* Walks the next_action[] index list; with buf set, emits the name of
 * entry `ent`, otherwise counts entries (count return elided here). */
4674 comp_action(struct context *ctx, const struct token *token,
4675 unsigned int ent, char *buf, unsigned int size)
4681 for (i = 0; next_action[i]; ++i)
4682 if (buf && i == ent)
4683 return snprintf(buf, size, "%s",
4684 token_list[next_action[i]].name);
4690 /** Complete available ports. */
/* Iterates attached ethdev ports; with buf set, prints the `ent`-th port
 * ID, otherwise counts them (count return elided in this excerpt). */
4692 comp_port(struct context *ctx, const struct token *token,
4693 unsigned int ent, char *buf, unsigned int size)
4700 RTE_ETH_FOREACH_DEV(p) {
4701 if (buf && i == ent)
4702 return snprintf(buf, size, "%u", p);
4710 /** Complete available rule IDs. */
/* Suggests flow-rule IDs of the port previously stored in ctx->port. */
4712 comp_rule_id(struct context *ctx, const struct token *token,
4713 unsigned int ent, char *buf, unsigned int size)
4716 struct rte_port *port;
4717 struct port_flow *pf;
/* Nothing to complete without a valid, specific port. */
4720 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4721 ctx->port == (portid_t)RTE_PORT_ALL)
4723 port = &ports[ctx->port];
/* Walk the port's singly-linked flow list. */
4724 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4725 if (buf && i == ent)
4726 return snprintf(buf, size, "%u", pf->id);
4734 /** Complete type field for RSS action. */
/* Suggests rss_type_table[] names, then a final "end" terminator entry. */
4736 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
4737 unsigned int ent, char *buf, unsigned int size)
4743 for (i = 0; rss_type_table[i].str; ++i)
4748 return snprintf(buf, size, "%s", rss_type_table[ent].str);
4750 return snprintf(buf, size, "end");
4754 /** Complete queue field for RSS action. */
/* Suggests queue indices (bounds check elided here) then "end". */
4756 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
4757 unsigned int ent, char *buf, unsigned int size)
4764 return snprintf(buf, size, "%u", ent);
4766 return snprintf(buf, size, "end");
4770 /** Internal context. */
/* Single shared parser state; the cmdline callbacks below all use it. */
4771 static struct context cmd_flow_context;
4773 /** Global parser instance (cmdline API). */
/* Forward declaration; initialized at the bottom of this file. */
4774 cmdline_parse_inst_t cmd_flow;
4776 /** Initialize context. */
/* Resets the shared parser state before a new command is tokenized.
 * NOTE(review): most field resets are elided in this excerpt. */
4778 cmd_flow_context_init(struct context *ctx)
4780 /* A full memset() is not necessary. */
4790 ctx->objmask = NULL;
4793 /** Parse a token (cmdline API). */
/*
 * Entry point invoked by librte_cmdline for each token of a "flow"
 * command: delimits the token, tries every candidate token from the
 * context's next-list, then pushes the accepted token's follow-ups and
 * argument descriptors.  NOTE(review): return statements and some
 * guards are elided in this excerpt.
 */
4795 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
4798 struct context *ctx = &cmd_flow_context;
4799 const struct token *token;
4800 const enum index *list;
4805 token = &token_list[ctx->curr];
4806 /* Check argument length. */
/* Token ends at whitespace or a '#' comment marker. */
4809 for (len = 0; src[len]; ++len)
4810 if (src[len] == '#' || isspace(src[len]))
4814 /* Last argument and EOL detection. */
4815 for (i = len; src[i]; ++i)
4816 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
4818 else if (!isspace(src[i])) {
4823 if (src[i] == '\r' || src[i] == '\n') {
4827 /* Initialize context if necessary. */
4828 if (!ctx->next_num) {
4831 ctx->next[ctx->next_num++] = token->next[0];
4833 /* Process argument through candidates. */
4834 ctx->prev = ctx->curr;
/* Try each candidate from the top of the next-token stack. */
4835 list = ctx->next[ctx->next_num - 1];
4836 for (i = 0; list[i]; ++i) {
4837 const struct token *next = &token_list[list[i]];
4840 ctx->curr = list[i];
/* Tokens with a callback parse themselves; others use the default. */
4842 tmp = next->call(ctx, next, src, len, result, size);
4844 tmp = parse_default(ctx, next, src, len, result, size);
/* A candidate must consume the token exactly to be accepted. */
4845 if (tmp == -1 || tmp != len)
4853 /* Push subsequent tokens if any. */
4855 for (i = 0; token->next[i]; ++i) {
/* Guard against overflowing the fixed-size next-token stack. */
4856 if (ctx->next_num == RTE_DIM(ctx->next))
4858 ctx->next[ctx->next_num++] = token->next[i];
4860 /* Push arguments if any. */
4862 for (i = 0; token->args[i]; ++i) {
4863 if (ctx->args_num == RTE_DIM(ctx->args))
4865 ctx->args[ctx->args_num++] = token->args[i];
4870 /** Return number of completion entries (cmdline API). */
4872 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
4874 struct context *ctx = &cmd_flow_context;
4875 const struct token *token = &token_list[ctx->curr];
4876 const enum index *list;
4880 /* Count number of tokens in current list. */
/* Prefer the live next-stack; fall back to the token's static list. */
4882 list = ctx->next[ctx->next_num - 1];
4884 list = token->next[0];
4885 for (i = 0; list[i]; ++i)
4890 * If there is a single token, use its completion callback, otherwise
4891 * return the number of entries.
4893 token = &token_list[list[0]];
4894 if (i == 1 && token->comp) {
4895 /* Save index for cmd_flow_get_help(). */
4896 ctx->prev = list[0];
/* NULL buffer asks the callback for a count only. */
4897 return token->comp(ctx, token, 0, NULL, 0);
4902 /** Return a completion entry (cmdline API). */
4904 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
4905 char *dst, unsigned int size)
4907 struct context *ctx = &cmd_flow_context;
4908 const struct token *token = &token_list[ctx->curr];
4909 const enum index *list;
4913 /* Count number of tokens in current list. */
/* Same list selection as cmd_flow_complete_get_nb(). */
4915 list = ctx->next[ctx->next_num - 1];
4917 list = token->next[0];
4918 for (i = 0; list[i]; ++i)
4922 /* If there is a single token, use its completion callback. */
4923 token = &token_list[list[0]];
4924 if (i == 1 && token->comp) {
4925 /* Save index for cmd_flow_get_help(). */
4926 ctx->prev = list[0];
/* Normalize callback result to cmdline's 0 / -1 convention. */
4927 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
4929 /* Otherwise make sure the index is valid and use defaults. */
4932 token = &token_list[list[index]];
4933 snprintf(dst, size, "%s", token->name);
4934 /* Save index for cmd_flow_get_help(). */
4935 ctx->prev = list[index];
4939 /** Populate help strings for current token (cmdline API). */
/* Uses ctx->prev, saved by the completion callbacks above. */
4941 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
4943 struct context *ctx = &cmd_flow_context;
4944 const struct token *token = &token_list[ctx->prev];
4949 /* Set token type and update global help with details. */
4950 snprintf(dst, size, "%s", (token->type ? token->type : "TOKEN"));
/* Prefer the detailed help text, fall back to the bare token name. */
4952 cmd_flow.help_str = token->help;
4954 cmd_flow.help_str = token->name;
4958 /** Token definition template (cmdline API). */
/* Every dynamic token shares this single ops vtable (compound literal). */
4959 static struct cmdline_token_hdr cmd_flow_token_hdr = {
4960 .ops = &(struct cmdline_token_ops){
4961 .parse = cmd_flow_parse,
4962 .complete_get_nb = cmd_flow_complete_get_nb,
4963 .complete_get_elt = cmd_flow_complete_get_elt,
4964 .get_help = cmd_flow_get_help,
4969 /** Populate the next dynamic token. */
/*
 * Called by cmd_flow_cb() to hand librte_cmdline one token header at a
 * time; the shared template above serves for all of them.
 */
4971 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
4972 cmdline_parse_token_hdr_t **hdr_inst)
4974 struct context *ctx = &cmd_flow_context;
4976 /* Always reinitialize context before requesting the first token. */
/* hdr_inst == cmd_flow.tokens means this is token #0. */
4977 if (!(hdr_inst - cmd_flow.tokens))
4978 cmd_flow_context_init(ctx);
4979 /* Return NULL when no more tokens are expected. */
4980 if (!ctx->next_num && ctx->curr) {
4984 /* Determine if command should end here. */
4985 if (ctx->eol && ctx->last && ctx->next_num) {
4986 const enum index *list = ctx->next[ctx->next_num - 1];
/* NOTE(review): the END-token scan inside this loop is elided here. */
4989 for (i = 0; list[i]; ++i) {
4996 *hdr = &cmd_flow_token_hdr;
4999 /** Dispatch parsed buffer to function calls. */
/*
 * Maps a completed command buffer to the matching port_flow_*() helper.
 * NOTE(review): the `case` labels of this switch are elided in this
 * excerpt; pairing below inferred from each call's arguments.
 */
5001 cmd_flow_parsed(const struct buffer *in)
5003 switch (in->command) {
5005 port_flow_validate(in->port, &in->args.vc.attr,
5006 in->args.vc.pattern, in->args.vc.actions);
5009 port_flow_create(in->port, &in->args.vc.attr,
5010 in->args.vc.pattern, in->args.vc.actions);
5013 port_flow_destroy(in->port, in->args.destroy.rule_n,
5014 in->args.destroy.rule);
5017 port_flow_flush(in->port);
5020 port_flow_query(in->port, in->args.query.rule,
5021 &in->args.query.action);
5024 port_flow_list(in->port, in->args.list.group_n,
5025 in->args.list.group);
5028 port_flow_isolate(in->port, in->args.isolate.set);
5035 /** Token generator and output processing callback (cmdline API). */
/* Serves the next dynamic token, or dispatches the finished command.
 * NOTE(review): the branch selecting between the two calls is elided. */
5037 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5040 cmd_flow_tok(arg0, arg2);
5042 cmd_flow_parsed(arg0);
5045 /** Global parser instance (cmdline API). */
5046 cmdline_parse_inst_t cmd_flow = {
5048 .data = NULL, /**< Unused. */
5049 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5052 }, /**< Tokens are returned by cmd_flow_tok(). */