1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2016 6WIND S.A.
3 * Copyright 2016 Mellanox Technologies, Ltd
13 #include <arpa/inet.h>
14 #include <sys/socket.h>
16 #include <rte_string_fns.h>
17 #include <rte_common.h>
18 #include <rte_ethdev.h>
19 #include <rte_byteorder.h>
20 #include <cmdline_parse.h>
21 #include <cmdline_parse_etheraddr.h>
26 /** Parser token indices. */
47 /* Top-level command. */
50 /* Sub-level commands. */
59 /* Destroy arguments. */
62 /* Query arguments. */
68 /* Validate/create arguments. */
75 /* Validate/create pattern. */
112 ITEM_VLAN_INNER_TYPE,
144 ITEM_E_TAG_GRP_ECID_B,
163 ITEM_ARP_ETH_IPV4_SHA,
164 ITEM_ARP_ETH_IPV4_SPA,
165 ITEM_ARP_ETH_IPV4_THA,
166 ITEM_ARP_ETH_IPV4_TPA,
168 ITEM_IPV6_EXT_NEXT_HDR,
173 ITEM_ICMP6_ND_NS_TARGET_ADDR,
175 ITEM_ICMP6_ND_NA_TARGET_ADDR,
177 ITEM_ICMP6_ND_OPT_TYPE,
178 ITEM_ICMP6_ND_OPT_SLA_ETH,
179 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
180 ITEM_ICMP6_ND_OPT_TLA_ETH,
181 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
185 /* Validate/create actions. */
205 ACTION_RSS_FUNC_DEFAULT,
206 ACTION_RSS_FUNC_TOEPLITZ,
207 ACTION_RSS_FUNC_SIMPLE_XOR,
219 ACTION_PHY_PORT_ORIGINAL,
220 ACTION_PHY_PORT_INDEX,
222 ACTION_PORT_ID_ORIGINAL,
226 ACTION_OF_SET_MPLS_TTL,
227 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
228 ACTION_OF_DEC_MPLS_TTL,
229 ACTION_OF_SET_NW_TTL,
230 ACTION_OF_SET_NW_TTL_NW_TTL,
231 ACTION_OF_DEC_NW_TTL,
232 ACTION_OF_COPY_TTL_OUT,
233 ACTION_OF_COPY_TTL_IN,
236 ACTION_OF_PUSH_VLAN_ETHERTYPE,
237 ACTION_OF_SET_VLAN_VID,
238 ACTION_OF_SET_VLAN_VID_VLAN_VID,
239 ACTION_OF_SET_VLAN_PCP,
240 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
242 ACTION_OF_POP_MPLS_ETHERTYPE,
244 ACTION_OF_PUSH_MPLS_ETHERTYPE,
251 ACTION_MPLSOGRE_ENCAP,
252 ACTION_MPLSOGRE_DECAP,
253 ACTION_MPLSOUDP_ENCAP,
254 ACTION_MPLSOUDP_DECAP,
256 ACTION_SET_IPV4_SRC_IPV4_SRC,
258 ACTION_SET_IPV4_DST_IPV4_DST,
260 ACTION_SET_IPV6_SRC_IPV6_SRC,
262 ACTION_SET_IPV6_DST_IPV6_DST,
264 ACTION_SET_TP_SRC_TP_SRC,
266 ACTION_SET_TP_DST_TP_DST,
272 ACTION_SET_MAC_SRC_MAC_SRC,
274 ACTION_SET_MAC_DST_MAC_DST,
276 ACTION_INC_TCP_SEQ_VALUE,
278 ACTION_DEC_TCP_SEQ_VALUE,
280 ACTION_INC_TCP_ACK_VALUE,
282 ACTION_DEC_TCP_ACK_VALUE,
285 /** Maximum size in bytes of the byte-string pattern stored after struct rte_flow_item_raw. */
286 #define ITEM_RAW_PATTERN_SIZE 40
288 /** Storage size for struct rte_flow_item_raw including its trailing pattern buffer. */
289 #define ITEM_RAW_SIZE \
290 (sizeof(struct rte_flow_item_raw) + ITEM_RAW_PATTERN_SIZE)
292 /** Maximum number of queue indices this parser can store for struct rte_flow_action_rss. */
293 #define ACTION_RSS_QUEUE_NUM 32
295 /** Storage for struct rte_flow_action_rss including external data. */
296 struct action_rss_data {
297 struct rte_flow_action_rss conf;
298 uint8_t key[RSS_HASH_KEY_LENGTH];
299 uint16_t queue[ACTION_RSS_QUEUE_NUM];
302 /** Maximum number of pattern items used to build struct rte_flow_action_vxlan_encap
303  * (presumably eth/vlan/ip/udp/vxlan plus the END terminator — matches the
304  * item_* fields of struct action_vxlan_encap_data below). */
303 #define ACTION_VXLAN_ENCAP_ITEMS_NUM 6
305 /** Storage for struct rte_flow_action_vxlan_encap including external data. */
306 struct action_vxlan_encap_data {
307 struct rte_flow_action_vxlan_encap conf;
308 struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM];
309 struct rte_flow_item_eth item_eth;
310 struct rte_flow_item_vlan item_vlan;
312 struct rte_flow_item_ipv4 item_ipv4;
313 struct rte_flow_item_ipv6 item_ipv6;
315 struct rte_flow_item_udp item_udp;
316 struct rte_flow_item_vxlan item_vxlan;
319 /** Maximum number of pattern items used to build struct rte_flow_action_nvgre_encap
320  * (presumably eth/vlan/ip/nvgre plus the END terminator — matches the
321  * item_* fields of struct action_nvgre_encap_data below). */
320 #define ACTION_NVGRE_ENCAP_ITEMS_NUM 5
322 /** Storage for struct rte_flow_action_nvgre_encap including external data. */
323 struct action_nvgre_encap_data {
324 struct rte_flow_action_nvgre_encap conf;
325 struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM];
326 struct rte_flow_item_eth item_eth;
327 struct rte_flow_item_vlan item_vlan;
329 struct rte_flow_item_ipv4 item_ipv4;
330 struct rte_flow_item_ipv6 item_ipv6;
332 struct rte_flow_item_nvgre item_nvgre;
335 /** Maximum size in bytes of the data/preserve buffers backing struct rte_flow_action_raw_encap. */
336 #define ACTION_RAW_ENCAP_MAX_DATA 128
338 /** Storage for struct rte_flow_action_raw_encap including external data. */
339 struct action_raw_encap_data {
340 struct rte_flow_action_raw_encap conf;
341 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
342 uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA];
345 /** Storage for struct rte_flow_action_raw_decap including external data. */
346 struct action_raw_decap_data {
347 struct rte_flow_action_raw_decap conf;
348 uint8_t data[ACTION_RAW_ENCAP_MAX_DATA];
351 /** Depth of the parser context stacks (next[] token lists and args[] entries). */
352 #define CTX_STACK_SIZE 16
354 /** Parser context. */
356 /** Stack of subsequent token lists to process. */
357 const enum index *next[CTX_STACK_SIZE];
358 /** Arguments for stacked tokens. */
359 const void *args[CTX_STACK_SIZE];
360 enum index curr; /**< Current token index. */
361 enum index prev; /**< Index of the last token seen. */
362 int next_num; /**< Number of entries in next[]. */
363 int args_num; /**< Number of entries in args[]. */
364 uint32_t eol:1; /**< EOL has been detected. */
365 uint32_t last:1; /**< No more arguments. */
366 portid_t port; /**< Current port ID (for completions). */
367 uint32_t objdata; /**< Object-specific data. */
368 void *object; /**< Address of current object for relative offsets. */
369 void *objmask; /**< Object a full mask must be written to. */
372 /** Token argument. */
374 uint32_t hton:1; /**< Use network byte ordering. */
375 uint32_t sign:1; /**< Value is signed. */
376 uint32_t bounded:1; /**< Value is bounded. */
377 uintmax_t min; /**< Minimum value if bounded. */
378 uintmax_t max; /**< Maximum value if bounded. */
379 uint32_t offset; /**< Relative offset from ctx->object. */
380 uint32_t size; /**< Field size. */
381 const uint8_t *mask; /**< Bit-mask to use instead of offset/size. */
384 /** Parser token definition. */
386 /** Type displayed during completion (defaults to "TOKEN"). */
388 /** Help displayed during completion (defaults to token name). */
390 /** Private data used by parser functions. */
393 * Lists of subsequent tokens to push on the stack. Each call to the
394 * parser consumes the last entry of that stack.
396 const enum index *const *next;
397 /** Arguments stack for subsequent tokens that need them. */
398 const struct arg *const *args;
400 * Token-processing callback, returns -1 in case of error, the
401 * length of the matched string otherwise. If NULL, attempts to
402 * match the token name.
404 * If buf is not NULL, the result should be stored in it according
405 * to context. An error is returned if not large enough.
407 int (*call)(struct context *ctx, const struct token *token,
408 const char *str, unsigned int len,
409 void *buf, unsigned int size);
411 * Callback that provides possible values for this token, used for
412 * completion. Returns -1 in case of error, the number of possible
413 * values otherwise. If NULL, the token name is used.
415 * If buf is not NULL, entry index ent is written to buf and the
416 * full length of the entry is returned (same behavior as
419 int (*comp)(struct context *ctx, const struct token *token,
420 unsigned int ent, char *buf, unsigned int size);
421 /** Mandatory token name, no default value. */
425 /** Static initializer for a token's next field; the list is NULL-terminated. */
426 #define NEXT(...) (const enum index *const []){ __VA_ARGS__, NULL, }
428 /** Static initializer for a NEXT() entry; the index list ends with ZERO. */
429 #define NEXT_ENTRY(...) (const enum index []){ __VA_ARGS__, ZERO, }
431 /** Static initializer for a token's args field; the list is NULL-terminated. */
432 #define ARGS(...) (const struct arg *const []){ __VA_ARGS__, NULL, }
434 /** Static initializer for ARGS() to target a field. */
435 #define ARGS_ENTRY(s, f) \
436 (&(const struct arg){ \
437 .offset = offsetof(s, f), \
438 .size = sizeof(((s *)0)->f), \
441 /** Static initializer for ARGS() to target a bit-field. */
442 #define ARGS_ENTRY_BF(s, f, b) \
443 (&(const struct arg){ \
445 .mask = (const void *)&(const s){ .f = (1 << (b)) - 1 }, \
448 /** Static initializer for ARGS() to target an arbitrary bit-mask. */
449 #define ARGS_ENTRY_MASK(s, f, m) \
450 (&(const struct arg){ \
451 .offset = offsetof(s, f), \
452 .size = sizeof(((s *)0)->f), \
453 .mask = (const void *)(m), \
456 /** Same as ARGS_ENTRY_MASK() using network byte ordering for the value. */
457 #define ARGS_ENTRY_MASK_HTON(s, f, m) \
458 (&(const struct arg){ \
460 .offset = offsetof(s, f), \
461 .size = sizeof(((s *)0)->f), \
462 .mask = (const void *)(m), \
465 /** Static initializer for ARGS() to target a pointer. */
466 #define ARGS_ENTRY_PTR(s, f) \
467 (&(const struct arg){ \
468 .size = sizeof(*((s *)0)->f), \
471 /** Static initializer for ARGS() with arbitrary offset and size. */
472 #define ARGS_ENTRY_ARB(o, s) \
473 (&(const struct arg){ \
478 /** Same as ARGS_ENTRY_ARB() with bounded values. */
479 #define ARGS_ENTRY_ARB_BOUNDED(o, s, i, a) \
480 (&(const struct arg){ \
488 /** Same as ARGS_ENTRY() using network byte ordering. */
489 #define ARGS_ENTRY_HTON(s, f) \
490 (&(const struct arg){ \
492 .offset = offsetof(s, f), \
493 .size = sizeof(((s *)0)->f), \
496 /** Same as ARGS_ENTRY_HTON() for a single argument, without structure. */
497 #define ARG_ENTRY_HTON(s) \
498 (&(const struct arg){ \
504 /** Parser output buffer layout expected by cmd_flow_parsed(). */
506 enum index command; /**< Flow command. */
507 portid_t port; /**< Affected port ID. */
510 struct rte_flow_attr attr;
511 struct rte_flow_item *pattern;
512 struct rte_flow_action *actions;
516 } vc; /**< Validate/create arguments. */
520 } destroy; /**< Destroy arguments. */
523 struct rte_flow_action action;
524 } query; /**< Query arguments. */
528 } list; /**< List arguments. */
531 } isolate; /**< Isolated mode arguments. */
532 } args; /**< Command arguments. */
535 /** Private data for pattern items. */
536 struct parse_item_priv {
537 enum rte_flow_item_type type; /**< Item type. */
538 uint32_t size; /**< Size of item specification structure. */
541 #define PRIV_ITEM(t, s) \
542 (&(const struct parse_item_priv){ \
543 .type = RTE_FLOW_ITEM_TYPE_ ## t, \
547 /** Private data for actions. */
548 struct parse_action_priv {
549 enum rte_flow_action_type type; /**< Action type. */
550 uint32_t size; /**< Size of action configuration structure. */
553 #define PRIV_ACTION(t, s) \
554 (&(const struct parse_action_priv){ \
555 .type = RTE_FLOW_ACTION_TYPE_ ## t, \
559 static const enum index next_vc_attr[] = {
569 static const enum index next_destroy_attr[] = {
575 static const enum index next_list_attr[] = {
581 static const enum index item_param[] = {
590 static const enum index next_item[] = {
626 ITEM_ICMP6_ND_OPT_SLA_ETH,
627 ITEM_ICMP6_ND_OPT_TLA_ETH,
632 static const enum index item_fuzzy[] = {
638 static const enum index item_any[] = {
644 static const enum index item_vf[] = {
650 static const enum index item_phy_port[] = {
656 static const enum index item_port_id[] = {
662 static const enum index item_mark[] = {
668 static const enum index item_raw[] = {
678 static const enum index item_eth[] = {
686 static const enum index item_vlan[] = {
691 ITEM_VLAN_INNER_TYPE,
696 static const enum index item_ipv4[] = {
706 static const enum index item_ipv6[] = {
717 static const enum index item_icmp[] = {
724 static const enum index item_udp[] = {
731 static const enum index item_tcp[] = {
739 static const enum index item_sctp[] = {
748 static const enum index item_vxlan[] = {
754 static const enum index item_e_tag[] = {
755 ITEM_E_TAG_GRP_ECID_B,
760 static const enum index item_nvgre[] = {
766 static const enum index item_mpls[] = {
772 static const enum index item_gre[] = {
778 static const enum index item_gtp[] = {
784 static const enum index item_geneve[] = {
791 static const enum index item_vxlan_gpe[] = {
797 static const enum index item_arp_eth_ipv4[] = {
798 ITEM_ARP_ETH_IPV4_SHA,
799 ITEM_ARP_ETH_IPV4_SPA,
800 ITEM_ARP_ETH_IPV4_THA,
801 ITEM_ARP_ETH_IPV4_TPA,
806 static const enum index item_ipv6_ext[] = {
807 ITEM_IPV6_EXT_NEXT_HDR,
812 static const enum index item_icmp6[] = {
819 static const enum index item_icmp6_nd_ns[] = {
820 ITEM_ICMP6_ND_NS_TARGET_ADDR,
825 static const enum index item_icmp6_nd_na[] = {
826 ITEM_ICMP6_ND_NA_TARGET_ADDR,
831 static const enum index item_icmp6_nd_opt[] = {
832 ITEM_ICMP6_ND_OPT_TYPE,
837 static const enum index item_icmp6_nd_opt_sla_eth[] = {
838 ITEM_ICMP6_ND_OPT_SLA_ETH_SLA,
843 static const enum index item_icmp6_nd_opt_tla_eth[] = {
844 ITEM_ICMP6_ND_OPT_TLA_ETH_TLA,
849 static const enum index item_meta[] = {
855 static const enum index next_action[] = {
871 ACTION_OF_SET_MPLS_TTL,
872 ACTION_OF_DEC_MPLS_TTL,
873 ACTION_OF_SET_NW_TTL,
874 ACTION_OF_DEC_NW_TTL,
875 ACTION_OF_COPY_TTL_OUT,
876 ACTION_OF_COPY_TTL_IN,
879 ACTION_OF_SET_VLAN_VID,
880 ACTION_OF_SET_VLAN_PCP,
889 ACTION_MPLSOGRE_ENCAP,
890 ACTION_MPLSOGRE_DECAP,
891 ACTION_MPLSOUDP_ENCAP,
892 ACTION_MPLSOUDP_DECAP,
911 static const enum index action_mark[] = {
917 static const enum index action_queue[] = {
923 static const enum index action_count[] = {
930 static const enum index action_rss[] = {
941 static const enum index action_vf[] = {
948 static const enum index action_phy_port[] = {
949 ACTION_PHY_PORT_ORIGINAL,
950 ACTION_PHY_PORT_INDEX,
955 static const enum index action_port_id[] = {
956 ACTION_PORT_ID_ORIGINAL,
962 static const enum index action_meter[] = {
968 static const enum index action_of_set_mpls_ttl[] = {
969 ACTION_OF_SET_MPLS_TTL_MPLS_TTL,
974 static const enum index action_of_set_nw_ttl[] = {
975 ACTION_OF_SET_NW_TTL_NW_TTL,
980 static const enum index action_of_push_vlan[] = {
981 ACTION_OF_PUSH_VLAN_ETHERTYPE,
986 static const enum index action_of_set_vlan_vid[] = {
987 ACTION_OF_SET_VLAN_VID_VLAN_VID,
992 static const enum index action_of_set_vlan_pcp[] = {
993 ACTION_OF_SET_VLAN_PCP_VLAN_PCP,
998 static const enum index action_of_pop_mpls[] = {
999 ACTION_OF_POP_MPLS_ETHERTYPE,
1004 static const enum index action_of_push_mpls[] = {
1005 ACTION_OF_PUSH_MPLS_ETHERTYPE,
1010 static const enum index action_set_ipv4_src[] = {
1011 ACTION_SET_IPV4_SRC_IPV4_SRC,
1016 static const enum index action_set_mac_src[] = {
1017 ACTION_SET_MAC_SRC_MAC_SRC,
1022 static const enum index action_set_ipv4_dst[] = {
1023 ACTION_SET_IPV4_DST_IPV4_DST,
1028 static const enum index action_set_ipv6_src[] = {
1029 ACTION_SET_IPV6_SRC_IPV6_SRC,
1034 static const enum index action_set_ipv6_dst[] = {
1035 ACTION_SET_IPV6_DST_IPV6_DST,
1040 static const enum index action_set_tp_src[] = {
1041 ACTION_SET_TP_SRC_TP_SRC,
1046 static const enum index action_set_tp_dst[] = {
1047 ACTION_SET_TP_DST_TP_DST,
1052 static const enum index action_set_ttl[] = {
1058 static const enum index action_jump[] = {
1064 static const enum index action_set_mac_dst[] = {
1065 ACTION_SET_MAC_DST_MAC_DST,
1070 static const enum index action_inc_tcp_seq[] = {
1071 ACTION_INC_TCP_SEQ_VALUE,
1076 static const enum index action_dec_tcp_seq[] = {
1077 ACTION_DEC_TCP_SEQ_VALUE,
1082 static const enum index action_inc_tcp_ack[] = {
1083 ACTION_INC_TCP_ACK_VALUE,
1088 static const enum index action_dec_tcp_ack[] = {
1089 ACTION_DEC_TCP_ACK_VALUE,
1094 static int parse_init(struct context *, const struct token *,
1095 const char *, unsigned int,
1096 void *, unsigned int);
1097 static int parse_vc(struct context *, const struct token *,
1098 const char *, unsigned int,
1099 void *, unsigned int);
1100 static int parse_vc_spec(struct context *, const struct token *,
1101 const char *, unsigned int, void *, unsigned int);
1102 static int parse_vc_conf(struct context *, const struct token *,
1103 const char *, unsigned int, void *, unsigned int);
1104 static int parse_vc_action_rss(struct context *, const struct token *,
1105 const char *, unsigned int, void *,
1107 static int parse_vc_action_rss_func(struct context *, const struct token *,
1108 const char *, unsigned int, void *,
1110 static int parse_vc_action_rss_type(struct context *, const struct token *,
1111 const char *, unsigned int, void *,
1113 static int parse_vc_action_rss_queue(struct context *, const struct token *,
1114 const char *, unsigned int, void *,
1116 static int parse_vc_action_vxlan_encap(struct context *, const struct token *,
1117 const char *, unsigned int, void *,
1119 static int parse_vc_action_nvgre_encap(struct context *, const struct token *,
1120 const char *, unsigned int, void *,
1122 static int parse_vc_action_l2_encap(struct context *, const struct token *,
1123 const char *, unsigned int, void *,
1125 static int parse_vc_action_l2_decap(struct context *, const struct token *,
1126 const char *, unsigned int, void *,
1128 static int parse_vc_action_mplsogre_encap(struct context *,
1129 const struct token *, const char *,
1130 unsigned int, void *, unsigned int);
1131 static int parse_vc_action_mplsogre_decap(struct context *,
1132 const struct token *, const char *,
1133 unsigned int, void *, unsigned int);
1134 static int parse_vc_action_mplsoudp_encap(struct context *,
1135 const struct token *, const char *,
1136 unsigned int, void *, unsigned int);
1137 static int parse_vc_action_mplsoudp_decap(struct context *,
1138 const struct token *, const char *,
1139 unsigned int, void *, unsigned int);
1140 static int parse_destroy(struct context *, const struct token *,
1141 const char *, unsigned int,
1142 void *, unsigned int);
1143 static int parse_flush(struct context *, const struct token *,
1144 const char *, unsigned int,
1145 void *, unsigned int);
1146 static int parse_query(struct context *, const struct token *,
1147 const char *, unsigned int,
1148 void *, unsigned int);
1149 static int parse_action(struct context *, const struct token *,
1150 const char *, unsigned int,
1151 void *, unsigned int);
1152 static int parse_list(struct context *, const struct token *,
1153 const char *, unsigned int,
1154 void *, unsigned int);
1155 static int parse_isolate(struct context *, const struct token *,
1156 const char *, unsigned int,
1157 void *, unsigned int);
1158 static int parse_int(struct context *, const struct token *,
1159 const char *, unsigned int,
1160 void *, unsigned int);
1161 static int parse_prefix(struct context *, const struct token *,
1162 const char *, unsigned int,
1163 void *, unsigned int);
1164 static int parse_boolean(struct context *, const struct token *,
1165 const char *, unsigned int,
1166 void *, unsigned int);
1167 static int parse_string(struct context *, const struct token *,
1168 const char *, unsigned int,
1169 void *, unsigned int);
1170 static int parse_hex(struct context *ctx, const struct token *token,
1171 const char *str, unsigned int len,
1172 void *buf, unsigned int size);
1173 static int parse_mac_addr(struct context *, const struct token *,
1174 const char *, unsigned int,
1175 void *, unsigned int);
1176 static int parse_ipv4_addr(struct context *, const struct token *,
1177 const char *, unsigned int,
1178 void *, unsigned int);
1179 static int parse_ipv6_addr(struct context *, const struct token *,
1180 const char *, unsigned int,
1181 void *, unsigned int);
1182 static int parse_port(struct context *, const struct token *,
1183 const char *, unsigned int,
1184 void *, unsigned int);
1185 static int comp_none(struct context *, const struct token *,
1186 unsigned int, char *, unsigned int);
1187 static int comp_boolean(struct context *, const struct token *,
1188 unsigned int, char *, unsigned int);
1189 static int comp_action(struct context *, const struct token *,
1190 unsigned int, char *, unsigned int);
1191 static int comp_port(struct context *, const struct token *,
1192 unsigned int, char *, unsigned int);
1193 static int comp_rule_id(struct context *, const struct token *,
1194 unsigned int, char *, unsigned int);
1195 static int comp_vc_action_rss_type(struct context *, const struct token *,
1196 unsigned int, char *, unsigned int);
1197 static int comp_vc_action_rss_queue(struct context *, const struct token *,
1198 unsigned int, char *, unsigned int);
1200 /** Token definitions. */
1201 static const struct token token_list[] = {
1202 /* Special tokens. */
1205 .help = "null entry, abused as the entry point",
1206 .next = NEXT(NEXT_ENTRY(FLOW)),
1211 .help = "command may end here",
1213 /* Common tokens. */
1217 .help = "integer value",
1222 .name = "{unsigned}",
1224 .help = "unsigned integer value",
1231 .help = "prefix length for bit-mask",
1232 .call = parse_prefix,
1236 .name = "{boolean}",
1238 .help = "any boolean value",
1239 .call = parse_boolean,
1240 .comp = comp_boolean,
1245 .help = "fixed string",
1246 .call = parse_string,
1252 .help = "fixed string",
1257 .name = "{MAC address}",
1259 .help = "standard MAC address notation",
1260 .call = parse_mac_addr,
1264 .name = "{IPv4 address}",
1265 .type = "IPV4 ADDRESS",
1266 .help = "standard IPv4 address notation",
1267 .call = parse_ipv4_addr,
1271 .name = "{IPv6 address}",
1272 .type = "IPV6 ADDRESS",
1273 .help = "standard IPv6 address notation",
1274 .call = parse_ipv6_addr,
1278 .name = "{rule id}",
1280 .help = "rule identifier",
1282 .comp = comp_rule_id,
1285 .name = "{port_id}",
1287 .help = "port identifier",
1292 .name = "{group_id}",
1294 .help = "group identifier",
1298 [PRIORITY_LEVEL] = {
1301 .help = "priority level",
1305 /* Top-level command. */
1308 .type = "{command} {port_id} [{arg} [...]]",
1309 .help = "manage ingress/egress flow rules",
1310 .next = NEXT(NEXT_ENTRY
1320 /* Sub-level commands. */
1323 .help = "check whether a flow rule can be created",
1324 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1325 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1330 .help = "create a flow rule",
1331 .next = NEXT(next_vc_attr, NEXT_ENTRY(PORT_ID)),
1332 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1337 .help = "destroy specific flow rules",
1338 .next = NEXT(NEXT_ENTRY(DESTROY_RULE), NEXT_ENTRY(PORT_ID)),
1339 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1340 .call = parse_destroy,
1344 .help = "destroy all flow rules",
1345 .next = NEXT(NEXT_ENTRY(PORT_ID)),
1346 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1347 .call = parse_flush,
1351 .help = "query an existing flow rule",
1352 .next = NEXT(NEXT_ENTRY(QUERY_ACTION),
1353 NEXT_ENTRY(RULE_ID),
1354 NEXT_ENTRY(PORT_ID)),
1355 .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type),
1356 ARGS_ENTRY(struct buffer, args.query.rule),
1357 ARGS_ENTRY(struct buffer, port)),
1358 .call = parse_query,
1362 .help = "list existing flow rules",
1363 .next = NEXT(next_list_attr, NEXT_ENTRY(PORT_ID)),
1364 .args = ARGS(ARGS_ENTRY(struct buffer, port)),
1369 .help = "restrict ingress traffic to the defined flow rules",
1370 .next = NEXT(NEXT_ENTRY(BOOLEAN),
1371 NEXT_ENTRY(PORT_ID)),
1372 .args = ARGS(ARGS_ENTRY(struct buffer, args.isolate.set),
1373 ARGS_ENTRY(struct buffer, port)),
1374 .call = parse_isolate,
1376 /* Destroy arguments. */
1379 .help = "specify a rule identifier",
1380 .next = NEXT(next_destroy_attr, NEXT_ENTRY(RULE_ID)),
1381 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.destroy.rule)),
1382 .call = parse_destroy,
1384 /* Query arguments. */
1388 .help = "action to query, must be part of the rule",
1389 .call = parse_action,
1390 .comp = comp_action,
1392 /* List arguments. */
1395 .help = "specify a group",
1396 .next = NEXT(next_list_attr, NEXT_ENTRY(GROUP_ID)),
1397 .args = ARGS(ARGS_ENTRY_PTR(struct buffer, args.list.group)),
1400 /* Validate/create attributes. */
1403 .help = "specify a group",
1404 .next = NEXT(next_vc_attr, NEXT_ENTRY(GROUP_ID)),
1405 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, group)),
1410 .help = "specify a priority level",
1411 .next = NEXT(next_vc_attr, NEXT_ENTRY(PRIORITY_LEVEL)),
1412 .args = ARGS(ARGS_ENTRY(struct rte_flow_attr, priority)),
1417 .help = "affect rule to ingress",
1418 .next = NEXT(next_vc_attr),
1423 .help = "affect rule to egress",
1424 .next = NEXT(next_vc_attr),
1429 .help = "apply rule directly to endpoints found in pattern",
1430 .next = NEXT(next_vc_attr),
1433 /* Validate/create pattern. */
1436 .help = "submit a list of pattern items",
1437 .next = NEXT(next_item),
1442 .help = "match value perfectly (with full bit-mask)",
1443 .call = parse_vc_spec,
1445 [ITEM_PARAM_SPEC] = {
1447 .help = "match value according to configured bit-mask",
1448 .call = parse_vc_spec,
1450 [ITEM_PARAM_LAST] = {
1452 .help = "specify upper bound to establish a range",
1453 .call = parse_vc_spec,
1455 [ITEM_PARAM_MASK] = {
1457 .help = "specify bit-mask with relevant bits set to one",
1458 .call = parse_vc_spec,
1460 [ITEM_PARAM_PREFIX] = {
1462 .help = "generate bit-mask from a prefix length",
1463 .call = parse_vc_spec,
1467 .help = "specify next pattern item",
1468 .next = NEXT(next_item),
1472 .help = "end list of pattern items",
1473 .priv = PRIV_ITEM(END, 0),
1474 .next = NEXT(NEXT_ENTRY(ACTIONS)),
1479 .help = "no-op pattern item",
1480 .priv = PRIV_ITEM(VOID, 0),
1481 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1486 .help = "perform actions when pattern does not match",
1487 .priv = PRIV_ITEM(INVERT, 0),
1488 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1493 .help = "match any protocol for the current layer",
1494 .priv = PRIV_ITEM(ANY, sizeof(struct rte_flow_item_any)),
1495 .next = NEXT(item_any),
1500 .help = "number of layers covered",
1501 .next = NEXT(item_any, NEXT_ENTRY(UNSIGNED), item_param),
1502 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_any, num)),
1506 .help = "match traffic from/to the physical function",
1507 .priv = PRIV_ITEM(PF, 0),
1508 .next = NEXT(NEXT_ENTRY(ITEM_NEXT)),
1513 .help = "match traffic from/to a virtual function ID",
1514 .priv = PRIV_ITEM(VF, sizeof(struct rte_flow_item_vf)),
1515 .next = NEXT(item_vf),
1521 .next = NEXT(item_vf, NEXT_ENTRY(UNSIGNED), item_param),
1522 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_vf, id)),
1526 .help = "match traffic from/to a specific physical port",
1527 .priv = PRIV_ITEM(PHY_PORT,
1528 sizeof(struct rte_flow_item_phy_port)),
1529 .next = NEXT(item_phy_port),
1532 [ITEM_PHY_PORT_INDEX] = {
1534 .help = "physical port index",
1535 .next = NEXT(item_phy_port, NEXT_ENTRY(UNSIGNED), item_param),
1536 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_phy_port, index)),
1540 .help = "match traffic from/to a given DPDK port ID",
1541 .priv = PRIV_ITEM(PORT_ID,
1542 sizeof(struct rte_flow_item_port_id)),
1543 .next = NEXT(item_port_id),
1546 [ITEM_PORT_ID_ID] = {
1548 .help = "DPDK port ID",
1549 .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param),
1550 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_port_id, id)),
1554 .help = "match traffic against value set in previously matched rule",
1555 .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
1556 .next = NEXT(item_mark),
1561 .help = "Integer value to match against",
1562 .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param),
1563 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)),
1567 .help = "match an arbitrary byte string",
1568 .priv = PRIV_ITEM(RAW, ITEM_RAW_SIZE),
1569 .next = NEXT(item_raw),
1572 [ITEM_RAW_RELATIVE] = {
1574 .help = "look for pattern after the previous item",
1575 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1576 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1579 [ITEM_RAW_SEARCH] = {
1581 .help = "search pattern from offset (see also limit)",
1582 .next = NEXT(item_raw, NEXT_ENTRY(BOOLEAN), item_param),
1583 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_item_raw,
1586 [ITEM_RAW_OFFSET] = {
1588 .help = "absolute or relative offset for pattern",
1589 .next = NEXT(item_raw, NEXT_ENTRY(INTEGER), item_param),
1590 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, offset)),
1592 [ITEM_RAW_LIMIT] = {
1594 .help = "search area limit for start of pattern",
1595 .next = NEXT(item_raw, NEXT_ENTRY(UNSIGNED), item_param),
1596 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, limit)),
1598 [ITEM_RAW_PATTERN] = {
1600 .help = "byte string to look for",
1601 .next = NEXT(item_raw,
1603 NEXT_ENTRY(ITEM_PARAM_IS,
1606 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_raw, pattern),
1607 ARGS_ENTRY(struct rte_flow_item_raw, length),
1608 ARGS_ENTRY_ARB(sizeof(struct rte_flow_item_raw),
1609 ITEM_RAW_PATTERN_SIZE)),
1613 .help = "match Ethernet header",
1614 .priv = PRIV_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
1615 .next = NEXT(item_eth),
1620 .help = "destination MAC",
1621 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1622 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, dst)),
1626 .help = "source MAC",
1627 .next = NEXT(item_eth, NEXT_ENTRY(MAC_ADDR), item_param),
1628 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, src)),
1632 .help = "EtherType",
1633 .next = NEXT(item_eth, NEXT_ENTRY(UNSIGNED), item_param),
1634 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_eth, type)),
1638 .help = "match 802.1Q/ad VLAN tag",
1639 .priv = PRIV_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
1640 .next = NEXT(item_vlan),
1645 .help = "tag control information",
1646 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1647 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan, tci)),
1651 .help = "priority code point",
1652 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1653 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1658 .help = "drop eligible indicator",
1659 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1660 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1665 .help = "VLAN identifier",
1666 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1667 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_vlan,
1670 [ITEM_VLAN_INNER_TYPE] = {
1671 .name = "inner_type",
1672 .help = "inner EtherType",
1673 .next = NEXT(item_vlan, NEXT_ENTRY(UNSIGNED), item_param),
1674 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vlan,
1679 .help = "match IPv4 header",
1680 .priv = PRIV_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
1681 .next = NEXT(item_ipv4),
1686 .help = "type of service",
1687 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1688 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1689 hdr.type_of_service)),
1693 .help = "time to live",
1694 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1695 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1698 [ITEM_IPV4_PROTO] = {
1700 .help = "next protocol ID",
1701 .next = NEXT(item_ipv4, NEXT_ENTRY(UNSIGNED), item_param),
1702 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1703 hdr.next_proto_id)),
1707 .help = "source address",
1708 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1709 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1714 .help = "destination address",
1715 .next = NEXT(item_ipv4, NEXT_ENTRY(IPV4_ADDR), item_param),
1716 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv4,
1721 .help = "match IPv6 header",
1722 .priv = PRIV_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
1723 .next = NEXT(item_ipv6),
1728 .help = "traffic class",
1729 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1730 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1732 "\x0f\xf0\x00\x00")),
1734 [ITEM_IPV6_FLOW] = {
1736 .help = "flow label",
1737 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1738 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_ipv6,
1740 "\x00\x0f\xff\xff")),
1742 [ITEM_IPV6_PROTO] = {
1744 .help = "protocol (next header)",
1745 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1746 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1751 .help = "hop limit",
1752 .next = NEXT(item_ipv6, NEXT_ENTRY(UNSIGNED), item_param),
1753 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1758 .help = "source address",
1759 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1760 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1765 .help = "destination address",
1766 .next = NEXT(item_ipv6, NEXT_ENTRY(IPV6_ADDR), item_param),
1767 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6,
1772 .help = "match ICMP header",
1773 .priv = PRIV_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
1774 .next = NEXT(item_icmp),
1777 [ITEM_ICMP_TYPE] = {
1779 .help = "ICMP packet type",
1780 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1781 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1784 [ITEM_ICMP_CODE] = {
1786 .help = "ICMP packet code",
1787 .next = NEXT(item_icmp, NEXT_ENTRY(UNSIGNED), item_param),
1788 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp,
1793 .help = "match UDP header",
1794 .priv = PRIV_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
1795 .next = NEXT(item_udp),
1800 .help = "UDP source port",
1801 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1802 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1807 .help = "UDP destination port",
1808 .next = NEXT(item_udp, NEXT_ENTRY(UNSIGNED), item_param),
1809 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_udp,
1814 .help = "match TCP header",
1815 .priv = PRIV_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
1816 .next = NEXT(item_tcp),
1821 .help = "TCP source port",
1822 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1823 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1828 .help = "TCP destination port",
1829 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1830 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1833 [ITEM_TCP_FLAGS] = {
1835 .help = "TCP flags",
1836 .next = NEXT(item_tcp, NEXT_ENTRY(UNSIGNED), item_param),
1837 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_tcp,
1842 .help = "match SCTP header",
1843 .priv = PRIV_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
1844 .next = NEXT(item_sctp),
1849 .help = "SCTP source port",
1850 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1851 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1856 .help = "SCTP destination port",
1857 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1858 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1863 .help = "validation tag",
1864 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1865 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1868 [ITEM_SCTP_CKSUM] = {
1871 .next = NEXT(item_sctp, NEXT_ENTRY(UNSIGNED), item_param),
1872 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_sctp,
1877 .help = "match VXLAN header",
1878 .priv = PRIV_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
1879 .next = NEXT(item_vxlan),
1882 [ITEM_VXLAN_VNI] = {
1884 .help = "VXLAN identifier",
1885 .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param),
1886 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)),
1890 .help = "match E-Tag header",
1891 .priv = PRIV_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
1892 .next = NEXT(item_e_tag),
1895 [ITEM_E_TAG_GRP_ECID_B] = {
1896 .name = "grp_ecid_b",
1897 .help = "GRP and E-CID base",
1898 .next = NEXT(item_e_tag, NEXT_ENTRY(UNSIGNED), item_param),
1899 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_e_tag,
1905 .help = "match NVGRE header",
1906 .priv = PRIV_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
1907 .next = NEXT(item_nvgre),
1910 [ITEM_NVGRE_TNI] = {
1912 .help = "virtual subnet ID",
1913 .next = NEXT(item_nvgre, NEXT_ENTRY(UNSIGNED), item_param),
1914 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_nvgre, tni)),
1918 .help = "match MPLS header",
1919 .priv = PRIV_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
1920 .next = NEXT(item_mpls),
1923 [ITEM_MPLS_LABEL] = {
1925 .help = "MPLS label",
1926 .next = NEXT(item_mpls, NEXT_ENTRY(UNSIGNED), item_param),
1927 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_mpls,
1933 .help = "match GRE header",
1934 .priv = PRIV_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
1935 .next = NEXT(item_gre),
1938 [ITEM_GRE_PROTO] = {
1940 .help = "GRE protocol type",
1941 .next = NEXT(item_gre, NEXT_ENTRY(UNSIGNED), item_param),
1942 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gre,
1947 .help = "fuzzy pattern match, expect faster than default",
1948 .priv = PRIV_ITEM(FUZZY,
1949 sizeof(struct rte_flow_item_fuzzy)),
1950 .next = NEXT(item_fuzzy),
1953 [ITEM_FUZZY_THRESH] = {
1955 .help = "match accuracy threshold",
1956 .next = NEXT(item_fuzzy, NEXT_ENTRY(UNSIGNED), item_param),
1957 .args = ARGS(ARGS_ENTRY(struct rte_flow_item_fuzzy,
1962 .help = "match GTP header",
1963 .priv = PRIV_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
1964 .next = NEXT(item_gtp),
1969 .help = "tunnel endpoint identifier",
1970 .next = NEXT(item_gtp, NEXT_ENTRY(UNSIGNED), item_param),
1971 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_gtp, teid)),
1975 .help = "match GTP header",
1976 .priv = PRIV_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
1977 .next = NEXT(item_gtp),
1982 .help = "match GTP header",
1983 .priv = PRIV_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
1984 .next = NEXT(item_gtp),
1989 .help = "match GENEVE header",
1990 .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
1991 .next = NEXT(item_geneve),
1994 [ITEM_GENEVE_VNI] = {
1996 .help = "virtual network identifier",
1997 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
1998 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)),
2000 [ITEM_GENEVE_PROTO] = {
2002 .help = "GENEVE protocol type",
2003 .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param),
2004 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve,
2007 [ITEM_VXLAN_GPE] = {
2008 .name = "vxlan-gpe",
2009 .help = "match VXLAN-GPE header",
2010 .priv = PRIV_ITEM(VXLAN_GPE,
2011 sizeof(struct rte_flow_item_vxlan_gpe)),
2012 .next = NEXT(item_vxlan_gpe),
2015 [ITEM_VXLAN_GPE_VNI] = {
2017 .help = "VXLAN-GPE identifier",
2018 .next = NEXT(item_vxlan_gpe, NEXT_ENTRY(UNSIGNED), item_param),
2019 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe,
2022 [ITEM_ARP_ETH_IPV4] = {
2023 .name = "arp_eth_ipv4",
2024 .help = "match ARP header for Ethernet/IPv4",
2025 .priv = PRIV_ITEM(ARP_ETH_IPV4,
2026 sizeof(struct rte_flow_item_arp_eth_ipv4)),
2027 .next = NEXT(item_arp_eth_ipv4),
2030 [ITEM_ARP_ETH_IPV4_SHA] = {
2032 .help = "sender hardware address",
2033 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2035 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2038 [ITEM_ARP_ETH_IPV4_SPA] = {
2040 .help = "sender IPv4 address",
2041 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2043 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2046 [ITEM_ARP_ETH_IPV4_THA] = {
2048 .help = "target hardware address",
2049 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR),
2051 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2054 [ITEM_ARP_ETH_IPV4_TPA] = {
2056 .help = "target IPv4 address",
2057 .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR),
2059 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4,
2064 .help = "match presence of any IPv6 extension header",
2065 .priv = PRIV_ITEM(IPV6_EXT,
2066 sizeof(struct rte_flow_item_ipv6_ext)),
2067 .next = NEXT(item_ipv6_ext),
2070 [ITEM_IPV6_EXT_NEXT_HDR] = {
2072 .help = "next header",
2073 .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param),
2074 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext,
2079 .help = "match any ICMPv6 header",
2080 .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
2081 .next = NEXT(item_icmp6),
2084 [ITEM_ICMP6_TYPE] = {
2086 .help = "ICMPv6 type",
2087 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2088 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2091 [ITEM_ICMP6_CODE] = {
2093 .help = "ICMPv6 code",
2094 .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param),
2095 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6,
2098 [ITEM_ICMP6_ND_NS] = {
2099 .name = "icmp6_nd_ns",
2100 .help = "match ICMPv6 neighbor discovery solicitation",
2101 .priv = PRIV_ITEM(ICMP6_ND_NS,
2102 sizeof(struct rte_flow_item_icmp6_nd_ns)),
2103 .next = NEXT(item_icmp6_nd_ns),
2106 [ITEM_ICMP6_ND_NS_TARGET_ADDR] = {
2107 .name = "target_addr",
2108 .help = "target address",
2109 .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR),
2111 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns,
2114 [ITEM_ICMP6_ND_NA] = {
2115 .name = "icmp6_nd_na",
2116 .help = "match ICMPv6 neighbor discovery advertisement",
2117 .priv = PRIV_ITEM(ICMP6_ND_NA,
2118 sizeof(struct rte_flow_item_icmp6_nd_na)),
2119 .next = NEXT(item_icmp6_nd_na),
2122 [ITEM_ICMP6_ND_NA_TARGET_ADDR] = {
2123 .name = "target_addr",
2124 .help = "target address",
2125 .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR),
2127 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na,
2130 [ITEM_ICMP6_ND_OPT] = {
2131 .name = "icmp6_nd_opt",
2132 .help = "match presence of any ICMPv6 neighbor discovery"
2134 .priv = PRIV_ITEM(ICMP6_ND_OPT,
2135 sizeof(struct rte_flow_item_icmp6_nd_opt)),
2136 .next = NEXT(item_icmp6_nd_opt),
2139 [ITEM_ICMP6_ND_OPT_TYPE] = {
2141 .help = "ND option type",
2142 .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED),
2144 .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt,
2147 [ITEM_ICMP6_ND_OPT_SLA_ETH] = {
2148 .name = "icmp6_nd_opt_sla_eth",
2149 .help = "match ICMPv6 neighbor discovery source Ethernet"
2150 " link-layer address option",
2152 (ICMP6_ND_OPT_SLA_ETH,
2153 sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
2154 .next = NEXT(item_icmp6_nd_opt_sla_eth),
2157 [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = {
2159 .help = "source Ethernet LLA",
2160 .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR),
2162 .args = ARGS(ARGS_ENTRY_HTON
2163 (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)),
2165 [ITEM_ICMP6_ND_OPT_TLA_ETH] = {
2166 .name = "icmp6_nd_opt_tla_eth",
2167 .help = "match ICMPv6 neighbor discovery target Ethernet"
2168 " link-layer address option",
2170 (ICMP6_ND_OPT_TLA_ETH,
2171 sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
2172 .next = NEXT(item_icmp6_nd_opt_tla_eth),
2175 [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = {
2177 .help = "target Ethernet LLA",
2178 .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR),
2180 .args = ARGS(ARGS_ENTRY_HTON
2181 (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)),
2185 .help = "match metadata header",
2186 .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)),
2187 .next = NEXT(item_meta),
2190 [ITEM_META_DATA] = {
2192 .help = "metadata value",
2193 .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param),
2194 .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta,
2195 data, "\xff\xff\xff\xff")),
2198 /* Validate/create actions. */
2201 .help = "submit a list of associated actions",
2202 .next = NEXT(next_action),
2207 .help = "specify next action",
2208 .next = NEXT(next_action),
2212 .help = "end list of actions",
2213 .priv = PRIV_ACTION(END, 0),
2218 .help = "no-op action",
2219 .priv = PRIV_ACTION(VOID, 0),
2220 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2223 [ACTION_PASSTHRU] = {
2225 .help = "let subsequent rule process matched packets",
2226 .priv = PRIV_ACTION(PASSTHRU, 0),
2227 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2232 .help = "redirect traffic to a given group",
2233 .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
2234 .next = NEXT(action_jump),
2237 [ACTION_JUMP_GROUP] = {
2239 .help = "group to redirect traffic to",
2240 .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)),
2241 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)),
2242 .call = parse_vc_conf,
2246 .help = "attach 32 bit value to packets",
2247 .priv = PRIV_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
2248 .next = NEXT(action_mark),
2251 [ACTION_MARK_ID] = {
2253 .help = "32 bit value to return with packets",
2254 .next = NEXT(action_mark, NEXT_ENTRY(UNSIGNED)),
2255 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_mark, id)),
2256 .call = parse_vc_conf,
2260 .help = "flag packets",
2261 .priv = PRIV_ACTION(FLAG, 0),
2262 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2267 .help = "assign packets to a given queue index",
2268 .priv = PRIV_ACTION(QUEUE,
2269 sizeof(struct rte_flow_action_queue)),
2270 .next = NEXT(action_queue),
2273 [ACTION_QUEUE_INDEX] = {
2275 .help = "queue index to use",
2276 .next = NEXT(action_queue, NEXT_ENTRY(UNSIGNED)),
2277 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_queue, index)),
2278 .call = parse_vc_conf,
2282 .help = "drop packets (note: passthru has priority)",
2283 .priv = PRIV_ACTION(DROP, 0),
2284 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2289 .help = "enable counters for this rule",
2290 .priv = PRIV_ACTION(COUNT,
2291 sizeof(struct rte_flow_action_count)),
2292 .next = NEXT(action_count),
2295 [ACTION_COUNT_ID] = {
2296 .name = "identifier",
2297 .help = "counter identifier to use",
2298 .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)),
2299 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)),
2300 .call = parse_vc_conf,
2302 [ACTION_COUNT_SHARED] = {
2304 .help = "shared counter",
2305 .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)),
2306 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count,
2308 .call = parse_vc_conf,
2312 .help = "spread packets among several queues",
2313 .priv = PRIV_ACTION(RSS, sizeof(struct action_rss_data)),
2314 .next = NEXT(action_rss),
2315 .call = parse_vc_action_rss,
2317 [ACTION_RSS_FUNC] = {
2319 .help = "RSS hash function to apply",
2320 .next = NEXT(action_rss,
2321 NEXT_ENTRY(ACTION_RSS_FUNC_DEFAULT,
2322 ACTION_RSS_FUNC_TOEPLITZ,
2323 ACTION_RSS_FUNC_SIMPLE_XOR)),
2325 [ACTION_RSS_FUNC_DEFAULT] = {
2327 .help = "default hash function",
2328 .call = parse_vc_action_rss_func,
2330 [ACTION_RSS_FUNC_TOEPLITZ] = {
2332 .help = "Toeplitz hash function",
2333 .call = parse_vc_action_rss_func,
2335 [ACTION_RSS_FUNC_SIMPLE_XOR] = {
2336 .name = "simple_xor",
2337 .help = "simple XOR hash function",
2338 .call = parse_vc_action_rss_func,
2340 [ACTION_RSS_LEVEL] = {
2342 .help = "encapsulation level for \"types\"",
2343 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2344 .args = ARGS(ARGS_ENTRY_ARB
2345 (offsetof(struct action_rss_data, conf) +
2346 offsetof(struct rte_flow_action_rss, level),
2347 sizeof(((struct rte_flow_action_rss *)0)->
2350 [ACTION_RSS_TYPES] = {
2352 .help = "specific RSS hash types",
2353 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_TYPE)),
2355 [ACTION_RSS_TYPE] = {
2357 .help = "RSS hash type",
2358 .call = parse_vc_action_rss_type,
2359 .comp = comp_vc_action_rss_type,
2361 [ACTION_RSS_KEY] = {
2363 .help = "RSS hash key",
2364 .next = NEXT(action_rss, NEXT_ENTRY(HEX)),
2365 .args = ARGS(ARGS_ENTRY_ARB(0, 0),
2367 (offsetof(struct action_rss_data, conf) +
2368 offsetof(struct rte_flow_action_rss, key_len),
2369 sizeof(((struct rte_flow_action_rss *)0)->
2371 ARGS_ENTRY(struct action_rss_data, key)),
2373 [ACTION_RSS_KEY_LEN] = {
2375 .help = "RSS hash key length in bytes",
2376 .next = NEXT(action_rss, NEXT_ENTRY(UNSIGNED)),
2377 .args = ARGS(ARGS_ENTRY_ARB_BOUNDED
2378 (offsetof(struct action_rss_data, conf) +
2379 offsetof(struct rte_flow_action_rss, key_len),
2380 sizeof(((struct rte_flow_action_rss *)0)->
2383 RSS_HASH_KEY_LENGTH)),
2385 [ACTION_RSS_QUEUES] = {
2387 .help = "queue indices to use",
2388 .next = NEXT(action_rss, NEXT_ENTRY(ACTION_RSS_QUEUE)),
2389 .call = parse_vc_conf,
2391 [ACTION_RSS_QUEUE] = {
2393 .help = "queue index",
2394 .call = parse_vc_action_rss_queue,
2395 .comp = comp_vc_action_rss_queue,
2399 .help = "direct traffic to physical function",
2400 .priv = PRIV_ACTION(PF, 0),
2401 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2406 .help = "direct traffic to a virtual function ID",
2407 .priv = PRIV_ACTION(VF, sizeof(struct rte_flow_action_vf)),
2408 .next = NEXT(action_vf),
2411 [ACTION_VF_ORIGINAL] = {
2413 .help = "use original VF ID if possible",
2414 .next = NEXT(action_vf, NEXT_ENTRY(BOOLEAN)),
2415 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_vf,
2417 .call = parse_vc_conf,
2422 .next = NEXT(action_vf, NEXT_ENTRY(UNSIGNED)),
2423 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_vf, id)),
2424 .call = parse_vc_conf,
2426 [ACTION_PHY_PORT] = {
2428 .help = "direct packets to physical port index",
2429 .priv = PRIV_ACTION(PHY_PORT,
2430 sizeof(struct rte_flow_action_phy_port)),
2431 .next = NEXT(action_phy_port),
2434 [ACTION_PHY_PORT_ORIGINAL] = {
2436 .help = "use original port index if possible",
2437 .next = NEXT(action_phy_port, NEXT_ENTRY(BOOLEAN)),
2438 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_phy_port,
2440 .call = parse_vc_conf,
2442 [ACTION_PHY_PORT_INDEX] = {
2444 .help = "physical port index",
2445 .next = NEXT(action_phy_port, NEXT_ENTRY(UNSIGNED)),
2446 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_phy_port,
2448 .call = parse_vc_conf,
2450 [ACTION_PORT_ID] = {
2452 .help = "direct matching traffic to a given DPDK port ID",
2453 .priv = PRIV_ACTION(PORT_ID,
2454 sizeof(struct rte_flow_action_port_id)),
2455 .next = NEXT(action_port_id),
2458 [ACTION_PORT_ID_ORIGINAL] = {
2460 .help = "use original DPDK port ID if possible",
2461 .next = NEXT(action_port_id, NEXT_ENTRY(BOOLEAN)),
2462 .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_port_id,
2464 .call = parse_vc_conf,
2466 [ACTION_PORT_ID_ID] = {
2468 .help = "DPDK port ID",
2469 .next = NEXT(action_port_id, NEXT_ENTRY(UNSIGNED)),
2470 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_port_id, id)),
2471 .call = parse_vc_conf,
2475 .help = "meter the directed packets at given id",
2476 .priv = PRIV_ACTION(METER,
2477 sizeof(struct rte_flow_action_meter)),
2478 .next = NEXT(action_meter),
2481 [ACTION_METER_ID] = {
2483 .help = "meter id to use",
2484 .next = NEXT(action_meter, NEXT_ENTRY(UNSIGNED)),
2485 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)),
2486 .call = parse_vc_conf,
2488 [ACTION_OF_SET_MPLS_TTL] = {
2489 .name = "of_set_mpls_ttl",
2490 .help = "OpenFlow's OFPAT_SET_MPLS_TTL",
2493 sizeof(struct rte_flow_action_of_set_mpls_ttl)),
2494 .next = NEXT(action_of_set_mpls_ttl),
2497 [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = {
2500 .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)),
2501 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl,
2503 .call = parse_vc_conf,
2505 [ACTION_OF_DEC_MPLS_TTL] = {
2506 .name = "of_dec_mpls_ttl",
2507 .help = "OpenFlow's OFPAT_DEC_MPLS_TTL",
2508 .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0),
2509 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2512 [ACTION_OF_SET_NW_TTL] = {
2513 .name = "of_set_nw_ttl",
2514 .help = "OpenFlow's OFPAT_SET_NW_TTL",
2517 sizeof(struct rte_flow_action_of_set_nw_ttl)),
2518 .next = NEXT(action_of_set_nw_ttl),
2521 [ACTION_OF_SET_NW_TTL_NW_TTL] = {
2524 .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)),
2525 .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl,
2527 .call = parse_vc_conf,
2529 [ACTION_OF_DEC_NW_TTL] = {
2530 .name = "of_dec_nw_ttl",
2531 .help = "OpenFlow's OFPAT_DEC_NW_TTL",
2532 .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0),
2533 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2536 [ACTION_OF_COPY_TTL_OUT] = {
2537 .name = "of_copy_ttl_out",
2538 .help = "OpenFlow's OFPAT_COPY_TTL_OUT",
2539 .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0),
2540 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2543 [ACTION_OF_COPY_TTL_IN] = {
2544 .name = "of_copy_ttl_in",
2545 .help = "OpenFlow's OFPAT_COPY_TTL_IN",
2546 .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0),
2547 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2550 [ACTION_OF_POP_VLAN] = {
2551 .name = "of_pop_vlan",
2552 .help = "OpenFlow's OFPAT_POP_VLAN",
2553 .priv = PRIV_ACTION(OF_POP_VLAN, 0),
2554 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2557 [ACTION_OF_PUSH_VLAN] = {
2558 .name = "of_push_vlan",
2559 .help = "OpenFlow's OFPAT_PUSH_VLAN",
2562 sizeof(struct rte_flow_action_of_push_vlan)),
2563 .next = NEXT(action_of_push_vlan),
2566 [ACTION_OF_PUSH_VLAN_ETHERTYPE] = {
2567 .name = "ethertype",
2568 .help = "EtherType",
2569 .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)),
2570 .args = ARGS(ARGS_ENTRY_HTON
2571 (struct rte_flow_action_of_push_vlan,
2573 .call = parse_vc_conf,
2575 [ACTION_OF_SET_VLAN_VID] = {
2576 .name = "of_set_vlan_vid",
2577 .help = "OpenFlow's OFPAT_SET_VLAN_VID",
2580 sizeof(struct rte_flow_action_of_set_vlan_vid)),
2581 .next = NEXT(action_of_set_vlan_vid),
2584 [ACTION_OF_SET_VLAN_VID_VLAN_VID] = {
2587 .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)),
2588 .args = ARGS(ARGS_ENTRY_HTON
2589 (struct rte_flow_action_of_set_vlan_vid,
2591 .call = parse_vc_conf,
2593 [ACTION_OF_SET_VLAN_PCP] = {
2594 .name = "of_set_vlan_pcp",
2595 .help = "OpenFlow's OFPAT_SET_VLAN_PCP",
2598 sizeof(struct rte_flow_action_of_set_vlan_pcp)),
2599 .next = NEXT(action_of_set_vlan_pcp),
2602 [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = {
2604 .help = "VLAN priority",
2605 .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)),
2606 .args = ARGS(ARGS_ENTRY_HTON
2607 (struct rte_flow_action_of_set_vlan_pcp,
2609 .call = parse_vc_conf,
2611 [ACTION_OF_POP_MPLS] = {
2612 .name = "of_pop_mpls",
2613 .help = "OpenFlow's OFPAT_POP_MPLS",
2614 .priv = PRIV_ACTION(OF_POP_MPLS,
2615 sizeof(struct rte_flow_action_of_pop_mpls)),
2616 .next = NEXT(action_of_pop_mpls),
2619 [ACTION_OF_POP_MPLS_ETHERTYPE] = {
2620 .name = "ethertype",
2621 .help = "EtherType",
2622 .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)),
2623 .args = ARGS(ARGS_ENTRY_HTON
2624 (struct rte_flow_action_of_pop_mpls,
2626 .call = parse_vc_conf,
2628 [ACTION_OF_PUSH_MPLS] = {
2629 .name = "of_push_mpls",
2630 .help = "OpenFlow's OFPAT_PUSH_MPLS",
2633 sizeof(struct rte_flow_action_of_push_mpls)),
2634 .next = NEXT(action_of_push_mpls),
2637 [ACTION_OF_PUSH_MPLS_ETHERTYPE] = {
2638 .name = "ethertype",
2639 .help = "EtherType",
2640 .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)),
2641 .args = ARGS(ARGS_ENTRY_HTON
2642 (struct rte_flow_action_of_push_mpls,
2644 .call = parse_vc_conf,
2646 [ACTION_VXLAN_ENCAP] = {
2647 .name = "vxlan_encap",
2648 .help = "VXLAN encapsulation, uses configuration set by \"set"
2650 .priv = PRIV_ACTION(VXLAN_ENCAP,
2651 sizeof(struct action_vxlan_encap_data)),
2652 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2653 .call = parse_vc_action_vxlan_encap,
2655 [ACTION_VXLAN_DECAP] = {
2656 .name = "vxlan_decap",
2657 .help = "Performs a decapsulation action by stripping all"
2658 " headers of the VXLAN tunnel network overlay from the"
2660 .priv = PRIV_ACTION(VXLAN_DECAP, 0),
2661 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2664 [ACTION_NVGRE_ENCAP] = {
2665 .name = "nvgre_encap",
2666 .help = "NVGRE encapsulation, uses configuration set by \"set"
2668 .priv = PRIV_ACTION(NVGRE_ENCAP,
2669 sizeof(struct action_nvgre_encap_data)),
2670 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2671 .call = parse_vc_action_nvgre_encap,
2673 [ACTION_NVGRE_DECAP] = {
2674 .name = "nvgre_decap",
2675 .help = "Performs a decapsulation action by stripping all"
2676 " headers of the NVGRE tunnel network overlay from the"
2678 .priv = PRIV_ACTION(NVGRE_DECAP, 0),
2679 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2682 [ACTION_L2_ENCAP] = {
2684 .help = "l2 encap, uses configuration set by"
2685 " \"set l2_encap\"",
2686 .priv = PRIV_ACTION(RAW_ENCAP,
2687 sizeof(struct action_raw_encap_data)),
2688 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2689 .call = parse_vc_action_l2_encap,
2691 [ACTION_L2_DECAP] = {
2693 .help = "l2 decap, uses configuration set by"
2694 " \"set l2_decap\"",
2695 .priv = PRIV_ACTION(RAW_DECAP,
2696 sizeof(struct action_raw_decap_data)),
2697 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2698 .call = parse_vc_action_l2_decap,
2700 [ACTION_MPLSOGRE_ENCAP] = {
2701 .name = "mplsogre_encap",
2702 .help = "mplsogre encapsulation, uses configuration set by"
2703 " \"set mplsogre_encap\"",
2704 .priv = PRIV_ACTION(RAW_ENCAP,
2705 sizeof(struct action_raw_encap_data)),
2706 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2707 .call = parse_vc_action_mplsogre_encap,
2709 [ACTION_MPLSOGRE_DECAP] = {
2710 .name = "mplsogre_decap",
2711 .help = "mplsogre decapsulation, uses configuration set by"
2712 " \"set mplsogre_decap\"",
2713 .priv = PRIV_ACTION(RAW_DECAP,
2714 sizeof(struct action_raw_decap_data)),
2715 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2716 .call = parse_vc_action_mplsogre_decap,
2718 [ACTION_MPLSOUDP_ENCAP] = {
2719 .name = "mplsoudp_encap",
2720 .help = "mplsoudp encapsulation, uses configuration set by"
2721 " \"set mplsoudp_encap\"",
2722 .priv = PRIV_ACTION(RAW_ENCAP,
2723 sizeof(struct action_raw_encap_data)),
2724 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2725 .call = parse_vc_action_mplsoudp_encap,
2727 [ACTION_MPLSOUDP_DECAP] = {
2728 .name = "mplsoudp_decap",
2729 .help = "mplsoudp decapsulation, uses configuration set by"
2730 " \"set mplsoudp_decap\"",
2731 .priv = PRIV_ACTION(RAW_DECAP,
2732 sizeof(struct action_raw_decap_data)),
2733 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2734 .call = parse_vc_action_mplsoudp_decap,
2736 [ACTION_SET_IPV4_SRC] = {
2737 .name = "set_ipv4_src",
2738 .help = "Set a new IPv4 source address in the outermost"
2740 .priv = PRIV_ACTION(SET_IPV4_SRC,
2741 sizeof(struct rte_flow_action_set_ipv4)),
2742 .next = NEXT(action_set_ipv4_src),
2745 [ACTION_SET_IPV4_SRC_IPV4_SRC] = {
2746 .name = "ipv4_addr",
2747 .help = "new IPv4 source address to set",
2748 .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)),
2749 .args = ARGS(ARGS_ENTRY_HTON
2750 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2751 .call = parse_vc_conf,
2753 [ACTION_SET_IPV4_DST] = {
2754 .name = "set_ipv4_dst",
2755 .help = "Set a new IPv4 destination address in the outermost"
2757 .priv = PRIV_ACTION(SET_IPV4_DST,
2758 sizeof(struct rte_flow_action_set_ipv4)),
2759 .next = NEXT(action_set_ipv4_dst),
2762 [ACTION_SET_IPV4_DST_IPV4_DST] = {
2763 .name = "ipv4_addr",
2764 .help = "new IPv4 destination address to set",
2765 .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)),
2766 .args = ARGS(ARGS_ENTRY_HTON
2767 (struct rte_flow_action_set_ipv4, ipv4_addr)),
2768 .call = parse_vc_conf,
2770 [ACTION_SET_IPV6_SRC] = {
2771 .name = "set_ipv6_src",
2772 .help = "Set a new IPv6 source address in the outermost"
2774 .priv = PRIV_ACTION(SET_IPV6_SRC,
2775 sizeof(struct rte_flow_action_set_ipv6)),
2776 .next = NEXT(action_set_ipv6_src),
2779 [ACTION_SET_IPV6_SRC_IPV6_SRC] = {
2780 .name = "ipv6_addr",
2781 .help = "new IPv6 source address to set",
2782 .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)),
2783 .args = ARGS(ARGS_ENTRY_HTON
2784 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2785 .call = parse_vc_conf,
2787 [ACTION_SET_IPV6_DST] = {
2788 .name = "set_ipv6_dst",
2789 .help = "Set a new IPv6 destination address in the outermost"
2791 .priv = PRIV_ACTION(SET_IPV6_DST,
2792 sizeof(struct rte_flow_action_set_ipv6)),
2793 .next = NEXT(action_set_ipv6_dst),
2796 [ACTION_SET_IPV6_DST_IPV6_DST] = {
2797 .name = "ipv6_addr",
2798 .help = "new IPv6 destination address to set",
2799 .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)),
2800 .args = ARGS(ARGS_ENTRY_HTON
2801 (struct rte_flow_action_set_ipv6, ipv6_addr)),
2802 .call = parse_vc_conf,
2804 [ACTION_SET_TP_SRC] = {
2805 .name = "set_tp_src",
2806 .help = "set a new source port number in the outermost"
2808 .priv = PRIV_ACTION(SET_TP_SRC,
2809 sizeof(struct rte_flow_action_set_tp)),
2810 .next = NEXT(action_set_tp_src),
2813 [ACTION_SET_TP_SRC_TP_SRC] = {
2815 .help = "new source port number to set",
2816 .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)),
2817 .args = ARGS(ARGS_ENTRY_HTON
2818 (struct rte_flow_action_set_tp, port)),
2819 .call = parse_vc_conf,
2821 [ACTION_SET_TP_DST] = {
2822 .name = "set_tp_dst",
2823 .help = "set a new destination port number in the outermost"
2825 .priv = PRIV_ACTION(SET_TP_DST,
2826 sizeof(struct rte_flow_action_set_tp)),
2827 .next = NEXT(action_set_tp_dst),
2830 [ACTION_SET_TP_DST_TP_DST] = {
2832 .help = "new destination port number to set",
2833 .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)),
2834 .args = ARGS(ARGS_ENTRY_HTON
2835 (struct rte_flow_action_set_tp, port)),
2836 .call = parse_vc_conf,
2838 [ACTION_MAC_SWAP] = {
2840 .help = "Swap the source and destination MAC addresses"
2841 " in the outermost Ethernet header",
2842 .priv = PRIV_ACTION(MAC_SWAP, 0),
2843 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2846 [ACTION_DEC_TTL] = {
2848 .help = "decrease network TTL if available",
2849 .priv = PRIV_ACTION(DEC_TTL, 0),
2850 .next = NEXT(NEXT_ENTRY(ACTION_NEXT)),
2853 [ACTION_SET_TTL] = {
2855 .help = "set ttl value",
2856 .priv = PRIV_ACTION(SET_TTL,
2857 sizeof(struct rte_flow_action_set_ttl)),
2858 .next = NEXT(action_set_ttl),
2861 [ACTION_SET_TTL_TTL] = {
2862 .name = "ttl_value",
2863 .help = "new ttl value to set",
2864 .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)),
2865 .args = ARGS(ARGS_ENTRY_HTON
2866 (struct rte_flow_action_set_ttl, ttl_value)),
2867 .call = parse_vc_conf,
2869 [ACTION_SET_MAC_SRC] = {
2870 .name = "set_mac_src",
2871 .help = "set source mac address",
2872 .priv = PRIV_ACTION(SET_MAC_SRC,
2873 sizeof(struct rte_flow_action_set_mac)),
2874 .next = NEXT(action_set_mac_src),
2877 [ACTION_SET_MAC_SRC_MAC_SRC] = {
2879 .help = "new source mac address",
2880 .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)),
2881 .args = ARGS(ARGS_ENTRY_HTON
2882 (struct rte_flow_action_set_mac, mac_addr)),
2883 .call = parse_vc_conf,
2885 [ACTION_SET_MAC_DST] = {
2886 .name = "set_mac_dst",
2887 .help = "set destination mac address",
2888 .priv = PRIV_ACTION(SET_MAC_DST,
2889 sizeof(struct rte_flow_action_set_mac)),
2890 .next = NEXT(action_set_mac_dst),
2893 [ACTION_SET_MAC_DST_MAC_DST] = {
2895 .help = "new destination mac address to set",
2896 .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)),
2897 .args = ARGS(ARGS_ENTRY_HTON
2898 (struct rte_flow_action_set_mac, mac_addr)),
2899 .call = parse_vc_conf,
2901 [ACTION_INC_TCP_SEQ] = {
2902 .name = "inc_tcp_seq",
2903 .help = "increase TCP sequence number",
2904 .priv = PRIV_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
2905 .next = NEXT(action_inc_tcp_seq),
2908 [ACTION_INC_TCP_SEQ_VALUE] = {
2910 .help = "the value to increase TCP sequence number by",
2911 .next = NEXT(action_inc_tcp_seq, NEXT_ENTRY(UNSIGNED)),
2912 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2913 .call = parse_vc_conf,
2915 [ACTION_DEC_TCP_SEQ] = {
2916 .name = "dec_tcp_seq",
2917 .help = "decrease TCP sequence number",
2918 .priv = PRIV_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
2919 .next = NEXT(action_dec_tcp_seq),
2922 [ACTION_DEC_TCP_SEQ_VALUE] = {
2924 .help = "the value to decrease TCP sequence number by",
2925 .next = NEXT(action_dec_tcp_seq, NEXT_ENTRY(UNSIGNED)),
2926 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2927 .call = parse_vc_conf,
2929 [ACTION_INC_TCP_ACK] = {
2930 .name = "inc_tcp_ack",
2931 .help = "increase TCP acknowledgment number",
2932 .priv = PRIV_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
2933 .next = NEXT(action_inc_tcp_ack),
2936 [ACTION_INC_TCP_ACK_VALUE] = {
2938 .help = "the value to increase TCP acknowledgment number by",
2939 .next = NEXT(action_inc_tcp_ack, NEXT_ENTRY(UNSIGNED)),
2940 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2941 .call = parse_vc_conf,
2943 [ACTION_DEC_TCP_ACK] = {
2944 .name = "dec_tcp_ack",
2945 .help = "decrease TCP acknowledgment number",
2946 .priv = PRIV_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
2947 .next = NEXT(action_dec_tcp_ack),
2950 [ACTION_DEC_TCP_ACK_VALUE] = {
2952 .help = "the value to decrease TCP acknowledgment number by",
2953 .next = NEXT(action_dec_tcp_ack, NEXT_ENTRY(UNSIGNED)),
2954 .args = ARGS(ARG_ENTRY_HTON(rte_be32_t)),
2955 .call = parse_vc_conf,
2959 /** Remove and return last entry from argument stack. */
2960 static const struct arg *
2961 pop_args(struct context *ctx)
/* Returns NULL when the stack is empty; callers use that to detect a
 * missing/unexpected argument. Pre-decrements args_num before indexing,
 * so the stack pointer and the returned entry stay consistent. */
2963 return ctx->args_num ? ctx->args[--ctx->args_num] : NULL;
2966 /** Add entry on top of the argument stack. */
2968 push_args(struct context *ctx, const struct arg *arg)
/* Guard against overflow of the fixed-size argument stack.
 * NOTE(review): the failure return on a full stack is not visible in this
 * view — presumably a nonzero error code; confirm against the full file. */
2970 if (ctx->args_num == CTX_STACK_SIZE)
/* Post-increment: store at the current top, then grow the stack. */
2972 ctx->args[ctx->args_num++] = arg;
2976 /** Spread value into buffer according to bit-mask. */
/* Walks arg->mask byte by byte and deposits the low-order bits of val into
 * dst at exactly the bit positions the mask selects. When dst is NULL the
 * function appears to be used in a dry-run mode (see parse_prefix calling
 * arg_entry_bf_fill(NULL, 0, arg)) — presumably to count mask bits without
 * writing; confirm against the elided portion of the body. */
2978 arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg)
/* i starts at the field size in bytes and is walked downward. */
2980 uint32_t i = arg->size;
/* Byte-order dependent addressing: on little-endian hosts the mask bytes
 * map to the buffer from the opposite end than on big-endian. */
2988 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
2997 unsigned int shift = 0;
/* Locate the destination byte for the current mask byte; `sub` and the
 * surrounding loop are elided in this view. */
2998 uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub);
/* Iterate over every set bit of the current mask byte, lowest first. */
3000 for (shift = 0; arg->mask[i] >> shift; ++shift) {
/* Skip bit positions the mask does not select. */
3001 if (!(arg->mask[i] & (1 << shift)))
/* Clear the target bit, then copy in the next bit of val. */
3006 *buf &= ~(1 << shift);
3007 *buf |= (val & 1) << shift;
/**
 * Compare a string with a partial one of a given length.
 *
 * @param full         Complete NUL-terminated reference string.
 * @param partial      Candidate prefix (need not be NUL-terminated).
 * @param partial_len  Number of bytes of @p partial to compare.
 * @return 0 when @p partial is @p full or a prefix of it; otherwise a
 *   non-zero value (the strncmp result, or the first unmatched character
 *   of @p full when the prefix matched but @p full is longer... callers
 *   only test for zero/non-zero).
 *
 * NOTE(review): reconstructed from a truncated dump — the early-return
 * lines were missing from the visible text.
 */
static int
strcmp_partial(const char *full, const char *partial, size_t partial_len)
{
	int r = strncmp(full, partial, partial_len);

	if (r)
		return r;
	/* Prefix matched; if full has no extra characters, it is a match. */
	if (strlen(full) <= partial_len)
		return 0;
	/* Non-zero: full continues past the compared prefix. */
	return full[partial_len];
}
3029 * Parse a prefix length and generate a bit-mask.
3031 * Last argument (ctx->args) is retrieved to determine mask size, storage
3032 * location and whether the result must use network byte ordering.
/*
 * NOTE(review): truncated dump — declarations, error paths and braces are
 * missing from the visible text; comments cover visible lines only.
 */
3035 parse_prefix(struct context *ctx, const struct token *token,
3036 const char *str, unsigned int len,
3037 void *buf, unsigned int size)
/* The argument descriptor pushed by the previous token drives this parse. */
3039 const struct arg *arg = pop_args(ctx);
/* conv[k] is a byte with its k most-significant bits set (k = 0..8). */
3040 static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff";
3047 /* Argument is expected. */
/* Entire token must be consumed as a number; base auto-detected (0). */
3051 u = strtoumax(str, &end, 0);
3052 if (errno || (size_t)(end - str) != len)
/* A dst of NULL makes arg_entry_bf_fill only count mask bits. */
3057 extra = arg_entry_bf_fill(NULL, 0, arg);
3066 if (!arg_entry_bf_fill(ctx->object, v, arg) ||
3067 !arg_entry_bf_fill(ctx->objmask, -1, arg))
/* Reject prefixes wider than the target field. */
3074 if (bytes > size || bytes + !!extra > size)
3078 buf = (uint8_t *)ctx->object + arg->offset;
3079 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* Little-endian host-order field: high bytes of the mask sit at the end. */
3081 memset((uint8_t *)buf + size - bytes, 0xff, bytes);
3082 memset(buf, 0x00, size - bytes);
3084 ((uint8_t *)buf)[size - bytes - 1] = conv[extra];
/* Network-order (or big-endian) layout: mask starts at the first byte. */
3088 memset(buf, 0xff, bytes);
3089 memset((uint8_t *)buf + bytes, 0x00, size - bytes);
3091 ((uint8_t *)buf)[bytes] = conv[extra];
3094 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
/* Error path: restore the argument so the caller's state is unchanged. */
3097 push_args(ctx, arg);
3101 /** Default parsing function for token name matching. */
3103 parse_default(struct context *ctx, const struct token *token,
3104 const char *str, unsigned int len,
3105 void *buf, unsigned int size)
3110 if (strcmp_partial(token->name, str, len))
3115 /** Parse flow command, initialize output buffer for subsequent tokens. */
/*
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
3117 parse_init(struct context *ctx, const struct token *token,
3118 const char *str, unsigned int len,
3119 void *buf, unsigned int size)
3121 struct buffer *out = buf;
3123 /* Token name must match. */
3124 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3126 /* Nothing else to do if there is no buffer. */
3129 /* Make sure buffer is large enough. */
3130 if (size < sizeof(*out))
3132 /* Initialize buffer. */
3133 memset(out, 0x00, sizeof(*out));
/* Poison the spare space (0x22) to catch use of uninitialized bytes. */
3134 memset((uint8_t *)out + sizeof(*out), 0x22, size - sizeof(*out));
3137 ctx->objmask = NULL;
3141 /** Parse tokens for validate/create commands. */
/*
 * Buffer layout: pattern items then actions grow upward from just past
 * *out, while each entry's spec/conf data grows downward from the end of
 * the buffer (out->args.vc.data); the two cursors must never cross.
 * NOTE(review): truncated dump — case labels, returns and braces are
 * missing from the visible text.
 */
3143 parse_vc(struct context *ctx, const struct token *token,
3144 const char *str, unsigned int len,
3145 void *buf, unsigned int size)
3147 struct buffer *out = buf;
3151 /* Token name must match. */
3152 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3154 /* Nothing else to do if there is no buffer. */
/* First token of the command: record it and start the data cursor at the
 * very end of the buffer. */
3157 if (!out->command) {
3158 if (ctx->curr != VALIDATE && ctx->curr != CREATE)
3160 if (sizeof(*out) > size)
3162 out->command = ctx->curr;
3165 ctx->objmask = NULL;
3166 out->args.vc.data = (uint8_t *)out + size;
/* Attribute tokens write directly into the flow attributes struct. */
3170 ctx->object = &out->args.vc.attr;
3171 ctx->objmask = NULL;
3172 switch (ctx->curr) {
3177 out->args.vc.attr.ingress = 1;
3180 out->args.vc.attr.egress = 1;
3183 out->args.vc.attr.transfer = 1;
/* "pattern" keyword: place the item array right after *out, aligned. */
3186 out->args.vc.pattern =
3187 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
3189 ctx->object = out->args.vc.pattern;
3190 ctx->objmask = NULL;
/* "actions" keyword: action array starts after the last pattern item. */
3193 out->args.vc.actions =
3194 (void *)RTE_ALIGN_CEIL((uintptr_t)
3195 (out->args.vc.pattern +
3196 out->args.vc.pattern_n),
3198 ctx->object = out->args.vc.actions;
3199 ctx->objmask = NULL;
/* Still in the pattern section: append a new item entry. */
3206 if (!out->args.vc.actions) {
3207 const struct parse_item_priv *priv = token->priv;
3208 struct rte_flow_item *item =
3209 out->args.vc.pattern + out->args.vc.pattern_n;
3211 data_size = priv->size * 3; /* spec, last, mask */
3212 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3213 (out->args.vc.data - data_size),
/* Out of room: upward and downward regions would overlap. */
3215 if ((uint8_t *)item + sizeof(*item) > data)
3217 *item = (struct rte_flow_item){
3220 ++out->args.vc.pattern_n;
3222 ctx->objmask = NULL;
/* Otherwise append a new action entry, reserving its conf area. */
3224 const struct parse_action_priv *priv = token->priv;
3225 struct rte_flow_action *action =
3226 out->args.vc.actions + out->args.vc.actions_n;
3228 data_size = priv->size; /* configuration */
3229 data = (void *)RTE_ALIGN_FLOOR((uintptr_t)
3230 (out->args.vc.data - data_size),
3232 if ((uint8_t *)action + sizeof(*action) > data)
3234 *action = (struct rte_flow_action){
3236 .conf = data_size ? data : NULL,
3238 ++out->args.vc.actions_n;
3239 ctx->object = action;
3240 ctx->objmask = NULL;
/* Claim the reserved data region and move the downward cursor. */
3242 memset(data, 0, data_size);
3243 out->args.vc.data = data;
3244 ctx->objdata = data_size;
3248 /** Parse pattern item parameter type. */
/*
 * Selects which third of the item's data area (spec/last/mask) subsequent
 * field tokens will write into. NOTE(review): truncated dump — index
 * assignments and returns are missing from the visible text.
 */
3250 parse_vc_spec(struct context *ctx, const struct token *token,
3251 const char *str, unsigned int len,
3252 void *buf, unsigned int size)
3254 struct buffer *out = buf;
3255 struct rte_flow_item *item;
3261 /* Token name must match. */
3262 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3264 /* Parse parameter types. */
3265 switch (ctx->curr) {
3266 static const enum index prefix[] = NEXT_ENTRY(PREFIX);
3272 case ITEM_PARAM_SPEC:
3275 case ITEM_PARAM_LAST:
3278 case ITEM_PARAM_PREFIX:
3279 /* Modify next token to expect a prefix. */
3280 if (ctx->next_num < 2)
3282 ctx->next[ctx->next_num - 2] = prefix;
3284 case ITEM_PARAM_MASK:
3290 /* Nothing else to do if there is no buffer. */
/* A parameter keyword is only valid after at least one pattern item. */
3293 if (!out->args.vc.pattern_n)
3295 item = &out->args.vc.pattern[out->args.vc.pattern_n - 1];
3296 data_size = ctx->objdata / 3; /* spec, last, mask */
3297 /* Point to selected object. */
3298 ctx->object = out->args.vc.data + (data_size * index);
3300 ctx->objmask = out->args.vc.data + (data_size * 2); /* mask */
3301 item->mask = ctx->objmask;
3303 ctx->objmask = NULL;
3304 /* Update relevant item pointer. */
3305 *((const void **[]){ &item->spec, &item->last, &item->mask })[index] =
3310 /** Parse action configuration field. */
/*
 * Shared .call handler for action sub-tokens: aims ctx->object at the
 * current action's configuration area so ARGS() writes land there.
 */
3312 parse_vc_conf(struct context *ctx, const struct token *token,
3313 const char *str, unsigned int len,
3314 void *buf, unsigned int size)
3316 struct buffer *out = buf;
3319 /* Token name must match. */
3320 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
3322 /* Nothing else to do if there is no buffer. */
3325 /* Point to selected object. */
3326 ctx->object = out->args.vc.data;
/* Actions have no spec/last/mask triplet, hence no object mask. */
3327 ctx->objmask = NULL;
3331 /** Parse RSS action. */
/*
 * Registers the action through parse_vc() and then fills its conf with
 * testpmd defaults (all queues up to a cap, default key/function).
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
3333 parse_vc_action_rss(struct context *ctx, const struct token *token,
3334 const char *str, unsigned int len,
3335 void *buf, unsigned int size)
3337 struct buffer *out = buf;
3338 struct rte_flow_action *action;
3339 struct action_rss_data *action_rss_data;
/* Let the generic handler allocate the action entry and its data area. */
3343 ret = parse_vc(ctx, token, str, len, buf, size);
3346 /* Nothing else to do if there is no buffer. */
3349 if (!out->args.vc.actions_n)
3351 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3352 /* Point to selected object. */
3353 ctx->object = out->args.vc.data;
3354 ctx->objmask = NULL;
3355 /* Set up default configuration. */
3356 action_rss_data = ctx->object;
3357 *action_rss_data = (struct action_rss_data){
3358 .conf = (struct rte_flow_action_rss){
3359 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
3362 .key_len = sizeof(action_rss_data->key),
3363 .queue_num = RTE_MIN(nb_rxq, ACTION_RSS_QUEUE_NUM),
3364 .key = action_rss_data->key,
3365 .queue = action_rss_data->queue,
3367 .key = "testpmd's default RSS hash key, "
3368 "override it for better balancing",
/* Default to spreading over all configured Rx queues, in order. */
3371 for (i = 0; i < action_rss_data->conf.queue_num; ++i)
3372 action_rss_data->queue[i] = i;
3373 if (!port_id_is_invalid(ctx->port, DISABLED_WARN) &&
3374 ctx->port != (portid_t)RTE_PORT_ALL) {
3375 struct rte_eth_dev_info info;
/* Trim the key length to what this port's hardware supports. */
3377 rte_eth_dev_info_get(ctx->port, &info);
3378 action_rss_data->conf.key_len =
3379 RTE_MIN(sizeof(action_rss_data->key),
3380 info.hash_key_size);
3382 action->conf = &action_rss_data->conf;
3387 * Parse func field for RSS action.
3389 * The RTE_ETH_HASH_FUNCTION_* value to assign is derived from the
3390 * ACTION_RSS_FUNC_* index that called this function.
3393 parse_vc_action_rss_func(struct context *ctx, const struct token *token,
3394 const char *str, unsigned int len,
3395 void *buf, unsigned int size)
3397 struct action_rss_data *action_rss_data;
3398 enum rte_eth_hash_function func;
3402 /* Token name must match. */
3403 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
/* Map the token index to the corresponding hash-function enum value. */
3405 switch (ctx->curr) {
3406 case ACTION_RSS_FUNC_DEFAULT:
3407 func = RTE_ETH_HASH_FUNCTION_DEFAULT;
3409 case ACTION_RSS_FUNC_TOEPLITZ:
3410 func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
3412 case ACTION_RSS_FUNC_SIMPLE_XOR:
3413 func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
/* ctx->object was pointed at the RSS data by parse_vc_action_rss(). */
3420 action_rss_data = ctx->object;
3421 action_rss_data->conf.func = func;
3426 * Parse type field for RSS action.
3428 * Valid tokens are type field names and the "end" token.
/*
 * Bit 16 of ctx->objdata marks "list already started"; it distinguishes
 * the first type token (reset conf.types) from subsequent ones.
 */
3431 parse_vc_action_rss_type(struct context *ctx, const struct token *token,
3432 const char *str, unsigned int len,
3433 void *buf, unsigned int size)
3435 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_TYPE);
3436 struct action_rss_data *action_rss_data;
3442 if (ctx->curr != ACTION_RSS_TYPE)
3444 if (!(ctx->objdata >> 16) && ctx->object) {
3445 action_rss_data = ctx->object;
3446 action_rss_data->conf.types = 0;
/* "end" terminates the list: clear the marker bit and stop chaining. */
3448 if (!strcmp_partial("end", str, len)) {
3449 ctx->objdata &= 0xffff;
/* Look the token up in the global RSS type name table. */
3452 for (i = 0; rss_type_table[i].str; ++i)
3453 if (!strcmp_partial(rss_type_table[i].str, str, len))
3455 if (!rss_type_table[i].str)
3457 ctx->objdata = 1 << 16 | (ctx->objdata & 0xffff);
/* Re-queue this token type so another type name can follow. */
3459 if (ctx->next_num == RTE_DIM(ctx->next))
3461 ctx->next[ctx->next_num++] = next;
3464 action_rss_data = ctx->object;
3465 action_rss_data->conf.types |= rss_type_table[i].rss_type;
3470 * Parse queue field for RSS action.
3472 * Valid tokens are queue indices and the "end" token.
/*
 * The running queue count lives in the upper 16 bits of ctx->objdata;
 * each accepted index is parsed straight into action_rss_data->queue[i].
 */
3475 parse_vc_action_rss_queue(struct context *ctx, const struct token *token,
3476 const char *str, unsigned int len,
3477 void *buf, unsigned int size)
3479 static const enum index next[] = NEXT_ENTRY(ACTION_RSS_QUEUE);
3480 struct action_rss_data *action_rss_data;
3481 const struct arg *arg;
3488 if (ctx->curr != ACTION_RSS_QUEUE)
3490 i = ctx->objdata >> 16;
3491 if (!strcmp_partial("end", str, len)) {
3492 ctx->objdata &= 0xffff;
/* Reject more queues than the fixed-size array can hold. */
3495 if (i >= ACTION_RSS_QUEUE_NUM)
/* Arbitrary-offset argument targeting queue[i] inside the RSS data. */
3497 arg = ARGS_ENTRY_ARB(offsetof(struct action_rss_data, queue) +
3498 i * sizeof(action_rss_data->queue[i]),
3499 sizeof(action_rss_data->queue[i]));
3500 if (push_args(ctx, arg))
3502 ret = parse_int(ctx, token, str, len, NULL, 0);
/* Store the incremented count back into the upper 16 bits. */
3508 ctx->objdata = i << 16 | (ctx->objdata & 0xffff);
3510 if (ctx->next_num == RTE_DIM(ctx->next))
3512 ctx->next[ctx->next_num++] = next;
3516 action_rss_data = ctx->object;
3517 action_rss_data->conf.queue_num = i;
/* An empty list yields a NULL queue pointer rather than a zero-length one. */
3518 action_rss_data->conf.queue = i ? action_rss_data->queue : NULL;
3522 /** Parse VXLAN encap action. */
/*
 * Builds a full ETH/[VLAN]/IPv4-or-IPv6/UDP/VXLAN item chain from the
 * global vxlan_encap_conf set by "set vxlan ..." commands.
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
3524 parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token,
3525 const char *str, unsigned int len,
3526 void *buf, unsigned int size)
3528 struct buffer *out = buf;
3529 struct rte_flow_action *action;
3530 struct action_vxlan_encap_data *action_vxlan_encap_data;
3533 ret = parse_vc(ctx, token, str, len, buf, size);
3536 /* Nothing else to do if there is no buffer. */
3539 if (!out->args.vc.actions_n)
3541 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3542 /* Point to selected object. */
3543 ctx->object = out->args.vc.data;
3544 ctx->objmask = NULL;
3545 /* Set up default configuration. */
3546 action_vxlan_encap_data = ctx->object;
3547 *action_vxlan_encap_data = (struct action_vxlan_encap_data){
3548 .conf = (struct rte_flow_action_vxlan_encap){
3549 .definition = action_vxlan_encap_data->items,
3553 .type = RTE_FLOW_ITEM_TYPE_ETH,
3554 .spec = &action_vxlan_encap_data->item_eth,
3555 .mask = &rte_flow_item_eth_mask,
3558 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3559 .spec = &action_vxlan_encap_data->item_vlan,
3560 .mask = &rte_flow_item_vlan_mask,
3563 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3564 .spec = &action_vxlan_encap_data->item_ipv4,
3565 .mask = &rte_flow_item_ipv4_mask,
3568 .type = RTE_FLOW_ITEM_TYPE_UDP,
3569 .spec = &action_vxlan_encap_data->item_udp,
3570 .mask = &rte_flow_item_udp_mask,
3573 .type = RTE_FLOW_ITEM_TYPE_VXLAN,
3574 .spec = &action_vxlan_encap_data->item_vxlan,
3575 .mask = &rte_flow_item_vxlan_mask,
3578 .type = RTE_FLOW_ITEM_TYPE_END,
3583 .tci = vxlan_encap_conf.vlan_tci,
3587 .src_addr = vxlan_encap_conf.ipv4_src,
3588 .dst_addr = vxlan_encap_conf.ipv4_dst,
3591 .src_port = vxlan_encap_conf.udp_src,
3592 .dst_port = vxlan_encap_conf.udp_dst,
3594 .item_vxlan.flags = 0,
3596 memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes,
3597 vxlan_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3598 memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes,
3599 vxlan_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: swap items[2] from the default IPv4 to IPv6. */
3600 if (!vxlan_encap_conf.select_ipv4) {
3601 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr,
3602 &vxlan_encap_conf.ipv6_src,
3603 sizeof(vxlan_encap_conf.ipv6_src));
3604 memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr,
3605 &vxlan_encap_conf.ipv6_dst,
3606 sizeof(vxlan_encap_conf.ipv6_dst));
3607 action_vxlan_encap_data->items[2] = (struct rte_flow_item){
3608 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3609 .spec = &action_vxlan_encap_data->item_ipv6,
3610 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: neutralize items[1] instead of shifting the array. */
3613 if (!vxlan_encap_conf.select_vlan)
3614 action_vxlan_encap_data->items[1].type =
3615 RTE_FLOW_ITEM_TYPE_VOID;
/* Optional ToS/TTL: widen the IP mask so those fields are matched/set. */
3616 if (vxlan_encap_conf.select_tos_ttl) {
3617 if (vxlan_encap_conf.select_ipv4) {
3618 static struct rte_flow_item_ipv4 ipv4_mask_tos;
3620 memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask,
3621 sizeof(ipv4_mask_tos));
3622 ipv4_mask_tos.hdr.type_of_service = 0xff;
3623 ipv4_mask_tos.hdr.time_to_live = 0xff;
3624 action_vxlan_encap_data->item_ipv4.hdr.type_of_service =
3625 vxlan_encap_conf.ip_tos;
3626 action_vxlan_encap_data->item_ipv4.hdr.time_to_live =
3627 vxlan_encap_conf.ip_ttl;
3628 action_vxlan_encap_data->items[2].mask =
3631 static struct rte_flow_item_ipv6 ipv6_mask_tos;
3633 memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask,
3634 sizeof(ipv6_mask_tos));
/* IPv6 traffic class lives inside the vtc_flow word. */
3635 ipv6_mask_tos.hdr.vtc_flow |=
3636 RTE_BE32(0xfful << RTE_IPV6_HDR_TC_SHIFT);
3637 ipv6_mask_tos.hdr.hop_limits = 0xff;
3638 action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |=
3640 ((uint32_t)vxlan_encap_conf.ip_tos <<
3641 RTE_IPV6_HDR_TC_SHIFT);
3642 action_vxlan_encap_data->item_ipv6.hdr.hop_limits =
3643 vxlan_encap_conf.ip_ttl;
3644 action_vxlan_encap_data->items[2].mask =
3648 memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni,
3649 RTE_DIM(vxlan_encap_conf.vni));
3650 action->conf = &action_vxlan_encap_data->conf;
3654 /** Parse NVGRE encap action. */
/*
 * Mirrors parse_vc_action_vxlan_encap() but builds an
 * ETH/[VLAN]/IPv4-or-IPv6/NVGRE chain from nvgre_encap_conf.
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
3656 parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token,
3657 const char *str, unsigned int len,
3658 void *buf, unsigned int size)
3660 struct buffer *out = buf;
3661 struct rte_flow_action *action;
3662 struct action_nvgre_encap_data *action_nvgre_encap_data;
3665 ret = parse_vc(ctx, token, str, len, buf, size);
3668 /* Nothing else to do if there is no buffer. */
3671 if (!out->args.vc.actions_n)
3673 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3674 /* Point to selected object. */
3675 ctx->object = out->args.vc.data;
3676 ctx->objmask = NULL;
3677 /* Set up default configuration. */
3678 action_nvgre_encap_data = ctx->object;
3679 *action_nvgre_encap_data = (struct action_nvgre_encap_data){
3680 .conf = (struct rte_flow_action_nvgre_encap){
3681 .definition = action_nvgre_encap_data->items,
3685 .type = RTE_FLOW_ITEM_TYPE_ETH,
3686 .spec = &action_nvgre_encap_data->item_eth,
3687 .mask = &rte_flow_item_eth_mask,
3690 .type = RTE_FLOW_ITEM_TYPE_VLAN,
3691 .spec = &action_nvgre_encap_data->item_vlan,
3692 .mask = &rte_flow_item_vlan_mask,
3695 .type = RTE_FLOW_ITEM_TYPE_IPV4,
3696 .spec = &action_nvgre_encap_data->item_ipv4,
3697 .mask = &rte_flow_item_ipv4_mask,
3700 .type = RTE_FLOW_ITEM_TYPE_NVGRE,
3701 .spec = &action_nvgre_encap_data->item_nvgre,
3702 .mask = &rte_flow_item_nvgre_mask,
3705 .type = RTE_FLOW_ITEM_TYPE_END,
3710 .tci = nvgre_encap_conf.vlan_tci,
3714 .src_addr = nvgre_encap_conf.ipv4_src,
3715 .dst_addr = nvgre_encap_conf.ipv4_dst,
3717 .item_nvgre.flow_id = 0,
3719 memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes,
3720 nvgre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3721 memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes,
3722 nvgre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* IPv6 selected: replace the default IPv4 item at index 2. */
3723 if (!nvgre_encap_conf.select_ipv4) {
3724 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr,
3725 &nvgre_encap_conf.ipv6_src,
3726 sizeof(nvgre_encap_conf.ipv6_src));
3727 memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr,
3728 &nvgre_encap_conf.ipv6_dst,
3729 sizeof(nvgre_encap_conf.ipv6_dst));
3730 action_nvgre_encap_data->items[2] = (struct rte_flow_item){
3731 .type = RTE_FLOW_ITEM_TYPE_IPV6,
3732 .spec = &action_nvgre_encap_data->item_ipv6,
3733 .mask = &rte_flow_item_ipv6_mask,
/* No VLAN requested: turn items[1] into a VOID placeholder. */
3736 if (!nvgre_encap_conf.select_vlan)
3737 action_nvgre_encap_data->items[1].type =
3738 RTE_FLOW_ITEM_TYPE_VOID;
3739 memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni,
3740 RTE_DIM(nvgre_encap_conf.tni));
3741 action->conf = &action_nvgre_encap_data->conf;
3745 /** Parse l2 encap action. */
/*
 * Serializes an ETH (+ optional VLAN) header into the raw_encap data
 * buffer from the global l2_encap_conf.
 * NOTE(review): vlan.tci is seeded from mplsoudp_encap_conf here —
 * presumably shared VLAN config, but worth confirming upstream.
 */
3747 parse_vc_action_l2_encap(struct context *ctx, const struct token *token,
3748 const char *str, unsigned int len,
3749 void *buf, unsigned int size)
3751 struct buffer *out = buf;
3752 struct rte_flow_action *action;
3753 struct action_raw_encap_data *action_encap_data;
3754 struct rte_flow_item_eth eth = { .type = 0, };
3755 struct rte_flow_item_vlan vlan = {
3756 .tci = mplsoudp_encap_conf.vlan_tci,
3762 ret = parse_vc(ctx, token, str, len, buf, size);
3765 /* Nothing else to do if there is no buffer. */
3768 if (!out->args.vc.actions_n)
3770 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3771 /* Point to selected object. */
3772 ctx->object = out->args.vc.data;
3773 ctx->objmask = NULL;
3774 /* Copy the headers to the buffer. */
3775 action_encap_data = ctx->object;
3776 *action_encap_data = (struct action_raw_encap_data) {
3777 .conf = (struct rte_flow_action_raw_encap){
3778 .data = action_encap_data->data,
3782 header = action_encap_data->data;
/* Ethertype reflects what follows: VLAN tag, else IPv4/IPv6 payload. */
3783 if (l2_encap_conf.select_vlan)
3784 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3785 else if (l2_encap_conf.select_ipv4)
3786 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3788 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3789 memcpy(eth.dst.addr_bytes,
3790 l2_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3791 memcpy(eth.src.addr_bytes,
3792 l2_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
3793 memcpy(header, ð, sizeof(eth));
3794 header += sizeof(eth);
3795 if (l2_encap_conf.select_vlan) {
3796 if (l2_encap_conf.select_ipv4)
3797 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3799 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3800 memcpy(header, &vlan, sizeof(vlan));
3801 header += sizeof(vlan);
/* Final size is however many header bytes were appended. */
3803 action_encap_data->conf.size = header -
3804 action_encap_data->data;
3805 action->conf = &action_encap_data->conf;
3809 /** Parse l2 decap action. */
/*
 * Builds the raw_decap template describing the ETH (+ optional VLAN)
 * header to strip; only header sizes matter, field values are dummies.
 */
3811 parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
3812 const char *str, unsigned int len,
3813 void *buf, unsigned int size)
3815 struct buffer *out = buf;
3816 struct rte_flow_action *action;
3817 struct action_raw_decap_data *action_decap_data;
3818 struct rte_flow_item_eth eth = { .type = 0, };
3819 struct rte_flow_item_vlan vlan = {
3820 .tci = mplsoudp_encap_conf.vlan_tci,
3826 ret = parse_vc(ctx, token, str, len, buf, size);
3829 /* Nothing else to do if there is no buffer. */
3832 if (!out->args.vc.actions_n)
3834 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3835 /* Point to selected object. */
3836 ctx->object = out->args.vc.data;
3837 ctx->objmask = NULL;
3838 /* Copy the headers to the buffer. */
3839 action_decap_data = ctx->object;
3840 *action_decap_data = (struct action_raw_decap_data) {
3841 .conf = (struct rte_flow_action_raw_decap){
3842 .data = action_decap_data->data,
3846 header = action_decap_data->data;
3847 if (l2_decap_conf.select_vlan)
3848 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
3849 memcpy(header, ð, sizeof(eth));
3850 header += sizeof(eth);
3851 if (l2_decap_conf.select_vlan) {
3852 memcpy(header, &vlan, sizeof(vlan));
3853 header += sizeof(vlan);
/* Size of the stack of headers to be removed. */
3855 action_decap_data->conf.size = header -
3856 action_decap_data->data;
3857 action->conf = &action_decap_data->conf;
3861 #define ETHER_TYPE_MPLS_UNICAST 0x8847
3863 /** Parse MPLSOGRE encap action. */
/*
 * Serializes ETH/[VLAN]/IP/GRE/MPLS headers into a raw_encap buffer
 * from mplsogre_encap_conf.
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
3865 parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
3866 const char *str, unsigned int len,
3867 void *buf, unsigned int size)
3869 struct buffer *out = buf;
3870 struct rte_flow_action *action;
3871 struct action_raw_encap_data *action_encap_data;
3872 struct rte_flow_item_eth eth = { .type = 0, };
3873 struct rte_flow_item_vlan vlan = {
3874 .tci = mplsogre_encap_conf.vlan_tci,
3877 struct rte_flow_item_ipv4 ipv4 = {
3879 .src_addr = mplsogre_encap_conf.ipv4_src,
3880 .dst_addr = mplsogre_encap_conf.ipv4_dst,
3881 .next_proto_id = IPPROTO_GRE,
3882 .version_ihl = RTE_IPV4_VHL_DEF,
3883 .time_to_live = IPDEFTTL,
3886 struct rte_flow_item_ipv6 ipv6 = {
3888 .proto = IPPROTO_GRE,
3889 .hop_limits = IPDEFTTL,
3892 struct rte_flow_item_gre gre = {
3893 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3895 struct rte_flow_item_mpls mpls;
3899 ret = parse_vc(ctx, token, str, len, buf, size);
3902 /* Nothing else to do if there is no buffer. */
3905 if (!out->args.vc.actions_n)
3907 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
3908 /* Point to selected object. */
3909 ctx->object = out->args.vc.data;
3910 ctx->objmask = NULL;
3911 /* Copy the headers to the buffer. */
3912 action_encap_data = ctx->object;
3913 *action_encap_data = (struct action_raw_encap_data) {
3914 .conf = (struct rte_flow_action_raw_encap){
3915 .data = action_encap_data->data,
3920 header = action_encap_data->data;
/* Ethertype reflects the next header in the stack. */
3921 if (mplsogre_encap_conf.select_vlan)
3922 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
3923 else if (mplsogre_encap_conf.select_ipv4)
3924 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3926 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3927 memcpy(eth.dst.addr_bytes,
3928 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
3929 memcpy(eth.src.addr_bytes,
3930 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
3931 memcpy(header, ð, sizeof(eth));
3932 header += sizeof(eth);
3933 if (mplsogre_encap_conf.select_vlan) {
3934 if (mplsogre_encap_conf.select_ipv4)
3935 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
3937 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
3938 memcpy(header, &vlan, sizeof(vlan));
3939 header += sizeof(vlan);
3941 if (mplsogre_encap_conf.select_ipv4) {
3942 memcpy(header, &ipv4, sizeof(ipv4));
3943 header += sizeof(ipv4);
3945 memcpy(&ipv6.hdr.src_addr,
3946 &mplsogre_encap_conf.ipv6_src,
3947 sizeof(mplsogre_encap_conf.ipv6_src));
3948 memcpy(&ipv6.hdr.dst_addr,
3949 &mplsogre_encap_conf.ipv6_dst,
3950 sizeof(mplsogre_encap_conf.ipv6_dst));
3951 memcpy(header, &ipv6, sizeof(ipv6));
3952 header += sizeof(ipv6);
3954 memcpy(header, &gre, sizeof(gre));
3955 header += sizeof(gre);
3956 memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
3957 RTE_DIM(mplsogre_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
3958 mpls.label_tc_s[2] |= 0x1;
3959 memcpy(header, &mpls, sizeof(mpls));
3960 header += sizeof(mpls);
3961 action_encap_data->conf.size = header -
3962 action_encap_data->data;
3963 action->conf = &action_encap_data->conf;
3967 /** Parse MPLSOGRE decap action. */
/*
 * Builds the raw_decap template mirroring the MPLSoGRE encap stack.
 * NOTE(review): reads mplsogre_encap_conf (not decap) for select_ipv4/
 * select_vlan — presumably decap mirrors the encap layout; confirm.
 */
3969 parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
3970 const char *str, unsigned int len,
3971 void *buf, unsigned int size)
3973 struct buffer *out = buf;
3974 struct rte_flow_action *action;
3975 struct action_raw_decap_data *action_decap_data;
3976 struct rte_flow_item_eth eth = { .type = 0, };
3977 struct rte_flow_item_vlan vlan = {.tci = 0};
3978 struct rte_flow_item_ipv4 ipv4 = {
3980 .next_proto_id = IPPROTO_GRE,
3983 struct rte_flow_item_ipv6 ipv6 = {
3985 .proto = IPPROTO_GRE,
3988 struct rte_flow_item_gre gre = {
3989 .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
3991 struct rte_flow_item_mpls mpls;
3995 ret = parse_vc(ctx, token, str, len, buf, size);
3998 /* Nothing else to do if there is no buffer. */
4001 if (!out->args.vc.actions_n)
4003 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4004 /* Point to selected object. */
4005 ctx->object = out->args.vc.data;
4006 ctx->objmask = NULL;
4007 /* Copy the headers to the buffer. */
4008 action_decap_data = ctx->object;
4009 *action_decap_data = (struct action_raw_decap_data) {
4010 .conf = (struct rte_flow_action_raw_decap){
4011 .data = action_decap_data->data,
4015 header = action_decap_data->data;
4016 if (mplsogre_decap_conf.select_vlan)
4017 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4018 else if (mplsogre_encap_conf.select_ipv4)
4019 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4021 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4022 memcpy(eth.dst.addr_bytes,
4023 mplsogre_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4024 memcpy(eth.src.addr_bytes,
4025 mplsogre_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
4026 memcpy(header, ð, sizeof(eth));
4027 header += sizeof(eth);
4028 if (mplsogre_encap_conf.select_vlan) {
4029 if (mplsogre_encap_conf.select_ipv4)
4030 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4032 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4033 memcpy(header, &vlan, sizeof(vlan));
4034 header += sizeof(vlan);
4036 if (mplsogre_encap_conf.select_ipv4) {
4037 memcpy(header, &ipv4, sizeof(ipv4));
4038 header += sizeof(ipv4);
4040 memcpy(header, &ipv6, sizeof(ipv6));
4041 header += sizeof(ipv6);
4043 memcpy(header, &gre, sizeof(gre));
4044 header += sizeof(gre);
/* MPLS content is irrelevant for decap; only its size matters. */
4045 memset(&mpls, 0, sizeof(mpls));
4046 memcpy(header, &mpls, sizeof(mpls));
4047 header += sizeof(mpls);
4048 action_decap_data->conf.size = header -
4049 action_decap_data->data;
4050 action->conf = &action_decap_data->conf;
4054 /** Parse MPLSOUDP encap action. */
/*
 * Serializes ETH/[VLAN]/IP/UDP/MPLS headers into a raw_encap buffer
 * from mplsoudp_encap_conf.
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
4056 parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
4057 const char *str, unsigned int len,
4058 void *buf, unsigned int size)
4060 struct buffer *out = buf;
4061 struct rte_flow_action *action;
4062 struct action_raw_encap_data *action_encap_data;
4063 struct rte_flow_item_eth eth = { .type = 0, };
4064 struct rte_flow_item_vlan vlan = {
4065 .tci = mplsoudp_encap_conf.vlan_tci,
4068 struct rte_flow_item_ipv4 ipv4 = {
4070 .src_addr = mplsoudp_encap_conf.ipv4_src,
4071 .dst_addr = mplsoudp_encap_conf.ipv4_dst,
4072 .next_proto_id = IPPROTO_UDP,
4073 .version_ihl = RTE_IPV4_VHL_DEF,
4074 .time_to_live = IPDEFTTL,
4077 struct rte_flow_item_ipv6 ipv6 = {
4079 .proto = IPPROTO_UDP,
4080 .hop_limits = IPDEFTTL,
4083 struct rte_flow_item_udp udp = {
4085 .src_port = mplsoudp_encap_conf.udp_src,
4086 .dst_port = mplsoudp_encap_conf.udp_dst,
4089 struct rte_flow_item_mpls mpls;
4093 ret = parse_vc(ctx, token, str, len, buf, size);
4096 /* Nothing else to do if there is no buffer. */
4099 if (!out->args.vc.actions_n)
4101 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4102 /* Point to selected object. */
4103 ctx->object = out->args.vc.data;
4104 ctx->objmask = NULL;
4105 /* Copy the headers to the buffer. */
4106 action_encap_data = ctx->object;
4107 *action_encap_data = (struct action_raw_encap_data) {
4108 .conf = (struct rte_flow_action_raw_encap){
4109 .data = action_encap_data->data,
4114 header = action_encap_data->data;
/* Ethertype reflects the next header in the stack. */
4115 if (mplsoudp_encap_conf.select_vlan)
4116 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4117 else if (mplsoudp_encap_conf.select_ipv4)
4118 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4120 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4121 memcpy(eth.dst.addr_bytes,
4122 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4123 memcpy(eth.src.addr_bytes,
4124 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
4125 memcpy(header, ð, sizeof(eth));
4126 header += sizeof(eth);
4127 if (mplsoudp_encap_conf.select_vlan) {
4128 if (mplsoudp_encap_conf.select_ipv4)
4129 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4131 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4132 memcpy(header, &vlan, sizeof(vlan));
4133 header += sizeof(vlan);
4135 if (mplsoudp_encap_conf.select_ipv4) {
4136 memcpy(header, &ipv4, sizeof(ipv4));
4137 header += sizeof(ipv4);
4139 memcpy(&ipv6.hdr.src_addr,
4140 &mplsoudp_encap_conf.ipv6_src,
4141 sizeof(mplsoudp_encap_conf.ipv6_src));
4142 memcpy(&ipv6.hdr.dst_addr,
4143 &mplsoudp_encap_conf.ipv6_dst,
4144 sizeof(mplsoudp_encap_conf.ipv6_dst));
4145 memcpy(header, &ipv6, sizeof(ipv6));
4146 header += sizeof(ipv6);
4148 memcpy(header, &udp, sizeof(udp));
4149 header += sizeof(udp);
4150 memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
4151 RTE_DIM(mplsoudp_encap_conf.label));
/* Set the bottom-of-stack (S) bit on the single MPLS label. */
4152 mpls.label_tc_s[2] |= 0x1;
4153 memcpy(header, &mpls, sizeof(mpls));
4154 header += sizeof(mpls);
4155 action_encap_data->conf.size = header -
4156 action_encap_data->data;
4157 action->conf = &action_encap_data->conf;
4161 /** Parse MPLSOUDP decap action. */
/*
 * Builds the raw_decap template mirroring the MPLSoUDP encap stack.
 * NOTE(review): reads mplsoudp_encap_conf (not decap) for most selectors —
 * presumably decap mirrors the encap layout; confirm against upstream.
 */
4163 parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
4164 const char *str, unsigned int len,
4165 void *buf, unsigned int size)
4167 struct buffer *out = buf;
4168 struct rte_flow_action *action;
4169 struct action_raw_decap_data *action_decap_data;
4170 struct rte_flow_item_eth eth = { .type = 0, };
4171 struct rte_flow_item_vlan vlan = {.tci = 0};
4172 struct rte_flow_item_ipv4 ipv4 = {
4174 .next_proto_id = IPPROTO_UDP,
4177 struct rte_flow_item_ipv6 ipv6 = {
4179 .proto = IPPROTO_UDP,
/* 6635 is the IANA-assigned MPLS-in-UDP destination port. */
4182 struct rte_flow_item_udp udp = {
4184 .dst_port = rte_cpu_to_be_16(6635),
4187 struct rte_flow_item_mpls mpls;
4191 ret = parse_vc(ctx, token, str, len, buf, size);
4194 /* Nothing else to do if there is no buffer. */
4197 if (!out->args.vc.actions_n)
4199 action = &out->args.vc.actions[out->args.vc.actions_n - 1];
4200 /* Point to selected object. */
4201 ctx->object = out->args.vc.data;
4202 ctx->objmask = NULL;
4203 /* Copy the headers to the buffer. */
4204 action_decap_data = ctx->object;
4205 *action_decap_data = (struct action_raw_decap_data) {
4206 .conf = (struct rte_flow_action_raw_decap){
4207 .data = action_decap_data->data,
4211 header = action_decap_data->data;
4212 if (mplsoudp_decap_conf.select_vlan)
4213 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
4214 else if (mplsoudp_encap_conf.select_ipv4)
4215 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4217 eth.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4218 memcpy(eth.dst.addr_bytes,
4219 mplsoudp_encap_conf.eth_dst, RTE_ETHER_ADDR_LEN);
4220 memcpy(eth.src.addr_bytes,
4221 mplsoudp_encap_conf.eth_src, RTE_ETHER_ADDR_LEN);
/* NOTE(review): "ð" below is mojibake for "&eth" — fix file encoding. */
4222 memcpy(header, ð, sizeof(eth));
4223 header += sizeof(eth);
4224 if (mplsoudp_encap_conf.select_vlan) {
4225 if (mplsoudp_encap_conf.select_ipv4)
4226 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
4228 vlan.inner_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
4229 memcpy(header, &vlan, sizeof(vlan));
4230 header += sizeof(vlan);
4232 if (mplsoudp_encap_conf.select_ipv4) {
4233 memcpy(header, &ipv4, sizeof(ipv4));
4234 header += sizeof(ipv4);
4236 memcpy(header, &ipv6, sizeof(ipv6));
4237 header += sizeof(ipv6);
4239 memcpy(header, &udp, sizeof(udp));
4240 header += sizeof(udp);
/* MPLS content is irrelevant for decap; only its size matters. */
4241 memset(&mpls, 0, sizeof(mpls));
4242 memcpy(header, &mpls, sizeof(mpls));
4243 header += sizeof(mpls);
4244 action_decap_data->conf.size = header -
4245 action_decap_data->data;
4246 action->conf = &action_decap_data->conf;
4250 /** Parse tokens for destroy command. */
/*
 * First call records the DESTROY command and places the rule-ID array
 * after *out; later calls append one rule ID per "rule" token.
 * NOTE(review): truncated dump — returns/braces missing from visible text.
 */
4252 parse_destroy(struct context *ctx, const struct token *token,
4253 const char *str, unsigned int len,
4254 void *buf, unsigned int size)
4256 struct buffer *out = buf;
4258 /* Token name must match. */
4259 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4261 /* Nothing else to do if there is no buffer. */
4264 if (!out->command) {
4265 if (ctx->curr != DESTROY)
4267 if (sizeof(*out) > size)
4269 out->command = ctx->curr;
4272 ctx->objmask = NULL;
/* Rule-ID array starts right after the command buffer, aligned. */
4273 out->args.destroy.rule =
4274 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Refuse to append when the next slot would overflow the buffer. */
4278 if (((uint8_t *)(out->args.destroy.rule + out->args.destroy.rule_n) +
4279 sizeof(*out->args.destroy.rule)) > (uint8_t *)out + size)
4282 ctx->object = out->args.destroy.rule + out->args.destroy.rule_n++;
4283 ctx->objmask = NULL;
4287 /** Parse tokens for flush command. */
/*
 * Token callback for the "flow flush" command.  Only records the
 * command in the output buffer on first invocation; flush takes no
 * per-rule arguments beyond the port handled elsewhere.
 * NOTE(review): interior lines (returns/braces) are missing from this
 * excerpt.
 */
4289 parse_flush(struct context *ctx, const struct token *token,
4290 const char *str, unsigned int len,
4291 void *buf, unsigned int size)
4293 struct buffer *out = buf;
4295 /* Token name must match. */
4296 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4298 /* Nothing else to do if there is no buffer. */
4301 if (!out->command) {
4302 if (ctx->curr != FLUSH)
4304 if (sizeof(*out) > size)
4306 out->command = ctx->curr;
4309 ctx->objmask = NULL;
4314 /** Parse tokens for query command. */
/*
 * Token callback for the "flow query" command: validates the token
 * name, then records the command on first invocation.  The rule ID and
 * action to query are filled in through later tokens (not visible in
 * this excerpt).
 */
4316 parse_query(struct context *ctx, const struct token *token,
4317 const char *str, unsigned int len,
4318 void *buf, unsigned int size)
4320 struct buffer *out = buf;
4322 /* Token name must match. */
4323 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4325 /* Nothing else to do if there is no buffer. */
4328 if (!out->command) {
4329 if (ctx->curr != QUERY)
4331 if (sizeof(*out) > size)
4333 out->command = ctx->curr;
4336 ctx->objmask = NULL;
4341 /** Parse action names. */
/*
 * Matches an action name (for "flow query") against the next_action[]
 * token table using strcmp_partial(), then copies the resolved value
 * into the object at the offset described by the popped argument.
 * push_args() restores the argument on failure paths so the parser
 * state stays consistent.
 * NOTE(review): interior lines are missing from this excerpt.
 */
4343 parse_action(struct context *ctx, const struct token *token,
4344 const char *str, unsigned int len,
4345 void *buf, unsigned int size)
4347 struct buffer *out = buf;
4348 const struct arg *arg = pop_args(ctx);
4352 /* Argument is expected. */
4355 /* Parse action name. */
4356 for (i = 0; next_action[i]; ++i) {
4357 const struct parse_action_priv *priv;
4359 token = &token_list[next_action[i]];
4360 if (strcmp_partial(token->name, str, len))
4366 memcpy((uint8_t *)ctx->object + arg->offset,
4372 push_args(ctx, arg);
4376 /** Parse tokens for list command. */
/*
 * Token callback for the "flow list" command.  Mirrors parse_destroy():
 * first call records the command and anchors args.list.group[] right
 * after the buffer header (aligned); each later call appends one group
 * ID slot after checking it still fits in the buffer.
 * NOTE(review): interior lines (returns/braces) are missing from this
 * excerpt.
 */
4378 parse_list(struct context *ctx, const struct token *token,
4379 const char *str, unsigned int len,
4380 void *buf, unsigned int size)
4382 struct buffer *out = buf;
4384 /* Token name must match. */
4385 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4387 /* Nothing else to do if there is no buffer. */
4390 if (!out->command) {
4391 if (ctx->curr != LIST)
4393 if (sizeof(*out) > size)
4395 out->command = ctx->curr;
4398 ctx->objmask = NULL;
4399 out->args.list.group =
4400 (void *)RTE_ALIGN_CEIL((uintptr_t)(out + 1),
/* Reject the token if appending one more group ID would overflow the buffer. */
4404 if (((uint8_t *)(out->args.list.group + out->args.list.group_n) +
4405 sizeof(*out->args.list.group)) > (uint8_t *)out + size)
4408 ctx->object = out->args.list.group + out->args.list.group_n++;
4409 ctx->objmask = NULL;
4413 /** Parse tokens for isolate command. */
/*
 * Token callback for the "flow isolate" command: records the command on
 * first invocation.  The boolean set/unset value is parsed by a later
 * token (not visible in this excerpt).
 */
4415 parse_isolate(struct context *ctx, const struct token *token,
4416 const char *str, unsigned int len,
4417 void *buf, unsigned int size)
4419 struct buffer *out = buf;
4421 /* Token name must match. */
4422 if (parse_default(ctx, token, str, len, NULL, 0) < 0)
4424 /* Nothing else to do if there is no buffer. */
4427 if (!out->command) {
4428 if (ctx->curr != ISOLATE)
4430 if (sizeof(*out) > size)
4432 out->command = ctx->curr;
4435 ctx->objmask = NULL;
4441 * Parse signed/unsigned integers 8 to 64-bit long.
4443 * Last argument (ctx->args) is retrieved to determine integer type and
/*
 * Core numeric token parser.  The popped argument describes sign,
 * min/max bounds, storage size/offset and whether the value must be
 * stored big-endian (arg->hton).  Bit-field arguments are written via
 * arg_entry_bf_fill(); plain fields are stored by size, including a
 * dedicated 24-bit (uint8_t[3]) case handled per byte order.  When an
 * object mask is active, the same value is mirrored into the mask area.
 * NOTE(review): interior lines (error labels, returns, break
 * statements) are missing from this excerpt.
 */
4447 parse_int(struct context *ctx, const struct token *token,
4448 const char *str, unsigned int len,
4449 void *buf, unsigned int size)
4451 const struct arg *arg = pop_args(ctx);
4456 /* Argument is expected. */
/* Signed and unsigned parses share one uintmax_t holder. */
4461 (uintmax_t)strtoimax(str, &end, 0) :
4462 strtoumax(str, &end, 0);
4463 if (errno || (size_t)(end - str) != len)
/* Range check honours signedness declared by the argument. */
4466 ((arg->sign && ((intmax_t)u < (intmax_t)arg->min ||
4467 (intmax_t)u > (intmax_t)arg->max)) ||
4468 (!arg->sign && (u < arg->min || u > arg->max))))
/* Bit-field destination: fill value and an all-ones mask. */
4473 if (!arg_entry_bf_fill(ctx->object, u, arg) ||
4474 !arg_entry_bf_fill(ctx->objmask, -1, arg))
4478 buf = (uint8_t *)ctx->object + arg->offset;
/* Value must fit in the destination field width. */
4480 if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
4484 case sizeof(uint8_t):
4485 *(uint8_t *)buf = u;
4487 case sizeof(uint16_t):
4488 *(uint16_t *)buf = arg->hton ? rte_cpu_to_be_16(u) : u;
4490 case sizeof(uint8_t [3]):
4491 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
/* 24-bit field, little-endian host: low byte first. */
4493 ((uint8_t *)buf)[0] = u;
4494 ((uint8_t *)buf)[1] = u >> 8;
4495 ((uint8_t *)buf)[2] = u >> 16;
/* 24-bit field, big-endian host: high byte first. */
4499 ((uint8_t *)buf)[0] = u >> 16;
4500 ((uint8_t *)buf)[1] = u >> 8;
4501 ((uint8_t *)buf)[2] = u;
4503 case sizeof(uint32_t):
4504 *(uint32_t *)buf = arg->hton ? rte_cpu_to_be_32(u) : u;
4506 case sizeof(uint64_t):
4507 *(uint64_t *)buf = arg->hton ? rte_cpu_to_be_64(u) : u;
/* Repeat the store into the mask object when one is active. */
4512 if (ctx->objmask && buf != (uint8_t *)ctx->objmask + arg->offset) {
4514 buf = (uint8_t *)ctx->objmask + arg->offset;
4519 push_args(ctx, arg);
4526 * Three arguments (ctx->args) are retrieved from the stack to store data,
4527 * its actual length and address (in that order).
/*
 * Parses a raw string token.  Pops three stacked arguments: data
 * destination, length field and optional address field.  The length is
 * written first by delegating to parse_int() on a stringified "len",
 * then the string bytes are copied (zero-padded to the field size) and
 * the mask, when present, is set to 0xff over the copied bytes.  If the
 * address argument has a non-zero size, a pointer to the stored data is
 * also saved.  Failure paths re-push the popped arguments to restore
 * parser state.
 * NOTE(review): interior lines (returns, error labels) are missing
 * from this excerpt.
 */
4530 parse_string(struct context *ctx, const struct token *token,
4531 const char *str, unsigned int len,
4532 void *buf, unsigned int size)
4534 const struct arg *arg_data = pop_args(ctx);
4535 const struct arg *arg_len = pop_args(ctx);
4536 const struct arg *arg_addr = pop_args(ctx);
4537 char tmp[16]; /* Ought to be enough. */
4540 /* Arguments are expected. */
4544 push_args(ctx, arg_data);
4548 push_args(ctx, arg_len);
4549 push_args(ctx, arg_data);
4552 size = arg_data->size;
4553 /* Bit-mask fill is not supported. */
4554 if (arg_data->mask || size < len)
4558 /* Let parse_int() fill length information first. */
4559 ret = snprintf(tmp, sizeof(tmp), "%u", len);
4562 push_args(ctx, arg_len);
4563 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4568 buf = (uint8_t *)ctx->object + arg_data->offset;
4569 /* Output buffer is not necessarily NUL-terminated. */
4570 memcpy(buf, str, len);
4571 memset((uint8_t *)buf + len, 0x00, size - len);
4573 memset((uint8_t *)ctx->objmask + arg_data->offset, 0xff, len);
4574 /* Save address if requested. */
4575 if (arg_addr->size) {
4576 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4578 (uint8_t *)ctx->object + arg_data->offset
4582 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4584 (uint8_t *)ctx->objmask + arg_data->offset
4590 push_args(ctx, arg_addr);
4591 push_args(ctx, arg_len);
4592 push_args(ctx, arg_data);
/*
 * Converts a hex digit string in src into raw bytes in dst, two input
 * characters per output byte, updating *size with the byte count.
 * Each pair is isolated via snprintf into a 3-byte scratch buffer and
 * decoded with strtoul(..., 16).
 * NOTE(review): parameter validation and return handling are partly
 * missing from this excerpt; invalid-digit detection (via 'c') is not
 * visible here — confirm against the full source.
 */
4597 parse_hex_string(const char *src, uint8_t *dst, uint32_t *size)
4603 /* Check input parameters */
4604 if ((src == NULL) ||
4610 /* Convert chars to bytes */
4611 for (i = 0, len = 0; i < *size; i += 2) {
4612 snprintf(tmp, 3, "%s", src + i);
4613 dst[len++] = strtoul(tmp, &c, 16);
/*
 * Parses a hexadecimal byte-string token (e.g. pattern item raw data).
 * Same three-argument contract as parse_string(): data, length and
 * optional address arguments are popped from the stack.  A leading
 * "0x"/"0X" prefix is stripped, the remaining digits are decoded into
 * hex_tmp[] via parse_hex_string() (capped at 256 bytes), then the
 * length is committed through parse_int() and the bytes copied into the
 * object (zero-padded), with the mask set when active.
 * NOTE(review): interior lines (returns, error labels, prefix-skip
 * arithmetic) are missing from this excerpt.
 */
4628 parse_hex(struct context *ctx, const struct token *token,
4629 const char *str, unsigned int len,
4630 void *buf, unsigned int size)
4632 const struct arg *arg_data = pop_args(ctx);
4633 const struct arg *arg_len = pop_args(ctx);
4634 const struct arg *arg_addr = pop_args(ctx);
4635 char tmp[16]; /* Ought to be enough. */
4637 unsigned int hexlen = len;
4638 unsigned int length = 256;
4639 uint8_t hex_tmp[length];
4641 /* Arguments are expected. */
4645 push_args(ctx, arg_data);
4649 push_args(ctx, arg_len);
4650 push_args(ctx, arg_data);
4653 size = arg_data->size;
4654 /* Bit-mask fill is not supported. */
4660 /* translate bytes string to array. */
4661 if (str[0] == '0' && ((str[1] == 'x') ||
/* Decoded byte string must fit in the on-stack scratch buffer. */
4666 if (hexlen > length)
4668 ret = parse_hex_string(str, hex_tmp, &hexlen);
4671 /* Let parse_int() fill length information first. */
4672 ret = snprintf(tmp, sizeof(tmp), "%u", hexlen);
4675 push_args(ctx, arg_len);
4676 ret = parse_int(ctx, token, tmp, ret, NULL, 0);
4681 buf = (uint8_t *)ctx->object + arg_data->offset;
4682 /* Output buffer is not necessarily NUL-terminated. */
4683 memcpy(buf, hex_tmp, hexlen);
4684 memset((uint8_t *)buf + hexlen, 0x00, size - hexlen);
4686 memset((uint8_t *)ctx->objmask + arg_data->offset,
4688 /* Save address if requested. */
4689 if (arg_addr->size) {
4690 memcpy((uint8_t *)ctx->object + arg_addr->offset,
4692 (uint8_t *)ctx->object + arg_data->offset
4696 memcpy((uint8_t *)ctx->objmask + arg_addr->offset,
4698 (uint8_t *)ctx->objmask + arg_data->offset
4704 push_args(ctx, arg_addr);
4705 push_args(ctx, arg_len);
4706 push_args(ctx, arg_data);
4712 * Parse a MAC address.
4714 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Decodes a MAC address token via cmdline_parse_etheraddr() into a
 * temporary rte_ether_addr, then copies it into the object at the
 * popped argument's offset.  Destination size must exactly match
 * sizeof(struct rte_ether_addr); only network byte order is supported.
 * The mask, when active, is filled with 0xff over the same span.
 * NOTE(review): interior lines (returns/error labels) are missing from
 * this excerpt.
 */
4718 parse_mac_addr(struct context *ctx, const struct token *token,
4719 const char *str, unsigned int len,
4720 void *buf, unsigned int size)
4722 const struct arg *arg = pop_args(ctx);
4723 struct rte_ether_addr tmp;
4727 /* Argument is expected. */
4731 /* Bit-mask fill is not supported. */
4732 if (arg->mask || size != sizeof(tmp))
4734 /* Only network endian is supported. */
4737 ret = cmdline_parse_etheraddr(NULL, str, &tmp, size);
/* Entire token must have been consumed by the address parser. */
4738 if (ret < 0 || (unsigned int)ret != len)
4742 buf = (uint8_t *)ctx->object + arg->offset;
4743 memcpy(buf, &tmp, size);
4745 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4748 push_args(ctx, arg);
4753 * Parse an IPv4 address.
4755 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Decodes a dotted-quad IPv4 address with inet_pton(AF_INET) into the
 * object at the popped argument's offset.  The token is first copied
 * into a NUL-terminated scratch buffer since str is length-delimited.
 * On inet_pton() failure it falls back to parse_int() so plain integer
 * notation is also accepted.  Mask is filled with 0xff when active.
 * NOTE(review): interior lines (returns/error labels, str2 declaration)
 * are missing from this excerpt.
 */
4759 parse_ipv4_addr(struct context *ctx, const struct token *token,
4760 const char *str, unsigned int len,
4761 void *buf, unsigned int size)
4763 const struct arg *arg = pop_args(ctx);
4768 /* Argument is expected. */
4772 /* Bit-mask fill is not supported. */
4773 if (arg->mask || size != sizeof(tmp))
4775 /* Only network endian is supported. */
4778 memcpy(str2, str, len);
4780 ret = inet_pton(AF_INET, str2, &tmp);
4782 /* Attempt integer parsing. */
4783 push_args(ctx, arg);
4784 return parse_int(ctx, token, str, len, buf, size);
4788 buf = (uint8_t *)ctx->object + arg->offset;
4789 memcpy(buf, &tmp, size);
4791 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4794 push_args(ctx, arg);
4799 * Parse an IPv6 address.
4801 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Decodes an IPv6 address with inet_pton(AF_INET6) into the object at
 * the popped argument's offset, via a NUL-terminated scratch copy of
 * the length-delimited token.  Unlike parse_ipv4_addr(), no integer
 * fallback is visible here.  Mask is filled with 0xff when active.
 * NOTE(review): interior lines (returns/error labels, str2 declaration)
 * are missing from this excerpt.
 */
4805 parse_ipv6_addr(struct context *ctx, const struct token *token,
4806 const char *str, unsigned int len,
4807 void *buf, unsigned int size)
4809 const struct arg *arg = pop_args(ctx);
4811 struct in6_addr tmp;
4815 /* Argument is expected. */
4819 /* Bit-mask fill is not supported. */
4820 if (arg->mask || size != sizeof(tmp))
4822 /* Only network endian is supported. */
4825 memcpy(str2, str, len);
4827 ret = inet_pton(AF_INET6, str2, &tmp);
4832 buf = (uint8_t *)ctx->object + arg->offset;
4833 memcpy(buf, &tmp, size);
4835 memset((uint8_t *)ctx->objmask + arg->offset, 0xff, size);
4838 push_args(ctx, arg);
4842 /** Boolean values (even indices stand for false). */
/*
 * NUL-terminated table of accepted boolean spellings; iteration in
 * parse_boolean()/comp_boolean() stops at the NULL sentinel.  The array
 * body is omitted from this excerpt.
 */
4843 static const char *const boolean_name[] = {
4853 * Parse a boolean value.
4855 * Last argument (ctx->args) is retrieved to determine storage size and
/*
 * Matches the token against boolean_name[]; even table indices mean
 * false, odd mean true (i & 1).  The matched value is normalized to the
 * string "0" or "1" and delegated to parse_int() for storage, so an
 * unmatched token still gets a chance to parse as a plain integer.
 */
4859 parse_boolean(struct context *ctx, const struct token *token,
4860 const char *str, unsigned int len,
4861 void *buf, unsigned int size)
4863 const struct arg *arg = pop_args(ctx);
4867 /* Argument is expected. */
4870 for (i = 0; boolean_name[i]; ++i)
4871 if (!strcmp_partial(boolean_name[i], str, len))
4873 /* Process token as integer. */
4874 if (boolean_name[i])
4875 str = i & 1 ? "1" : "0";
4876 push_args(ctx, arg);
4877 ret = parse_int(ctx, token, str, strlen(str), buf, size);
/* Report the original token length on success, not strlen("0"/"1"). */
4878 return ret > 0 ? (int)len : ret;
4881 /** Parse port and update context. */
/*
 * Parses a port ID token into a throwaway stack buffer via parse_int(),
 * then records the result in ctx->port so later tokens (e.g. rule-ID
 * completion) know which port the command targets.
 */
4883 parse_port(struct context *ctx, const struct token *token,
4884 const char *str, unsigned int len,
4885 void *buf, unsigned int size)
4887 struct buffer *out = &(struct buffer){ .port = 0 };
4895 ctx->objmask = NULL;
4896 size = sizeof(*out);
4898 ret = parse_int(ctx, token, str, len, out, size);
4900 ctx->port = out->port;
4906 /** No completion. */
/* Completion callback stub for tokens that offer no suggestions. */
4908 comp_none(struct context *ctx, const struct token *token,
4909 unsigned int ent, char *buf, unsigned int size)
4919 /** Complete boolean values. */
/*
 * Completion callback: with buf set, copies entry 'ent' from
 * boolean_name[]; with buf NULL, the loop falls through so the entry
 * count can be returned (return statement not visible in this excerpt).
 */
4921 comp_boolean(struct context *ctx, const struct token *token,
4922 unsigned int ent, char *buf, unsigned int size)
4928 for (i = 0; boolean_name[i]; ++i)
4929 if (buf && i == ent)
4930 return strlcpy(buf, boolean_name[i], size);
4936 /** Complete action names. */
/*
 * Completion callback over the next_action[] token table; same
 * buf/ent contract as comp_boolean().
 */
4938 comp_action(struct context *ctx, const struct token *token,
4939 unsigned int ent, char *buf, unsigned int size)
4945 for (i = 0; next_action[i]; ++i)
4946 if (buf && i == ent)
4947 return strlcpy(buf, token_list[next_action[i]].name,
4954 /** Complete available ports. */
/*
 * Completion callback listing every currently attached ethdev port ID
 * via RTE_ETH_FOREACH_DEV.
 */
4956 comp_port(struct context *ctx, const struct token *token,
4957 unsigned int ent, char *buf, unsigned int size)
4964 RTE_ETH_FOREACH_DEV(p) {
4965 if (buf && i == ent)
4966 return snprintf(buf, size, "%u", p);
4974 /** Complete available rule IDs. */
/*
 * Completion callback listing flow rule IDs installed on ctx->port
 * (set earlier by parse_port()).  Bails out when the port is invalid
 * or the command targets all ports, since there is no single flow list
 * to walk in that case.
 */
4976 comp_rule_id(struct context *ctx, const struct token *token,
4977 unsigned int ent, char *buf, unsigned int size)
4980 struct rte_port *port;
4981 struct port_flow *pf;
4984 if (port_id_is_invalid(ctx->port, DISABLED_WARN) ||
4985 ctx->port == (portid_t)RTE_PORT_ALL)
4987 port = &ports[ctx->port];
4988 for (pf = port->flow_list; pf != NULL; pf = pf->next) {
4989 if (buf && i == ent)
4990 return snprintf(buf, size, "%u", pf->id);
4998 /** Complete type field for RSS action. */
/*
 * Completion callback for RSS hash types: offers each entry of
 * rss_type_table[] followed by a final "end" terminator entry.
 */
5000 comp_vc_action_rss_type(struct context *ctx, const struct token *token,
5001 unsigned int ent, char *buf, unsigned int size)
5007 for (i = 0; rss_type_table[i].str; ++i)
5012 return strlcpy(buf, rss_type_table[ent].str, size);
5014 return snprintf(buf, size, "end");
5018 /** Complete queue field for RSS action. */
/*
 * Completion callback for RSS queue indices: suggests the numeric
 * index itself, then an "end" terminator entry.
 */
5020 comp_vc_action_rss_queue(struct context *ctx, const struct token *token,
5021 unsigned int ent, char *buf, unsigned int size)
5028 return snprintf(buf, size, "%u", ent);
5030 return snprintf(buf, size, "end");
5034 /** Internal context. */
/* Single shared parser state; the cmdline API callbacks below all use it. */
5035 static struct context cmd_flow_context;
5037 /** Global parser instance (cmdline API). */
/* Forward declaration; the initializer appears at the end of this file. */
5038 cmdline_parse_inst_t cmd_flow;
5040 /** Initialize context. */
/*
 * Resets the shared parser context field by field before a new command
 * line is tokenized (fields other than objmask are not visible in this
 * excerpt).
 */
5042 cmd_flow_context_init(struct context *ctx)
5044 /* A full memset() is not necessary. */
5054 ctx->objmask = NULL;
5057 /** Parse a token (cmdline API). */
/*
 * Main cmdline-library entry point: consumes one whitespace- or
 * '#'-delimited token from src, detects end-of-line, then tries each
 * candidate token from the top of the ctx->next stack.  The first
 * candidate whose call()/parse_default() consumes exactly 'len' bytes
 * wins; its own next-token lists and argument descriptors are then
 * pushed for subsequent calls.
 * NOTE(review): interior lines (returns, error handling, stack pops)
 * are missing from this excerpt.
 */
5059 cmd_flow_parse(cmdline_parse_token_hdr_t *hdr, const char *src, void *result,
5062 struct context *ctx = &cmd_flow_context;
5063 const struct token *token;
5064 const enum index *list;
5069 token = &token_list[ctx->curr];
5070 /* Check argument length. */
5073 for (len = 0; src[len]; ++len)
5074 if (src[len] == '#' || isspace(src[len]))
5078 /* Last argument and EOL detection. */
5079 for (i = len; src[i]; ++i)
5080 if (src[i] == '#' || src[i] == '\r' || src[i] == '\n')
5082 else if (!isspace(src[i])) {
5087 if (src[i] == '\r' || src[i] == '\n') {
5091 /* Initialize context if necessary. */
5092 if (!ctx->next_num) {
5095 ctx->next[ctx->next_num++] = token->next[0];
5097 /* Process argument through candidates. */
5098 ctx->prev = ctx->curr;
5099 list = ctx->next[ctx->next_num - 1];
5100 for (i = 0; list[i]; ++i) {
5101 const struct token *next = &token_list[list[i]];
5104 ctx->curr = list[i];
/* Tokens with a custom call() override the default name matcher. */
5106 tmp = next->call(ctx, next, src, len, result, size);
5108 tmp = parse_default(ctx, next, src, len, result, size);
/* Candidate must consume the token exactly; otherwise try the next one. */
5109 if (tmp == -1 || tmp != len)
5117 /* Push subsequent tokens if any. */
5119 for (i = 0; token->next[i]; ++i) {
5120 if (ctx->next_num == RTE_DIM(ctx->next))
5122 ctx->next[ctx->next_num++] = token->next[i];
5124 /* Push arguments if any. */
5126 for (i = 0; token->args[i]; ++i) {
5127 if (ctx->args_num == RTE_DIM(ctx->args))
5129 ctx->args[ctx->args_num++] = token->args[i];
5134 /** Return number of completion entries (cmdline API). */
/*
 * Counts the candidate tokens on top of the context's next-token stack
 * (falling back to the current token's first list when the stack is
 * empty).  When exactly one candidate with a completion callback
 * remains, that callback decides the entry count instead.
 */
5136 cmd_flow_complete_get_nb(cmdline_parse_token_hdr_t *hdr)
5138 struct context *ctx = &cmd_flow_context;
5139 const struct token *token = &token_list[ctx->curr];
5140 const enum index *list;
5144 /* Count number of tokens in current list. */
5146 list = ctx->next[ctx->next_num - 1];
5148 list = token->next[0];
5149 for (i = 0; list[i]; ++i)
5154 * If there is a single token, use its completion callback, otherwise
5155 * return the number of entries.
5157 token = &token_list[list[0]];
5158 if (i == 1 && token->comp) {
5159 /* Save index for cmd_flow_get_help(). */
5160 ctx->prev = list[0];
5161 return token->comp(ctx, token, 0, NULL, 0);
5166 /** Return a completion entry (cmdline API). */
/*
 * Writes completion entry 'index' into dst.  Mirrors the counting logic
 * of cmd_flow_complete_get_nb(): a lone candidate with a comp()
 * callback delegates to it, otherwise the token name at 'index' in the
 * current candidate list is copied.  ctx->prev is updated so
 * cmd_flow_get_help() can describe the chosen entry.
 */
5168 cmd_flow_complete_get_elt(cmdline_parse_token_hdr_t *hdr, int index,
5169 char *dst, unsigned int size)
5171 struct context *ctx = &cmd_flow_context;
5172 const struct token *token = &token_list[ctx->curr];
5173 const enum index *list;
5177 /* Count number of tokens in current list. */
5179 list = ctx->next[ctx->next_num - 1];
5181 list = token->next[0];
5182 for (i = 0; list[i]; ++i)
5186 /* If there is a single token, use its completion callback. */
5187 token = &token_list[list[0]];
5188 if (i == 1 && token->comp) {
5189 /* Save index for cmd_flow_get_help(). */
5190 ctx->prev = list[0];
5191 return token->comp(ctx, token, index, dst, size) < 0 ? -1 : 0;
5193 /* Otherwise make sure the index is valid and use defaults. */
5196 token = &token_list[list[index]];
5197 strlcpy(dst, token->name, size);
5198 /* Save index for cmd_flow_get_help(). */
5199 ctx->prev = list[index];
5203 /** Populate help strings for current token (cmdline API). */
/*
 * Writes the previously selected token's type label (or "TOKEN") into
 * dst and points the global cmd_flow.help_str at its help text, falling
 * back to the token name when no help string exists.
 */
5205 cmd_flow_get_help(cmdline_parse_token_hdr_t *hdr, char *dst, unsigned int size)
5207 struct context *ctx = &cmd_flow_context;
5208 const struct token *token = &token_list[ctx->prev];
5213 /* Set token type and update global help with details. */
5214 strlcpy(dst, (token->type ? token->type : "TOKEN"), size);
5216 cmd_flow.help_str = token->help;
5218 cmd_flow.help_str = token->name;
5222 /** Token definition template (cmdline API). */
/*
 * Shared ops header returned for every dynamic token by cmd_flow_tok();
 * all four cmdline callbacks route through the functions above.
 */
5223 static struct cmdline_token_hdr cmd_flow_token_hdr = {
5224 .ops = &(struct cmdline_token_ops){
5225 .parse = cmd_flow_parse,
5226 .complete_get_nb = cmd_flow_complete_get_nb,
5227 .complete_get_elt = cmd_flow_complete_get_elt,
5228 .get_help = cmd_flow_get_help,
5233 /** Populate the next dynamic token. */
/*
 * cmdline token generator: reinitializes the shared context when asked
 * for the first token of a new line, stops yielding tokens once the
 * context expects no more, and checks whether an EOL already seen means
 * the command may legitimately end here.  Every yielded token shares
 * cmd_flow_token_hdr.
 * NOTE(review): interior lines (returns, the END check inside the loop)
 * are missing from this excerpt.
 */
5235 cmd_flow_tok(cmdline_parse_token_hdr_t **hdr,
5236 cmdline_parse_token_hdr_t **hdr_inst)
5238 struct context *ctx = &cmd_flow_context;
5240 /* Always reinitialize context before requesting the first token. */
5241 if (!(hdr_inst - cmd_flow.tokens))
5242 cmd_flow_context_init(ctx);
5243 /* Return NULL when no more tokens are expected. */
5244 if (!ctx->next_num && ctx->curr) {
5248 /* Determine if command should end here. */
5249 if (ctx->eol && ctx->last && ctx->next_num) {
5250 const enum index *list = ctx->next[ctx->next_num - 1];
5253 for (i = 0; list[i]; ++i) {
5260 *hdr = &cmd_flow_token_hdr;
5263 /** Dispatch parsed buffer to function calls. */
/*
 * Executes a fully parsed command buffer by dispatching on in->command
 * to the matching port_flow_*() implementation in testpmd's flow
 * management layer.
 */
5265 cmd_flow_parsed(const struct buffer *in)
5267 switch (in->command) {
5269 port_flow_validate(in->port, &in->args.vc.attr,
5270 in->args.vc.pattern, in->args.vc.actions);
5273 port_flow_create(in->port, &in->args.vc.attr,
5274 in->args.vc.pattern, in->args.vc.actions);
5277 port_flow_destroy(in->port, in->args.destroy.rule_n,
5278 in->args.destroy.rule);
5281 port_flow_flush(in->port);
5284 port_flow_query(in->port, in->args.query.rule,
5285 &in->args.query.action);
5288 port_flow_list(in->port, in->args.list.group_n,
5289 in->args.list.group);
5292 port_flow_isolate(in->port, in->args.isolate.set);
5299 /** Token generator and output processing callback (cmdline API). */
/*
 * Dual-purpose callback: in the visible lines it forwards arg0/arg2 to
 * cmd_flow_tok() for token generation and arg0 to cmd_flow_parsed()
 * for execution; the branch selecting between the two modes is not
 * visible in this excerpt.
 */
5301 cmd_flow_cb(void *arg0, struct cmdline *cl, void *arg2)
5304 cmd_flow_tok(arg0, arg2);
5306 cmd_flow_parsed(arg0);
5309 /** Global parser instance (cmdline API). */
5310 cmdline_parse_inst_t cmd_flow = {
5312 .data = NULL, /**< Unused. */
5313 .help_str = NULL, /**< Updated by cmd_flow_get_help(). */
5316 }, /**< Tokens are returned by cmd_flow_tok(). */