X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Fcmdline_flow.c;h=36659a64af899350912ac77da2fb43f02e7b2118;hb=cfe6fab0295cfa744d3f9034069edfa34ae87aa1;hp=105f7f4c4ea203a73213afa26c4e42605e12f22a;hpb=39e5e20f0defa67627e5f53c2d9d62db51fa2d0c;p=dpdk.git diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c index 105f7f4c4e..36659a64af 100644 --- a/app/test-pmd/cmdline_flow.c +++ b/app/test-pmd/cmdline_flow.c @@ -91,6 +91,8 @@ enum index { ITEM_PHY_PORT_INDEX, ITEM_PORT_ID, ITEM_PORT_ID_ID, + ITEM_MARK, + ITEM_MARK_ID, ITEM_RAW, ITEM_RAW_RELATIVE, ITEM_RAW_SEARCH, @@ -156,6 +158,28 @@ enum index { ITEM_GENEVE_PROTO, ITEM_VXLAN_GPE, ITEM_VXLAN_GPE_VNI, + ITEM_ARP_ETH_IPV4, + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_IPV6_EXT, + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_ICMP6, + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_ICMP6_ND_OPT_TLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, + ITEM_META, + ITEM_META_DATA, /* Validate/create actions. */ ACTIONS, @@ -163,6 +187,8 @@ enum index { ACTION_END, ACTION_VOID, ACTION_PASSTHRU, + ACTION_JUMP, + ACTION_JUMP_GROUP, ACTION_MARK, ACTION_MARK_ID, ACTION_FLAG, @@ -170,6 +196,8 @@ enum index { ACTION_QUEUE_INDEX, ACTION_DROP, ACTION_COUNT, + ACTION_COUNT_SHARED, + ACTION_COUNT_ID, ACTION_RSS, ACTION_RSS_FUNC, ACTION_RSS_LEVEL, @@ -194,6 +222,55 @@ enum index { ACTION_PORT_ID_ID, ACTION_METER, ACTION_METER_ID, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_OF_PUSH_MPLS, + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, + ACTION_L2_ENCAP, + ACTION_L2_DECAP, + ACTION_MPLSOGRE_ENCAP, + ACTION_MPLSOGRE_DECAP, + ACTION_MPLSOUDP_ENCAP, + ACTION_MPLSOUDP_DECAP, + ACTION_SET_IPV4_SRC, + ACTION_SET_IPV4_SRC_IPV4_SRC, + ACTION_SET_IPV4_DST, + ACTION_SET_IPV4_DST_IPV4_DST, + ACTION_SET_IPV6_SRC, + ACTION_SET_IPV6_SRC_IPV6_SRC, + ACTION_SET_IPV6_DST, + ACTION_SET_IPV6_DST_IPV6_DST, + ACTION_SET_TP_SRC, + ACTION_SET_TP_SRC_TP_SRC, + ACTION_SET_TP_DST, + ACTION_SET_TP_DST_TP_DST, + ACTION_MAC_SWAP, + ACTION_DEC_TTL, + ACTION_SET_TTL, + ACTION_SET_TTL_TTL, + ACTION_SET_MAC_SRC, + ACTION_SET_MAC_SRC_MAC_SRC, + ACTION_SET_MAC_DST, + ACTION_SET_MAC_DST_MAC_DST, }; /** Maximum size for pattern in struct rte_flow_item_raw. */ @@ -213,6 +290,55 @@ struct action_rss_data { uint16_t queue[ACTION_RSS_QUEUE_NUM]; }; +/** Maximum number of items in struct rte_flow_action_vxlan_encap. */ +#define ACTION_VXLAN_ENCAP_ITEMS_NUM 6 + +/** Storage for struct rte_flow_action_vxlan_encap including external data. 
*/ +struct action_vxlan_encap_data { + struct rte_flow_action_vxlan_encap conf; + struct rte_flow_item items[ACTION_VXLAN_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_udp item_udp; + struct rte_flow_item_vxlan item_vxlan; +}; + +/** Maximum number of items in struct rte_flow_action_nvgre_encap. */ +#define ACTION_NVGRE_ENCAP_ITEMS_NUM 5 + +/** Storage for struct rte_flow_action_nvgre_encap including external data. */ +struct action_nvgre_encap_data { + struct rte_flow_action_nvgre_encap conf; + struct rte_flow_item items[ACTION_NVGRE_ENCAP_ITEMS_NUM]; + struct rte_flow_item_eth item_eth; + struct rte_flow_item_vlan item_vlan; + union { + struct rte_flow_item_ipv4 item_ipv4; + struct rte_flow_item_ipv6 item_ipv6; + }; + struct rte_flow_item_nvgre item_nvgre; +}; + +/** Maximum data size in struct rte_flow_action_raw_encap. */ +#define ACTION_RAW_ENCAP_MAX_DATA 128 + +/** Storage for struct rte_flow_action_raw_encap including external data. */ +struct action_raw_encap_data { + struct rte_flow_action_raw_encap conf; + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; + uint8_t preserve[ACTION_RAW_ENCAP_MAX_DATA]; +}; + +/** Storage for struct rte_flow_action_raw_decap including external data. */ +struct action_raw_decap_data { + struct rte_flow_action_raw_decap conf; + uint8_t data[ACTION_RAW_ENCAP_MAX_DATA]; +}; + /** Maximum number of subsequent tokens and arguments on the stack. */ #define CTX_STACK_SIZE 16 @@ -377,7 +503,7 @@ struct buffer { } destroy; /**< Destroy arguments. */ struct { uint32_t rule; - enum rte_flow_action_type action; + struct rte_flow_action action; } query; /**< Query arguments. 
*/ struct { uint32_t *group; @@ -453,6 +579,7 @@ static const enum index next_item[] = { ITEM_VF, ITEM_PHY_PORT, ITEM_PORT_ID, + ITEM_MARK, ITEM_RAW, ITEM_ETH, ITEM_VLAN, @@ -473,6 +600,15 @@ static const enum index next_item[] = { ITEM_GTPU, ITEM_GENEVE, ITEM_VXLAN_GPE, + ITEM_ARP_ETH_IPV4, + ITEM_IPV6_EXT, + ITEM_ICMP6, + ITEM_ICMP6_ND_NS, + ITEM_ICMP6_ND_NA, + ITEM_ICMP6_ND_OPT, + ITEM_ICMP6_ND_OPT_SLA_ETH, + ITEM_ICMP6_ND_OPT_TLA_ETH, + ITEM_META, ZERO, }; @@ -506,6 +642,12 @@ static const enum index item_port_id[] = { ZERO, }; +static const enum index item_mark[] = { + ITEM_MARK_ID, + ITEM_NEXT, + ZERO, +}; + static const enum index item_raw[] = { ITEM_RAW_RELATIVE, ITEM_RAW_SEARCH, @@ -635,10 +777,69 @@ static const enum index item_vxlan_gpe[] = { ZERO, }; +static const enum index item_arp_eth_ipv4[] = { + ITEM_ARP_ETH_IPV4_SHA, + ITEM_ARP_ETH_IPV4_SPA, + ITEM_ARP_ETH_IPV4_THA, + ITEM_ARP_ETH_IPV4_TPA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_ipv6_ext[] = { + ITEM_IPV6_EXT_NEXT_HDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6[] = { + ITEM_ICMP6_TYPE, + ITEM_ICMP6_CODE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_ns[] = { + ITEM_ICMP6_ND_NS_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_na[] = { + ITEM_ICMP6_ND_NA_TARGET_ADDR, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt[] = { + ITEM_ICMP6_ND_OPT_TYPE, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_sla_eth[] = { + ITEM_ICMP6_ND_OPT_SLA_ETH_SLA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_icmp6_nd_opt_tla_eth[] = { + ITEM_ICMP6_ND_OPT_TLA_ETH_TLA, + ITEM_NEXT, + ZERO, +}; + +static const enum index item_meta[] = { + ITEM_META_DATA, + ITEM_NEXT, + ZERO, +}; + static const enum index next_action[] = { ACTION_END, ACTION_VOID, ACTION_PASSTHRU, + ACTION_JUMP, ACTION_MARK, ACTION_FLAG, ACTION_QUEUE, @@ -650,6 +851,39 @@ static const enum index next_action[] = { ACTION_PHY_PORT, ACTION_PORT_ID, ACTION_METER, + ACTION_OF_SET_MPLS_TTL, + ACTION_OF_DEC_MPLS_TTL, + ACTION_OF_SET_NW_TTL, + ACTION_OF_DEC_NW_TTL, + ACTION_OF_COPY_TTL_OUT, + ACTION_OF_COPY_TTL_IN, + ACTION_OF_POP_VLAN, + ACTION_OF_PUSH_VLAN, + ACTION_OF_SET_VLAN_VID, + ACTION_OF_SET_VLAN_PCP, + ACTION_OF_POP_MPLS, + ACTION_OF_PUSH_MPLS, + ACTION_VXLAN_ENCAP, + ACTION_VXLAN_DECAP, + ACTION_NVGRE_ENCAP, + ACTION_NVGRE_DECAP, + ACTION_L2_ENCAP, + ACTION_L2_DECAP, + ACTION_MPLSOGRE_ENCAP, + ACTION_MPLSOGRE_DECAP, + ACTION_MPLSOUDP_ENCAP, + ACTION_MPLSOUDP_DECAP, + ACTION_SET_IPV4_SRC, + ACTION_SET_IPV4_DST, + ACTION_SET_IPV6_SRC, + ACTION_SET_IPV6_DST, + ACTION_SET_TP_SRC, + ACTION_SET_TP_DST, + ACTION_MAC_SWAP, + ACTION_DEC_TTL, + ACTION_SET_TTL, + ACTION_SET_MAC_SRC, + ACTION_SET_MAC_DST, ZERO, }; @@ -665,6 +899,13 @@ static const enum index action_queue[] = { ZERO, }; +static const enum index action_count[] = { + ACTION_COUNT_ID, + ACTION_COUNT_SHARED, + ACTION_NEXT, + ZERO, +}; + static const enum index action_rss[] = { ACTION_RSS_FUNC, ACTION_RSS_LEVEL, @@ -703,6 +944,108 @@ static const enum index action_meter[] = { ZERO, }; +static const enum index action_of_set_mpls_ttl[] = { + ACTION_OF_SET_MPLS_TTL_MPLS_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_nw_ttl[] = { + ACTION_OF_SET_NW_TTL_NW_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_vlan[] = { + ACTION_OF_PUSH_VLAN_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_vid[] = { + 
ACTION_OF_SET_VLAN_VID_VLAN_VID, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_set_vlan_pcp[] = { + ACTION_OF_SET_VLAN_PCP_VLAN_PCP, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_pop_mpls[] = { + ACTION_OF_POP_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_of_push_mpls[] = { + ACTION_OF_PUSH_MPLS_ETHERTYPE, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv4_src[] = { + ACTION_SET_IPV4_SRC_IPV4_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_mac_src[] = { + ACTION_SET_MAC_SRC_MAC_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv4_dst[] = { + ACTION_SET_IPV4_DST_IPV4_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv6_src[] = { + ACTION_SET_IPV6_SRC_IPV6_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ipv6_dst[] = { + ACTION_SET_IPV6_DST_IPV6_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_tp_src[] = { + ACTION_SET_TP_SRC_TP_SRC, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_tp_dst[] = { + ACTION_SET_TP_DST_TP_DST, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_ttl[] = { + ACTION_SET_TTL_TTL, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_jump[] = { + ACTION_JUMP_GROUP, + ACTION_NEXT, + ZERO, +}; + +static const enum index action_set_mac_dst[] = { + ACTION_SET_MAC_DST_MAC_DST, + ACTION_NEXT, + ZERO, +}; + static int parse_init(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); @@ -725,6 +1068,30 @@ static int parse_vc_action_rss_type(struct context *, const struct token *, static int parse_vc_action_rss_queue(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); +static int parse_vc_action_vxlan_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_nvgre_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_l2_encap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_l2_decap(struct context *, const struct token *, + const char *, unsigned int, void *, + unsigned int); +static int parse_vc_action_mplsogre_encap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsogre_decap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsoudp_encap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); +static int parse_vc_action_mplsoudp_decap(struct context *, + const struct token *, const char *, + unsigned int, void *, unsigned int); static int parse_destroy(struct context *, const struct token *, const char *, unsigned int, void *, unsigned int); @@ -930,7 +1297,7 @@ static const struct token token_list[] = { .next = NEXT(NEXT_ENTRY(QUERY_ACTION), NEXT_ENTRY(RULE_ID), NEXT_ENTRY(PORT_ID)), - .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action), + .args = ARGS(ARGS_ENTRY(struct buffer, args.query.action.type), ARGS_ENTRY(struct buffer, args.query.rule), ARGS_ENTRY(struct buffer, port)), .call = parse_query, @@ -1127,6 +1494,19 @@ static const struct token token_list[] = { .next = NEXT(item_port_id, NEXT_ENTRY(UNSIGNED), item_param), .args = ARGS(ARGS_ENTRY(struct 
rte_flow_item_port_id, id)), }, + [ITEM_MARK] = { + .name = "mark", + .help = "match traffic against value set in previously matched rule", + .priv = PRIV_ITEM(MARK, sizeof(struct rte_flow_item_mark)), + .next = NEXT(item_mark), + .call = parse_vc, + }, + [ITEM_MARK_ID] = { + .name = "id", + .help = "Integer value to match against", + .next = NEXT(item_mark, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY(struct rte_flow_item_mark, id)), + }, [ITEM_RAW] = { .name = "raw", .help = "match an arbitrary byte string", @@ -1584,6 +1964,181 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan_gpe, vni)), }, + [ITEM_ARP_ETH_IPV4] = { + .name = "arp_eth_ipv4", + .help = "match ARP header for Ethernet/IPv4", + .priv = PRIV_ITEM(ARP_ETH_IPV4, + sizeof(struct rte_flow_item_arp_eth_ipv4)), + .next = NEXT(item_arp_eth_ipv4), + .call = parse_vc, + }, + [ITEM_ARP_ETH_IPV4_SHA] = { + .name = "sha", + .help = "sender hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + sha)), + }, + [ITEM_ARP_ETH_IPV4_SPA] = { + .name = "spa", + .help = "sender IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + spa)), + }, + [ITEM_ARP_ETH_IPV4_THA] = { + .name = "tha", + .help = "target hardware address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tha)), + }, + [ITEM_ARP_ETH_IPV4_TPA] = { + .name = "tpa", + .help = "target IPv4 address", + .next = NEXT(item_arp_eth_ipv4, NEXT_ENTRY(IPV4_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_arp_eth_ipv4, + tpa)), + }, + [ITEM_IPV6_EXT] = { + .name = "ipv6_ext", + .help = "match presence of any IPv6 extension header", + .priv = PRIV_ITEM(IPV6_EXT, + sizeof(struct rte_flow_item_ipv6_ext)), + .next = NEXT(item_ipv6_ext), + .call = parse_vc, + }, + [ITEM_IPV6_EXT_NEXT_HDR] = { + .name = "next_hdr", + .help = "next header", + .next = NEXT(item_ipv6_ext, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_ipv6_ext, + next_hdr)), + }, + [ITEM_ICMP6] = { + .name = "icmp6", + .help = "match any ICMPv6 header", + .priv = PRIV_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)), + .next = NEXT(item_icmp6), + .call = parse_vc, + }, + [ITEM_ICMP6_TYPE] = { + .name = "type", + .help = "ICMPv6 type", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + type)), + }, + [ITEM_ICMP6_CODE] = { + .name = "code", + .help = "ICMPv6 code", + .next = NEXT(item_icmp6, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6, + code)), + }, + [ITEM_ICMP6_ND_NS] = { + .name = "icmp6_nd_ns", + .help = "match ICMPv6 neighbor discovery solicitation", + .priv = PRIV_ITEM(ICMP6_ND_NS, + sizeof(struct rte_flow_item_icmp6_nd_ns)), + .next = NEXT(item_icmp6_nd_ns), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NS_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_ns, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_ns, + target_addr)), + }, + [ITEM_ICMP6_ND_NA] = { + .name = "icmp6_nd_na", + .help = "match ICMPv6 neighbor discovery advertisement", + .priv = PRIV_ITEM(ICMP6_ND_NA, + sizeof(struct 
rte_flow_item_icmp6_nd_na)), + .next = NEXT(item_icmp6_nd_na), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_NA_TARGET_ADDR] = { + .name = "target_addr", + .help = "target address", + .next = NEXT(item_icmp6_nd_na, NEXT_ENTRY(IPV6_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_na, + target_addr)), + }, + [ITEM_ICMP6_ND_OPT] = { + .name = "icmp6_nd_opt", + .help = "match presence of any ICMPv6 neighbor discovery" + " option", + .priv = PRIV_ITEM(ICMP6_ND_OPT, + sizeof(struct rte_flow_item_icmp6_nd_opt)), + .next = NEXT(item_icmp6_nd_opt), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TYPE] = { + .name = "type", + .help = "ND option type", + .next = NEXT(item_icmp6_nd_opt, NEXT_ENTRY(UNSIGNED), + item_param), + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_icmp6_nd_opt, + type)), + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH] = { + .name = "icmp6_nd_opt_sla_eth", + .help = "match ICMPv6 neighbor discovery source Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_SLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)), + .next = NEXT(item_icmp6_nd_opt_sla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_SLA_ETH_SLA] = { + .name = "sla", + .help = "source Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_sla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_sla_eth, sla)), + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH] = { + .name = "icmp6_nd_opt_tla_eth", + .help = "match ICMPv6 neighbor discovery target Ethernet" + " link-layer address option", + .priv = PRIV_ITEM + (ICMP6_ND_OPT_TLA_ETH, + sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)), + .next = NEXT(item_icmp6_nd_opt_tla_eth), + .call = parse_vc, + }, + [ITEM_ICMP6_ND_OPT_TLA_ETH_TLA] = { + .name = "tla", + .help = "target Ethernet LLA", + .next = NEXT(item_icmp6_nd_opt_tla_eth, NEXT_ENTRY(MAC_ADDR), + item_param), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_item_icmp6_nd_opt_tla_eth, tla)), + }, + [ITEM_META] = { + .name = "meta", + .help = "match metadata header", + .priv = PRIV_ITEM(META, sizeof(struct rte_flow_item_meta)), + .next = NEXT(item_meta), + .call = parse_vc, + }, + [ITEM_META_DATA] = { + .name = "data", + .help = "metadata value", + .next = NEXT(item_meta, NEXT_ENTRY(UNSIGNED), item_param), + .args = ARGS(ARGS_ENTRY_MASK_HTON(struct rte_flow_item_meta, + data, "\xff\xff\xff\xff")), + }, /* Validate/create actions. 
*/ [ACTIONS] = { @@ -1617,6 +2172,20 @@ static const struct token token_list[] = { .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), .call = parse_vc, }, + [ACTION_JUMP] = { + .name = "jump", + .help = "redirect traffic to a given group", + .priv = PRIV_ACTION(JUMP, sizeof(struct rte_flow_action_jump)), + .next = NEXT(action_jump), + .call = parse_vc, + }, + [ACTION_JUMP_GROUP] = { + .name = "group", + .help = "group to redirect traffic to", + .next = NEXT(action_jump, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_jump, group)), + .call = parse_vc_conf, + }, [ACTION_MARK] = { .name = "mark", .help = "attach 32 bit value to packets", @@ -1663,10 +2232,26 @@ static const struct token token_list[] = { [ACTION_COUNT] = { .name = "count", .help = "enable counters for this rule", - .priv = PRIV_ACTION(COUNT, 0), - .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .priv = PRIV_ACTION(COUNT, + sizeof(struct rte_flow_action_count)), + .next = NEXT(action_count), .call = parse_vc, }, + [ACTION_COUNT_ID] = { + .name = "identifier", + .help = "counter identifier to use", + .next = NEXT(action_count, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_count, id)), + .call = parse_vc_conf, + }, + [ACTION_COUNT_SHARED] = { + .name = "shared", + .help = "shared counter", + .next = NEXT(action_count, NEXT_ENTRY(BOOLEAN)), + .args = ARGS(ARGS_ENTRY_BF(struct rte_flow_action_count, + shared, 1)), + .call = parse_vc_conf, + }, [ACTION_RSS] = { .name = "rss", .help = "spread packets among several queues", @@ -1845,93 +2430,506 @@ static const struct token token_list[] = { .args = ARGS(ARGS_ENTRY(struct rte_flow_action_meter, mtr_id)), .call = parse_vc_conf, }, -}; - -/** Remove and return last entry from argument stack. */ -static const struct arg * -pop_args(struct context *ctx) -{ - return ctx->args_num ? ctx->args[--ctx->args_num] : NULL; -} - -/** Add entry on top of the argument stack. */ -static int -push_args(struct context *ctx, const struct arg *arg) -{ - if (ctx->args_num == CTX_STACK_SIZE) - return -1; - ctx->args[ctx->args_num++] = arg; - return 0; -} - -/** Spread value into buffer according to bit-mask. */ -static size_t -arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg) -{ - uint32_t i = arg->size; - uint32_t end = 0; - int sub = 1; - int add = 0; - size_t len = 0; - - if (!arg->mask) - return 0; -#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN - if (!arg->hton) { - i = 0; - end = arg->size; - sub = 0; - add = 1; - } -#endif - while (i != end) { - unsigned int shift = 0; - uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub); - - for (shift = 0; arg->mask[i] >> shift; ++shift) { - if (!(arg->mask[i] & (1 << shift))) - continue; - ++len; - if (!dst) - continue; - *buf &= ~(1 << shift); - *buf |= (val & 1) << shift; - val >>= 1; - } - i += add; - } - return len; -} - -/** Compare a string with a partial one of a given length. */ -static int -strcmp_partial(const char *full, const char *partial, size_t partial_len) -{ - int r = strncmp(full, partial, partial_len); - - if (r) - return r; - if (strlen(full) <= partial_len) - return 0; - return full[partial_len]; -} - -/** - * Parse a prefix length and generate a bit-mask. - * - * Last argument (ctx->args) is retrieved to determine mask size, storage - * location and whether the result must use network byte ordering. 
- */ -static int -parse_prefix(struct context *ctx, const struct token *token, - const char *str, unsigned int len, - void *buf, unsigned int size) -{ - const struct arg *arg = pop_args(ctx); - static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff"; - char *end; - uintmax_t u; - unsigned int bytes; + [ACTION_OF_SET_MPLS_TTL] = { + .name = "of_set_mpls_ttl", + .help = "OpenFlow's OFPAT_SET_MPLS_TTL", + .priv = PRIV_ACTION + (OF_SET_MPLS_TTL, + sizeof(struct rte_flow_action_of_set_mpls_ttl)), + .next = NEXT(action_of_set_mpls_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_MPLS_TTL_MPLS_TTL] = { + .name = "mpls_ttl", + .help = "MPLS TTL", + .next = NEXT(action_of_set_mpls_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_mpls_ttl, + mpls_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_MPLS_TTL] = { + .name = "of_dec_mpls_ttl", + .help = "OpenFlow's OFPAT_DEC_MPLS_TTL", + .priv = PRIV_ACTION(OF_DEC_MPLS_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL] = { + .name = "of_set_nw_ttl", + .help = "OpenFlow's OFPAT_SET_NW_TTL", + .priv = PRIV_ACTION + (OF_SET_NW_TTL, + sizeof(struct rte_flow_action_of_set_nw_ttl)), + .next = NEXT(action_of_set_nw_ttl), + .call = parse_vc, + }, + [ACTION_OF_SET_NW_TTL_NW_TTL] = { + .name = "nw_ttl", + .help = "IP TTL", + .next = NEXT(action_of_set_nw_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY(struct rte_flow_action_of_set_nw_ttl, + nw_ttl)), + .call = parse_vc_conf, + }, + [ACTION_OF_DEC_NW_TTL] = { + .name = "of_dec_nw_ttl", + .help = "OpenFlow's OFPAT_DEC_NW_TTL", + .priv = PRIV_ACTION(OF_DEC_NW_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_OUT] = { + .name = "of_copy_ttl_out", + .help = "OpenFlow's OFPAT_COPY_TTL_OUT", + .priv = PRIV_ACTION(OF_COPY_TTL_OUT, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_COPY_TTL_IN] = { + .name = "of_copy_ttl_in", + .help = "OpenFlow's OFPAT_COPY_TTL_IN", + .priv = PRIV_ACTION(OF_COPY_TTL_IN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_POP_VLAN] = { + .name = "of_pop_vlan", + .help = "OpenFlow's OFPAT_POP_VLAN", + .priv = PRIV_ACTION(OF_POP_VLAN, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN] = { + .name = "of_push_vlan", + .help = "OpenFlow's OFPAT_PUSH_VLAN", + .priv = PRIV_ACTION + (OF_PUSH_VLAN, + sizeof(struct rte_flow_action_of_push_vlan)), + .next = NEXT(action_of_push_vlan), + .call = parse_vc, + }, + [ACTION_OF_PUSH_VLAN_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_vlan, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_vlan, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_VID] = { + .name = "of_set_vlan_vid", + .help = "OpenFlow's OFPAT_SET_VLAN_VID", + .priv = PRIV_ACTION + (OF_SET_VLAN_VID, + sizeof(struct rte_flow_action_of_set_vlan_vid)), + .next = NEXT(action_of_set_vlan_vid), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_VID_VLAN_VID] = { + .name = "vlan_vid", + .help = "VLAN id", + .next = NEXT(action_of_set_vlan_vid, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_vid, + vlan_vid)), + .call = parse_vc_conf, + }, + [ACTION_OF_SET_VLAN_PCP] = { + .name = "of_set_vlan_pcp", + .help = "OpenFlow's OFPAT_SET_VLAN_PCP", + .priv = PRIV_ACTION + (OF_SET_VLAN_PCP, + sizeof(struct 
rte_flow_action_of_set_vlan_pcp)), + .next = NEXT(action_of_set_vlan_pcp), + .call = parse_vc, + }, + [ACTION_OF_SET_VLAN_PCP_VLAN_PCP] = { + .name = "vlan_pcp", + .help = "VLAN priority", + .next = NEXT(action_of_set_vlan_pcp, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_set_vlan_pcp, + vlan_pcp)), + .call = parse_vc_conf, + }, + [ACTION_OF_POP_MPLS] = { + .name = "of_pop_mpls", + .help = "OpenFlow's OFPAT_POP_MPLS", + .priv = PRIV_ACTION(OF_POP_MPLS, + sizeof(struct rte_flow_action_of_pop_mpls)), + .next = NEXT(action_of_pop_mpls), + .call = parse_vc, + }, + [ACTION_OF_POP_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_pop_mpls, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_pop_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_OF_PUSH_MPLS] = { + .name = "of_push_mpls", + .help = "OpenFlow's OFPAT_PUSH_MPLS", + .priv = PRIV_ACTION + (OF_PUSH_MPLS, + sizeof(struct rte_flow_action_of_push_mpls)), + .next = NEXT(action_of_push_mpls), + .call = parse_vc, + }, + [ACTION_OF_PUSH_MPLS_ETHERTYPE] = { + .name = "ethertype", + .help = "EtherType", + .next = NEXT(action_of_push_mpls, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_of_push_mpls, + ethertype)), + .call = parse_vc_conf, + }, + [ACTION_VXLAN_ENCAP] = { + .name = "vxlan_encap", + .help = "VXLAN encapsulation, uses configuration set by \"set" + " vxlan\"", + .priv = PRIV_ACTION(VXLAN_ENCAP, + sizeof(struct action_vxlan_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_vxlan_encap, + }, + [ACTION_VXLAN_DECAP] = { + .name = "vxlan_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the VXLAN tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(VXLAN_DECAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_NVGRE_ENCAP] = { + .name = "nvgre_encap", + .help = "NVGRE encapsulation, uses configuration set by \"set" + " nvgre\"", + .priv = PRIV_ACTION(NVGRE_ENCAP, + sizeof(struct action_nvgre_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_nvgre_encap, + }, + [ACTION_NVGRE_DECAP] = { + .name = "nvgre_decap", + .help = "Performs a decapsulation action by stripping all" + " headers of the NVGRE tunnel network overlay from the" + " matched flow.", + .priv = PRIV_ACTION(NVGRE_DECAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_L2_ENCAP] = { + .name = "l2_encap", + .help = "l2 encap, uses configuration set by" + " \"set l2_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_l2_encap, + }, + [ACTION_L2_DECAP] = { + .name = "l2_decap", + .help = "l2 decap, uses configuration set by" + " \"set l2_decap\"", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_l2_decap, + }, + [ACTION_MPLSOGRE_ENCAP] = { + .name = "mplsogre_encap", + .help = "mplsogre encapsulation, uses configuration set by" + " \"set mplsogre_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsogre_encap, + }, + [ACTION_MPLSOGRE_DECAP] = { + .name = "mplsogre_decap", + .help = "mplsogre decapsulation, uses configuration set by" + " \"set mplsogre_decap\"", + .priv 
= PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsogre_decap, + }, + [ACTION_MPLSOUDP_ENCAP] = { + .name = "mplsoudp_encap", + .help = "mplsoudp encapsulation, uses configuration set by" + " \"set mplsoudp_encap\"", + .priv = PRIV_ACTION(RAW_ENCAP, + sizeof(struct action_raw_encap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsoudp_encap, + }, + [ACTION_MPLSOUDP_DECAP] = { + .name = "mplsoudp_decap", + .help = "mplsoudp decapsulation, uses configuration set by" + " \"set mplsoudp_decap\"", + .priv = PRIV_ACTION(RAW_DECAP, + sizeof(struct action_raw_decap_data)), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc_action_mplsoudp_decap, + }, + [ACTION_SET_IPV4_SRC] = { + .name = "set_ipv4_src", + .help = "Set a new IPv4 source address in the outermost" + " IPv4 header", + .priv = PRIV_ACTION(SET_IPV4_SRC, + sizeof(struct rte_flow_action_set_ipv4)), + .next = NEXT(action_set_ipv4_src), + .call = parse_vc, + }, + [ACTION_SET_IPV4_SRC_IPV4_SRC] = { + .name = "ipv4_addr", + .help = "new IPv4 source address to set", + .next = NEXT(action_set_ipv4_src, NEXT_ENTRY(IPV4_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv4, ipv4_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV4_DST] = { + .name = "set_ipv4_dst", + .help = "Set a new IPv4 destination address in the outermost" + " IPv4 header", + .priv = PRIV_ACTION(SET_IPV4_DST, + sizeof(struct rte_flow_action_set_ipv4)), + .next = NEXT(action_set_ipv4_dst), + .call = parse_vc, + }, + [ACTION_SET_IPV4_DST_IPV4_DST] = { + .name = "ipv4_addr", + .help = "new IPv4 destination address to set", + .next = NEXT(action_set_ipv4_dst, NEXT_ENTRY(IPV4_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv4, ipv4_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV6_SRC] = { + .name = "set_ipv6_src", + .help = "Set a new IPv6 source address in the outermost" + " IPv6 header", + .priv = PRIV_ACTION(SET_IPV6_SRC, + sizeof(struct rte_flow_action_set_ipv6)), + .next = NEXT(action_set_ipv6_src), + .call = parse_vc, + }, + [ACTION_SET_IPV6_SRC_IPV6_SRC] = { + .name = "ipv6_addr", + .help = "new IPv6 source address to set", + .next = NEXT(action_set_ipv6_src, NEXT_ENTRY(IPV6_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv6, ipv6_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_IPV6_DST] = { + .name = "set_ipv6_dst", + .help = "Set a new IPv6 destination address in the outermost" + " IPv6 header", + .priv = PRIV_ACTION(SET_IPV6_DST, + sizeof(struct rte_flow_action_set_ipv6)), + .next = NEXT(action_set_ipv6_dst), + .call = parse_vc, + }, + [ACTION_SET_IPV6_DST_IPV6_DST] = { + .name = "ipv6_addr", + .help = "new IPv6 destination address to set", + .next = NEXT(action_set_ipv6_dst, NEXT_ENTRY(IPV6_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ipv6, ipv6_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_TP_SRC] = { + .name = "set_tp_src", + .help = "set a new source port number in the outermost" + " TCP/UDP header", + .priv = PRIV_ACTION(SET_TP_SRC, + sizeof(struct rte_flow_action_set_tp)), + .next = NEXT(action_set_tp_src), + .call = parse_vc, + }, + [ACTION_SET_TP_SRC_TP_SRC] = { + .name = "port", + .help = "new source port number to set", + .next = NEXT(action_set_tp_src, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_tp, port)), + .call = parse_vc_conf, + }, + [ACTION_SET_TP_DST] = { + .name = "set_tp_dst", + 
.help = "set a new destination port number in the outermost" + " TCP/UDP header", + .priv = PRIV_ACTION(SET_TP_DST, + sizeof(struct rte_flow_action_set_tp)), + .next = NEXT(action_set_tp_dst), + .call = parse_vc, + }, + [ACTION_SET_TP_DST_TP_DST] = { + .name = "port", + .help = "new destination port number to set", + .next = NEXT(action_set_tp_dst, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_tp, port)), + .call = parse_vc_conf, + }, + [ACTION_MAC_SWAP] = { + .name = "mac_swap", + .help = "Swap the source and destination MAC addresses" + " in the outermost Ethernet header", + .priv = PRIV_ACTION(MAC_SWAP, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_DEC_TTL] = { + .name = "dec_ttl", + .help = "decrease network TTL if available", + .priv = PRIV_ACTION(DEC_TTL, 0), + .next = NEXT(NEXT_ENTRY(ACTION_NEXT)), + .call = parse_vc, + }, + [ACTION_SET_TTL] = { + .name = "set_ttl", + .help = "set ttl value", + .priv = PRIV_ACTION(SET_TTL, + sizeof(struct rte_flow_action_set_ttl)), + .next = NEXT(action_set_ttl), + .call = parse_vc, + }, + [ACTION_SET_TTL_TTL] = { + .name = "ttl_value", + .help = "new ttl value to set", + .next = NEXT(action_set_ttl, NEXT_ENTRY(UNSIGNED)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_ttl, ttl_value)), + .call = parse_vc_conf, + }, + [ACTION_SET_MAC_SRC] = { + .name = "set_mac_src", + .help = "set source mac address", + .priv = PRIV_ACTION(SET_MAC_SRC, + sizeof(struct rte_flow_action_set_mac)), + .next = NEXT(action_set_mac_src), + .call = parse_vc, + }, + [ACTION_SET_MAC_SRC_MAC_SRC] = { + .name = "mac_addr", + .help = "new source mac address", + .next = NEXT(action_set_mac_src, NEXT_ENTRY(MAC_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_mac, mac_addr)), + .call = parse_vc_conf, + }, + [ACTION_SET_MAC_DST] = { + .name = "set_mac_dst", + .help = "set destination mac address", + .priv = PRIV_ACTION(SET_MAC_DST, + sizeof(struct rte_flow_action_set_mac)), + .next = NEXT(action_set_mac_dst), + .call = parse_vc, + }, + [ACTION_SET_MAC_DST_MAC_DST] = { + .name = "mac_addr", + .help = "new destination mac address to set", + .next = NEXT(action_set_mac_dst, NEXT_ENTRY(MAC_ADDR)), + .args = ARGS(ARGS_ENTRY_HTON + (struct rte_flow_action_set_mac, mac_addr)), + .call = parse_vc_conf, + }, +}; + +/** Remove and return last entry from argument stack. */ +static const struct arg * +pop_args(struct context *ctx) +{ + return ctx->args_num ? ctx->args[--ctx->args_num] : NULL; +} + +/** Add entry on top of the argument stack. */ +static int +push_args(struct context *ctx, const struct arg *arg) +{ + if (ctx->args_num == CTX_STACK_SIZE) + return -1; + ctx->args[ctx->args_num++] = arg; + return 0; +} + +/** Spread value into buffer according to bit-mask. 
*/ +static size_t +arg_entry_bf_fill(void *dst, uintmax_t val, const struct arg *arg) +{ + uint32_t i = arg->size; + uint32_t end = 0; + int sub = 1; + int add = 0; + size_t len = 0; + + if (!arg->mask) + return 0; +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN + if (!arg->hton) { + i = 0; + end = arg->size; + sub = 0; + add = 1; + } +#endif + while (i != end) { + unsigned int shift = 0; + uint8_t *buf = (uint8_t *)dst + arg->offset + (i -= sub); + + for (shift = 0; arg->mask[i] >> shift; ++shift) { + if (!(arg->mask[i] & (1 << shift))) + continue; + ++len; + if (!dst) + continue; + *buf &= ~(1 << shift); + *buf |= (val & 1) << shift; + val >>= 1; + } + i += add; + } + return len; +} + +/** Compare a string with a partial one of a given length. */ +static int +strcmp_partial(const char *full, const char *partial, size_t partial_len) +{ + int r = strncmp(full, partial, partial_len); + + if (r) + return r; + if (strlen(full) <= partial_len) + return 0; + return full[partial_len]; +} + +/** + * Parse a prefix length and generate a bit-mask. + * + * Last argument (ctx->args) is retrieved to determine mask size, storage + * location and whether the result must use network byte ordering. + */ +static int +parse_prefix(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + const struct arg *arg = pop_args(ctx); + static const uint8_t conv[] = "\x00\x80\xc0\xe0\xf0\xf8\xfc\xfe\xff"; + char *end; + uintmax_t u; + unsigned int bytes; unsigned int extra; (void)token; @@ -2255,7 +3253,8 @@ parse_vc_action_rss(struct context *ctx, const struct token *token, .key = action_rss_data->key, .queue = action_rss_data->queue, }, - .key = "testpmd's default RSS hash key", + .key = "testpmd's default RSS hash key, " + "override it for better balancing", .queue = { 0 }, }; for (i = 0; i < action_rss_data->conf.queue_num; ++i) @@ -2379,7 +3378,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, i = ctx->objdata >> 16; if (!strcmp_partial("end", str, len)) { ctx->objdata &= 0xffff; - return len; + goto end; } if (i >= ACTION_RSS_QUEUE_NUM) return -1; @@ -2399,6 +3398,7 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, if (ctx->next_num == RTE_DIM(ctx->next)) return -1; ctx->next[ctx->next_num++] = next; +end: if (!ctx->object) return len; action_rss_data = ctx->object; @@ -2407,6 +3407,728 @@ parse_vc_action_rss_queue(struct context *ctx, const struct token *token, return len; } +/** Parse VXLAN encap action. */ +static int +parse_vc_action_vxlan_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_vxlan_encap_data *action_vxlan_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. 
*/ + action_vxlan_encap_data = ctx->object; + *action_vxlan_encap_data = (struct action_vxlan_encap_data){ + .conf = (struct rte_flow_action_vxlan_encap){ + .definition = action_vxlan_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_vxlan_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_vxlan_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_vxlan_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_UDP, + .spec = &action_vxlan_encap_data->item_udp, + .mask = &rte_flow_item_udp_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VXLAN, + .spec = &action_vxlan_encap_data->item_vxlan, + .mask = &rte_flow_item_vxlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = vxlan_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = vxlan_encap_conf.ipv4_src, + .dst_addr = vxlan_encap_conf.ipv4_dst, + }, + .item_udp.hdr = { + .src_port = vxlan_encap_conf.udp_src, + .dst_port = vxlan_encap_conf.udp_dst, + }, + .item_vxlan.flags = 0, + }; + memcpy(action_vxlan_encap_data->item_eth.dst.addr_bytes, + vxlan_encap_conf.eth_dst, ETHER_ADDR_LEN); + memcpy(action_vxlan_encap_data->item_eth.src.addr_bytes, + vxlan_encap_conf.eth_src, ETHER_ADDR_LEN); + if (!vxlan_encap_conf.select_ipv4) { + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.src_addr, + &vxlan_encap_conf.ipv6_src, + sizeof(vxlan_encap_conf.ipv6_src)); + memcpy(&action_vxlan_encap_data->item_ipv6.hdr.dst_addr, + &vxlan_encap_conf.ipv6_dst, + sizeof(vxlan_encap_conf.ipv6_dst)); + action_vxlan_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_vxlan_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!vxlan_encap_conf.select_vlan) + action_vxlan_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + if (vxlan_encap_conf.select_tos_ttl) { + if (vxlan_encap_conf.select_ipv4) { + static struct rte_flow_item_ipv4 ipv4_mask_tos; + + memcpy(&ipv4_mask_tos, &rte_flow_item_ipv4_mask, + sizeof(ipv4_mask_tos)); + ipv4_mask_tos.hdr.type_of_service = 0xff; + ipv4_mask_tos.hdr.time_to_live = 0xff; + action_vxlan_encap_data->item_ipv4.hdr.type_of_service = + vxlan_encap_conf.ip_tos; + action_vxlan_encap_data->item_ipv4.hdr.time_to_live = + vxlan_encap_conf.ip_ttl; + action_vxlan_encap_data->items[2].mask = + &ipv4_mask_tos; + } else { + static struct rte_flow_item_ipv6 ipv6_mask_tos; + + memcpy(&ipv6_mask_tos, &rte_flow_item_ipv6_mask, + sizeof(ipv6_mask_tos)); + ipv6_mask_tos.hdr.vtc_flow |= + RTE_BE32(0xfful << IPV6_HDR_TC_SHIFT); + ipv6_mask_tos.hdr.hop_limits = 0xff; + action_vxlan_encap_data->item_ipv6.hdr.vtc_flow |= + rte_cpu_to_be_32 + ((uint32_t)vxlan_encap_conf.ip_tos << + IPV6_HDR_TC_SHIFT); + action_vxlan_encap_data->item_ipv6.hdr.hop_limits = + vxlan_encap_conf.ip_ttl; + action_vxlan_encap_data->items[2].mask = + &ipv6_mask_tos; + } + } + memcpy(action_vxlan_encap_data->item_vxlan.vni, vxlan_encap_conf.vni, + RTE_DIM(vxlan_encap_conf.vni)); + action->conf = &action_vxlan_encap_data->conf; + return ret; +} + +/** Parse NVGRE encap action. 
*/ +static int +parse_vc_action_nvgre_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_nvgre_encap_data *action_nvgre_encap_data; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Set up default configuration. */ + action_nvgre_encap_data = ctx->object; + *action_nvgre_encap_data = (struct action_nvgre_encap_data){ + .conf = (struct rte_flow_action_nvgre_encap){ + .definition = action_nvgre_encap_data->items, + }, + .items = { + { + .type = RTE_FLOW_ITEM_TYPE_ETH, + .spec = &action_nvgre_encap_data->item_eth, + .mask = &rte_flow_item_eth_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_VLAN, + .spec = &action_nvgre_encap_data->item_vlan, + .mask = &rte_flow_item_vlan_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_IPV4, + .spec = &action_nvgre_encap_data->item_ipv4, + .mask = &rte_flow_item_ipv4_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_NVGRE, + .spec = &action_nvgre_encap_data->item_nvgre, + .mask = &rte_flow_item_nvgre_mask, + }, + { + .type = RTE_FLOW_ITEM_TYPE_END, + }, + }, + .item_eth.type = 0, + .item_vlan = { + .tci = nvgre_encap_conf.vlan_tci, + .inner_type = 0, + }, + .item_ipv4.hdr = { + .src_addr = nvgre_encap_conf.ipv4_src, + .dst_addr = nvgre_encap_conf.ipv4_dst, + }, + .item_nvgre.flow_id = 0, + }; + memcpy(action_nvgre_encap_data->item_eth.dst.addr_bytes, + nvgre_encap_conf.eth_dst, ETHER_ADDR_LEN); + memcpy(action_nvgre_encap_data->item_eth.src.addr_bytes, + nvgre_encap_conf.eth_src, ETHER_ADDR_LEN); + if (!nvgre_encap_conf.select_ipv4) { + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.src_addr, + &nvgre_encap_conf.ipv6_src, + sizeof(nvgre_encap_conf.ipv6_src)); + memcpy(&action_nvgre_encap_data->item_ipv6.hdr.dst_addr, + &nvgre_encap_conf.ipv6_dst, + sizeof(nvgre_encap_conf.ipv6_dst)); + action_nvgre_encap_data->items[2] = (struct rte_flow_item){ + .type = RTE_FLOW_ITEM_TYPE_IPV6, + .spec = &action_nvgre_encap_data->item_ipv6, + .mask = &rte_flow_item_ipv6_mask, + }; + } + if (!nvgre_encap_conf.select_vlan) + action_nvgre_encap_data->items[1].type = + RTE_FLOW_ITEM_TYPE_VOID; + memcpy(action_nvgre_encap_data->item_nvgre.tni, nvgre_encap_conf.tni, + RTE_DIM(nvgre_encap_conf.tni)); + action->conf = &action_nvgre_encap_data->conf; + return ret; +} + +/** Parse l2 encap action. */ +static int +parse_vc_action_l2_encap(struct context *ctx, const struct token *token, + const char *str, unsigned int len, + void *buf, unsigned int size) +{ + struct buffer *out = buf; + struct rte_flow_action *action; + struct action_raw_encap_data *action_encap_data; + struct rte_flow_item_eth eth = { .type = 0, }; + struct rte_flow_item_vlan vlan = { + .tci = mplsoudp_encap_conf.vlan_tci, + .inner_type = 0, + }; + uint8_t *header; + int ret; + + ret = parse_vc(ctx, token, str, len, buf, size); + if (ret < 0) + return ret; + /* Nothing else to do if there is no buffer. */ + if (!out) + return ret; + if (!out->args.vc.actions_n) + return -1; + action = &out->args.vc.actions[out->args.vc.actions_n - 1]; + /* Point to selected object. */ + ctx->object = out->args.vc.data; + ctx->objmask = NULL; + /* Copy the headers to the buffer. 
*/
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ };
+ header = action_encap_data->data;
+ if (l2_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (l2_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ l2_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ l2_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_encap_conf.select_vlan) {
+ if (l2_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse l2 decap action. */
+static int
+parse_vc_action_l2_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (l2_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (l2_decap_conf.select_vlan) {
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+#define ETHER_TYPE_MPLS_UNICAST 0x8847
+
+/** Parse MPLSOGRE encap action. */
+static int
+parse_vc_action_mplsogre_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsogre_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsogre_encap_conf.ipv4_src,
+ .dst_addr = mplsogre_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsogre_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsogre_encap_conf.ipv6_src,
+ sizeof(mplsogre_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsogre_encap_conf.ipv6_dst,
+ sizeof(mplsogre_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memcpy(mpls.label_tc_s, mplsogre_encap_conf.label,
+ RTE_DIM(mplsogre_encap_conf.label));
+ mpls.label_tc_s[2] |= 0x1;
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOGRE decap action. */
+static int
+parse_vc_action_mplsogre_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_GRE,
+ },
+ };
+ struct rte_flow_item_gre gre = {
+ .protocol = rte_cpu_to_be_16(ETHER_TYPE_MPLS_UNICAST),
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsogre_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsogre_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsogre_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsogre_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsogre_encap_conf.select_vlan) {
+ if (mplsogre_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsogre_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &gre, sizeof(gre));
+ header += sizeof(gre);
+ memset(&mpls, 0, sizeof(mpls));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOUDP encap action. */
+static int
+parse_vc_action_mplsoudp_encap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_encap_data *action_encap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {
+ .tci = mplsoudp_encap_conf.vlan_tci,
+ .inner_type = 0,
+ };
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .src_addr = mplsoudp_encap_conf.ipv4_src,
+ .dst_addr = mplsoudp_encap_conf.ipv4_dst,
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ .src_port = mplsoudp_encap_conf.udp_src,
+ .dst_port = mplsoudp_encap_conf.udp_dst,
+ },
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_encap_data = ctx->object;
+ *action_encap_data = (struct action_raw_encap_data) {
+ .conf = (struct rte_flow_action_raw_encap){
+ .data = action_encap_data->data,
+ },
+ .data = {},
+ .preserve = {},
+ };
+ header = action_encap_data->data;
+ if (mplsoudp_encap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(&ipv6.hdr.src_addr,
+ &mplsoudp_encap_conf.ipv6_src,
+ sizeof(mplsoudp_encap_conf.ipv6_src));
+ memcpy(&ipv6.hdr.dst_addr,
+ &mplsoudp_encap_conf.ipv6_dst,
+ sizeof(mplsoudp_encap_conf.ipv6_dst));
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memcpy(mpls.label_tc_s, mplsoudp_encap_conf.label,
+ RTE_DIM(mplsoudp_encap_conf.label));
+ mpls.label_tc_s[2] |= 0x1;
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_encap_data->conf.size = header -
+ action_encap_data->data;
+ action->conf = &action_encap_data->conf;
+ return ret;
+}
+
+/** Parse MPLSOUDP decap action. */
+static int
+parse_vc_action_mplsoudp_decap(struct context *ctx, const struct token *token,
+ const char *str, unsigned int len,
+ void *buf, unsigned int size)
+{
+ struct buffer *out = buf;
+ struct rte_flow_action *action;
+ struct action_raw_decap_data *action_decap_data;
+ struct rte_flow_item_eth eth = { .type = 0, };
+ struct rte_flow_item_vlan vlan = {.tci = 0};
+ struct rte_flow_item_ipv4 ipv4 = {
+ .hdr = {
+ .next_proto_id = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_ipv6 ipv6 = {
+ .hdr = {
+ .proto = IPPROTO_UDP,
+ },
+ };
+ struct rte_flow_item_udp udp = {
+ .hdr = {
+ .dst_port = rte_cpu_to_be_16(6635),
+ },
+ };
+ struct rte_flow_item_mpls mpls;
+ uint8_t *header;
+ int ret;
+
+ ret = parse_vc(ctx, token, str, len, buf, size);
+ if (ret < 0)
+ return ret;
+ /* Nothing else to do if there is no buffer. */
+ if (!out)
+ return ret;
+ if (!out->args.vc.actions_n)
+ return -1;
+ action = &out->args.vc.actions[out->args.vc.actions_n - 1];
+ /* Point to selected object. */
+ ctx->object = out->args.vc.data;
+ ctx->objmask = NULL;
+ /* Copy the headers to the buffer. */
+ action_decap_data = ctx->object;
+ *action_decap_data = (struct action_raw_decap_data) {
+ .conf = (struct rte_flow_action_raw_decap){
+ .data = action_decap_data->data,
+ },
+ .data = {},
+ };
+ header = action_decap_data->data;
+ if (mplsoudp_decap_conf.select_vlan)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
+ else if (mplsoudp_encap_conf.select_ipv4)
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ eth.type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(eth.dst.addr_bytes,
+ mplsoudp_encap_conf.eth_dst, ETHER_ADDR_LEN);
+ memcpy(eth.src.addr_bytes,
+ mplsoudp_encap_conf.eth_src, ETHER_ADDR_LEN);
+ memcpy(header, &eth, sizeof(eth));
+ header += sizeof(eth);
+ if (mplsoudp_encap_conf.select_vlan) {
+ if (mplsoudp_encap_conf.select_ipv4)
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+ else
+ vlan.inner_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+ memcpy(header, &vlan, sizeof(vlan));
+ header += sizeof(vlan);
+ }
+ if (mplsoudp_encap_conf.select_ipv4) {
+ memcpy(header, &ipv4, sizeof(ipv4));
+ header += sizeof(ipv4);
+ } else {
+ memcpy(header, &ipv6, sizeof(ipv6));
+ header += sizeof(ipv6);
+ }
+ memcpy(header, &udp, sizeof(udp));
+ header += sizeof(udp);
+ memset(&mpls, 0, sizeof(mpls));
+ memcpy(header, &mpls, sizeof(mpls));
+ header += sizeof(mpls);
+ action_decap_data->conf.size = header -
+ action_decap_data->data;
+ action->conf = &action_decap_data->conf;
+ return ret;
+}
+
 /** Parse tokens for destroy command. */
 static int
 parse_destroy(struct context *ctx, const struct token *token,
@@ -2637,6 +4359,8 @@ parse_int(struct context *ctx, const struct token *token,
 }
 buf = (uint8_t *)ctx->object + arg->offset;
 size = arg->size;
+ if (u > RTE_LEN2MASK(size * CHAR_BIT, uint64_t))
+ return -1;
 objmask:
 switch (size) {
 case sizeof(uint8_t):
@@ -3325,7 +5049,7 @@ cmd_flow_parsed(const struct buffer *in)
 break;
 case QUERY:
 port_flow_query(in->port, in->args.query.rule,
- in->args.query.action);
+ &in->args.query.action);
 break;
 case LIST:
 port_flow_list(in->port, in->args.list.group_n,
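
The new "count" (with its "shared"/"identifier" arguments) and "jump group" tokens added above map directly onto rte_flow actions handled through parse_vc/parse_vc_conf. As a rough, illustrative sketch only (not part of this patch; the helper name and the port, group and counter numbers are arbitrary placeholders), this is how an application could request the equivalent rule straight through the rte_flow API that these testpmd commands drive:

#include <stdint.h>
#include <rte_flow.h>

/* Roughly what the testpmd command
 *   flow create 0 ingress pattern eth / end
 *        actions count shared 1 identifier 3 / jump group 2 / end
 * asks the PMD to do: count all ingress Ethernet traffic on a shared
 * counter and redirect it to flow group 2.
 */
static struct rte_flow *
create_count_and_jump(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_count count = { .shared = 1, .id = 3 };
	struct rte_flow_action_jump jump = { .group = 2 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}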