diff --git a/app/test-flow-perf/actions_gen.c b/app/test-flow-perf/actions_gen.c
index 3ae6059fb1..82cddfc676 100644
--- a/app/test-flow-perf/actions_gen.c
+++ b/app/test-flow-perf/actions_gen.c
@@ -12,11 +12,14 @@
 #include
 #include
 #include
+#include
+#include
 
 #include "actions_gen.h"
 #include "flow_gen.h"
 #include "config.h"
+
 /* Storage for additional parameters for actions */
 struct additional_para {
 	uint16_t queue;
@@ -26,6 +29,8 @@ struct additional_para {
 	uint32_t counter;
 	uint64_t encap_data;
 	uint64_t decap_data;
+	uint8_t core_idx;
+	bool unique_data;
 };
 
 /* Storage for struct rte_flow_action_raw_encap including external data. */
@@ -53,16 +58,18 @@ struct action_rss_data {
 static void
 add_mark(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_mark mark_action;
+	static struct rte_flow_action_mark mark_actions[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t counter = para.counter;
 
 	do {
-		mark_action.id = MARK_ID;
+		/* Random values from 1 to 255 */
+		mark_actions[para.core_idx].id = (counter % 255) + 1;
 	} while (0);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_MARK;
-	actions[actions_counter].conf = &mark_action;
+	actions[actions_counter].conf = &mark_actions[para.core_idx];
 }
 
 static void
@@ -70,14 +77,14 @@ add_queue(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	struct additional_para para)
 {
-	static struct rte_flow_action_queue queue_action;
+	static struct rte_flow_action_queue queue_actions[RTE_MAX_LCORE] __rte_cache_aligned;
 
 	do {
-		queue_action.index = para.queue;
+		queue_actions[para.core_idx].index = para.queue;
 	} while (0);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_QUEUE;
-	actions[actions_counter].conf = &queue_action;
+	actions[actions_counter].conf = &queue_actions[para.core_idx];
 }
 
 static void
@@ -100,39 +107,36 @@ add_rss(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	struct additional_para para)
 {
-	static struct rte_flow_action_rss *rss_action;
-	static struct action_rss_data *rss_data;
+	static struct action_rss_data *rss_data[RTE_MAX_LCORE] __rte_cache_aligned;
 
 	uint16_t queue;
 
-	if (rss_data == NULL)
-		rss_data = rte_malloc("rss_data",
+	if (rss_data[para.core_idx] == NULL)
+		rss_data[para.core_idx] = rte_malloc("rss_data",
 			sizeof(struct action_rss_data), 0);
 
-	if (rss_data == NULL)
+	if (rss_data[para.core_idx] == NULL)
 		rte_exit(EXIT_FAILURE, "No Memory available!");
 
-	*rss_data = (struct action_rss_data){
+	*rss_data[para.core_idx] = (struct action_rss_data){
 		.conf = (struct rte_flow_action_rss){
 			.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 			.level = 0,
 			.types = GET_RSS_HF(),
-			.key_len = sizeof(rss_data->key),
+			.key_len = sizeof(rss_data[para.core_idx]->key),
 			.queue_num = para.queues_number,
-			.key = rss_data->key,
-			.queue = rss_data->queue,
+			.key = rss_data[para.core_idx]->key,
+			.queue = rss_data[para.core_idx]->queue,
 		},
 		.key = { 1 },
 		.queue = { 0 },
 	};
 
 	for (queue = 0; queue < para.queues_number; queue++)
-		rss_data->queue[queue] = para.queues[queue];
-
-	rss_action = &rss_data->conf;
+		rss_data[para.core_idx]->queue[queue] = para.queues[queue];
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RSS;
-	actions[actions_counter].conf = rss_action;
+	actions[actions_counter].conf = &rss_data[para.core_idx]->conf;
 }
 
 static void
@@ -140,12 +144,10 @@ add_set_meta(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	__rte_unused struct additional_para para)
 {
-	static struct rte_flow_action_set_meta meta_action;
-
-	do {
-		meta_action.data = RTE_BE32(META_DATA);
-		meta_action.mask = RTE_BE32(0xffffffff);
-	} while (0);
+	static struct rte_flow_action_set_meta meta_action = {
+		.data = RTE_BE32(META_DATA),
+		.mask = RTE_BE32(0xffffffff),
+	};
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_META;
 	actions[actions_counter].conf = &meta_action;
@@ -156,13 +158,11 @@ add_set_tag(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	__rte_unused struct additional_para para)
 {
-	static struct rte_flow_action_set_tag tag_action;
-
-	do {
-		tag_action.data = RTE_BE32(META_DATA);
-		tag_action.mask = RTE_BE32(0xffffffff);
-		tag_action.index = TAG_INDEX;
-	} while (0);
+	static struct rte_flow_action_set_tag tag_action = {
+		.data = RTE_BE32(META_DATA),
+		.mask = RTE_BE32(0xffffffff),
+		.index = TAG_INDEX,
+	};
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TAG;
 	actions[actions_counter].conf = &tag_action;
@@ -173,11 +173,9 @@ add_port_id(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	__rte_unused struct additional_para para)
 {
-	static struct rte_flow_action_port_id port_id;
-
-	do {
-		port_id.id = PORT_ID_DST;
-	} while (0);
+	static struct rte_flow_action_port_id port_id = {
+		.id = PORT_ID_DST,
+	};
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_PORT_ID;
 	actions[actions_counter].conf = &port_id;
@@ -205,203 +203,267 @@ add_count(struct rte_flow_action *actions,
 static void
 add_set_src_mac(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_mac set_mac;
+	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t mac = para.counter;
 	uint16_t i;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		mac = 1;
+
 	/* Mac address to be set is random each time */
 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
-		set_mac.mac_addr[i] = mac & 0xff;
+		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
 		mac = mac >> 8;
 	}
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC;
-	actions[actions_counter].conf = &set_mac;
+	actions[actions_counter].conf = &set_macs[para.core_idx];
 }
 
 static void
 add_set_dst_mac(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_mac set_mac;
+	static struct rte_flow_action_set_mac set_macs[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t mac = para.counter;
 	uint16_t i;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		mac = 1;
+
 	/* Mac address to be set is random each time */
 	for (i = 0; i < RTE_ETHER_ADDR_LEN; i++) {
-		set_mac.mac_addr[i] = mac & 0xff;
+		set_macs[para.core_idx].mac_addr[i] = mac & 0xff;
 		mac = mac >> 8;
 	}
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_MAC_DST;
-	actions[actions_counter].conf = &set_mac;
+	actions[actions_counter].conf = &set_macs[para.core_idx];
 }
 
 static void
 add_set_src_ipv4(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_ipv4 set_ipv4;
+	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t ip = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
+	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC;
-	actions[actions_counter].conf = &set_ipv4;
+	actions[actions_counter].conf = &set_ipv4[para.core_idx];
 }
 
 static void
 add_set_dst_ipv4(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_ipv4 set_ipv4;
+	static struct rte_flow_action_set_ipv4 set_ipv4[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t ip = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		ip = 1;
 
 	/* IPv4 value to be set is random each time */
-	set_ipv4.ipv4_addr = RTE_BE32(para.counter + 1);
+	set_ipv4[para.core_idx].ipv4_addr = RTE_BE32(ip + 1);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DST;
-	actions[actions_counter].conf = &set_ipv4;
+	actions[actions_counter].conf = &set_ipv4[para.core_idx];
 }
 
 static void
 add_set_src_ipv6(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_ipv6 set_ipv6;
+	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t ipv6 = para.counter;
 	uint8_t i;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		ipv6 = 1;
+
 	/* IPv6 value to set is random each time */
 	for (i = 0; i < 16; i++) {
-		set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
+		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
 		ipv6 = ipv6 >> 8;
 	}
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC;
-	actions[actions_counter].conf = &set_ipv6;
+	actions[actions_counter].conf = &set_ipv6[para.core_idx];
 }
 
 static void
 add_set_dst_ipv6(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_ipv6 set_ipv6;
+	static struct rte_flow_action_set_ipv6 set_ipv6[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t ipv6 = para.counter;
 	uint8_t i;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		ipv6 = 1;
+
 	/* IPv6 value to set is random each time */
 	for (i = 0; i < 16; i++) {
-		set_ipv6.ipv6_addr[i] = ipv6 & 0xff;
+		set_ipv6[para.core_idx].ipv6_addr[i] = ipv6 & 0xff;
 		ipv6 = ipv6 >> 8;
 	}
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DST;
-	actions[actions_counter].conf = &set_ipv6;
+	actions[actions_counter].conf = &set_ipv6[para.core_idx];
 }
 
 static void
 add_set_src_tp(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_tp set_tp;
+	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t tp = para.counter;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		tp = 100;
+
 	/* TP src port is random each time */
-	if (tp > 0xffff)
-		tp = tp >> 16;
+	tp = tp % 0xffff;
 
-	set_tp.port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_SRC;
-	actions[actions_counter].conf = &set_tp;
+	actions[actions_counter].conf = &set_tp[para.core_idx];
 }
 
 static void
 add_set_dst_tp(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_tp set_tp;
+	static struct rte_flow_action_set_tp set_tp[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t tp = para.counter;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		tp = 100;
+
 	/* TP src port is random each time */
 	if (tp > 0xffff)
 		tp = tp >> 16;
 
-	set_tp.port = RTE_BE16(tp & 0xffff);
+	set_tp[para.core_idx].port = RTE_BE16(tp & 0xffff);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TP_DST;
-	actions[actions_counter].conf = &set_tp;
+	actions[actions_counter].conf = &set_tp[para.core_idx];
 }
 
 static void
 add_inc_tcp_ack(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static rte_be32_t value = RTE_BE32(1);
+	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t ack_value = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		ack_value = 1;
+
+	value[para.core_idx] = RTE_BE32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_ACK;
-	actions[actions_counter].conf = &value;
+	actions[actions_counter].conf = &value[para.core_idx];
 }
 
 static void
 add_dec_tcp_ack(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static rte_be32_t value = RTE_BE32(1);
+	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t ack_value = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		ack_value = 1;
+
+	value[para.core_idx] = RTE_BE32(ack_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK;
-	actions[actions_counter].conf = &value;
+	actions[actions_counter].conf = &value[para.core_idx];
 }
 
 static void
 add_inc_tcp_seq(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static rte_be32_t value = RTE_BE32(1);
+	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t seq_value = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		seq_value = 1;
+
+	value[para.core_idx] = RTE_BE32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ;
-	actions[actions_counter].conf = &value;
+	actions[actions_counter].conf = &value[para.core_idx];
 }
 
 static void
 add_dec_tcp_seq(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static rte_be32_t value = RTE_BE32(1);
+	static rte_be32_t value[RTE_MAX_LCORE] __rte_cache_aligned;
+	uint32_t seq_value = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		seq_value = 1;
+
+	value[para.core_idx] = RTE_BE32(seq_value);
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ;
-	actions[actions_counter].conf = &value;
+	actions[actions_counter].conf = &value[para.core_idx];
 }
 
 static void
 add_set_ttl(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_ttl set_ttl;
+	static struct rte_flow_action_set_ttl set_ttl[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t ttl_value = para.counter;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		ttl_value = 1;
+
 	/* Set ttl to random value each time */
-	while (ttl_value > 0xff)
-		ttl_value = ttl_value >> 8;
+	ttl_value = ttl_value % 0xff;
 
-	set_ttl.ttl_value = ttl_value;
+	set_ttl[para.core_idx].ttl_value = ttl_value;
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_TTL;
-	actions[actions_counter].conf = &set_ttl;
+	actions[actions_counter].conf = &set_ttl[para.core_idx];
 }
 
 static void
@@ -415,37 +477,43 @@ add_dec_ttl(struct rte_flow_action *actions,
 static void
 add_set_ipv4_dscp(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_dscp set_dscp;
+	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t dscp_value = para.counter;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		dscp_value = 1;
+
 	/* Set dscp to random value each time */
-	while (dscp_value > 0xff)
-		dscp_value = dscp_value >> 8;
+	dscp_value = dscp_value % 0xff;
 
-	set_dscp.dscp = dscp_value;
+	set_dscp[para.core_idx].dscp = dscp_value;
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP;
-	actions[actions_counter].conf = &set_dscp;
+	actions[actions_counter].conf = &set_dscp[para.core_idx];
 }
 
 static void
 add_set_ipv6_dscp(struct rte_flow_action *actions,
 	uint8_t actions_counter,
-	__rte_unused struct additional_para para)
+	struct additional_para para)
 {
-	static struct rte_flow_action_set_dscp set_dscp;
+	static struct rte_flow_action_set_dscp set_dscp[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint32_t dscp_value = para.counter;
 
+	/* Fixed value */
+	if (!para.unique_data)
+		dscp_value = 1;
+
 	/* Set dscp to random value each time */
-	while (dscp_value > 0xff)
-		dscp_value = dscp_value >> 8;
+	dscp_value = dscp_value % 0xff;
 
-	set_dscp.dscp = dscp_value;
+	set_dscp[para.core_idx].dscp = dscp_value;
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP;
-	actions[actions_counter].conf = &set_dscp;
+	actions[actions_counter].conf = &set_dscp[para.core_idx];
 }
 
 static void
@@ -460,27 +528,27 @@ static void
 add_ether_header(uint8_t **header, uint64_t data,
 	__rte_unused struct additional_para para)
 {
-	struct rte_flow_item_eth eth_item;
+	struct rte_ether_hdr eth_hdr;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH)))
 		return;
 
-	memset(&eth_item, 0, sizeof(struct rte_flow_item_eth));
+	memset(&eth_hdr, 0, sizeof(struct rte_ether_hdr));
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN))
-		eth_item.type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
+		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
 	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
-		eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
+		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
 	else if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
-		eth_item.type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
-	memcpy(*header, &eth_item, sizeof(eth_item));
-	*header += sizeof(eth_item);
+		eth_hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
+	memcpy(*header, &eth_hdr, sizeof(eth_hdr));
+	*header += sizeof(eth_hdr);
 }
 
 static void
 add_vlan_header(uint8_t **header, uint64_t data,
 	__rte_unused struct additional_para para)
 {
-	struct rte_flow_item_vlan vlan_item;
+	struct rte_vlan_hdr vlan_hdr;
 	uint16_t vlan_value;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN)))
@@ -488,173 +556,191 @@ add_vlan_header(uint8_t **header, uint64_t data,
 
 	vlan_value = VLAN_VALUE;
 
-	memset(&vlan_item, 0, sizeof(struct rte_flow_item_vlan));
-	vlan_item.tci = RTE_BE16(vlan_value);
+	memset(&vlan_hdr, 0, sizeof(struct rte_vlan_hdr));
+	vlan_hdr.vlan_tci = RTE_BE16(vlan_value);
 
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4))
-		vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
+		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6))
-		vlan_item.inner_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
-	memcpy(*header, &vlan_item, sizeof(vlan_item));
-	*header += sizeof(vlan_item);
+		vlan_hdr.eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
+	memcpy(*header, &vlan_hdr, sizeof(vlan_hdr));
+	*header += sizeof(vlan_hdr);
 }
 
 static void
 add_ipv4_header(uint8_t **header, uint64_t data,
 	struct additional_para para)
 {
-	struct rte_flow_item_ipv4 ipv4_item;
+	struct rte_ipv4_hdr ipv4_hdr;
+	uint32_t ip_dst = para.counter;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4)))
 		return;
 
-	memset(&ipv4_item, 0, sizeof(struct rte_flow_item_ipv4));
-	ipv4_item.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
-	ipv4_item.hdr.dst_addr = RTE_BE32(para.counter);
-	ipv4_item.hdr.version_ihl = RTE_IPV4_VHL_DEF;
+	/* Fixed value */
+	if (!para.unique_data)
+		ip_dst = 1;
+
+	memset(&ipv4_hdr, 0, sizeof(struct rte_ipv4_hdr));
+	ipv4_hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
+	ipv4_hdr.dst_addr = RTE_BE32(ip_dst);
+	ipv4_hdr.version_ihl = RTE_IPV4_VHL_DEF;
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
-		ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_UDP;
+		ipv4_hdr.next_proto_id = RTE_IP_TYPE_UDP;
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
-		ipv4_item.hdr.next_proto_id = RTE_IP_TYPE_GRE;
-	memcpy(*header, &ipv4_item, sizeof(ipv4_item));
-	*header += sizeof(ipv4_item);
+		ipv4_hdr.next_proto_id = RTE_IP_TYPE_GRE;
+	memcpy(*header, &ipv4_hdr, sizeof(ipv4_hdr));
+	*header += sizeof(ipv4_hdr);
 }
 
 static void
 add_ipv6_header(uint8_t **header, uint64_t data,
 	__rte_unused struct additional_para para)
 {
-	struct rte_flow_item_ipv6 ipv6_item;
+	struct rte_ipv6_hdr ipv6_hdr;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6)))
 		return;
 
-	memset(&ipv6_item, 0, sizeof(struct rte_flow_item_ipv6));
+	memset(&ipv6_hdr, 0, sizeof(struct rte_ipv6_hdr));
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP))
-		ipv6_item.hdr.proto = RTE_IP_TYPE_UDP;
+		ipv6_hdr.proto = RTE_IP_TYPE_UDP;
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE))
-		ipv6_item.hdr.proto = RTE_IP_TYPE_GRE;
-	memcpy(*header, &ipv6_item, sizeof(ipv6_item));
-	*header += sizeof(ipv6_item);
+		ipv6_hdr.proto = RTE_IP_TYPE_GRE;
+	memcpy(*header, &ipv6_hdr, sizeof(ipv6_hdr));
+	*header += sizeof(ipv6_hdr);
 }
 
 static void
 add_udp_header(uint8_t **header, uint64_t data,
 	__rte_unused struct additional_para para)
 {
-	struct rte_flow_item_udp udp_item;
+	struct rte_udp_hdr udp_hdr;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP)))
 		return;
 
-	memset(&udp_item, 0, sizeof(struct rte_flow_item_udp));
+	memset(&udp_hdr, 0, sizeof(struct rte_udp_hdr));
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN))
-		udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
+		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE))
-		udp_item.hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
+		udp_hdr.dst_port = RTE_BE16(RTE_VXLAN_GPE_UDP_PORT);
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE))
-		udp_item.hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
+		udp_hdr.dst_port = RTE_BE16(RTE_GENEVE_UDP_PORT);
 	if (data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP))
-		udp_item.hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
-	memcpy(*header, &udp_item, sizeof(udp_item));
-	*header += sizeof(udp_item);
+		udp_hdr.dst_port = RTE_BE16(RTE_GTPU_UDP_PORT);
+	memcpy(*header, &udp_hdr, sizeof(udp_hdr));
+	*header += sizeof(udp_hdr);
 }
 
 static void
 add_vxlan_header(uint8_t **header, uint64_t data,
 	struct additional_para para)
 {
-	struct rte_flow_item_vxlan vxlan_item;
+	struct rte_vxlan_hdr vxlan_hdr;
 	uint32_t vni_value = para.counter;
-	uint8_t i;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN)))
 		return;
 
-	memset(&vxlan_item, 0, sizeof(struct rte_flow_item_vxlan));
+	/* Fixed value */
+	if (!para.unique_data)
+		vni_value = 1;
 
-	for (i = 0; i < 3; i++)
-		vxlan_item.vni[2 - i] = vni_value >> (i * 8);
-	vxlan_item.flags = 0x8;
+	memset(&vxlan_hdr, 0, sizeof(struct rte_vxlan_hdr));
+
+	vxlan_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_hdr.vx_flags = 0x8;
 
-	memcpy(*header, &vxlan_item, sizeof(vxlan_item));
-	*header += sizeof(vxlan_item);
+	memcpy(*header, &vxlan_hdr, sizeof(vxlan_hdr));
+	*header += sizeof(vxlan_hdr);
 }
 
 static void
 add_vxlan_gpe_header(uint8_t **header, uint64_t data,
 	struct additional_para para)
 {
-	struct rte_flow_item_vxlan_gpe vxlan_gpe_item;
+	struct rte_vxlan_gpe_hdr vxlan_gpe_hdr;
 	uint32_t vni_value = para.counter;
-	uint8_t i;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE)))
 		return;
 
-	memset(&vxlan_gpe_item, 0, sizeof(struct rte_flow_item_vxlan_gpe));
+	/* Fixed value */
+	if (!para.unique_data)
+		vni_value = 1;
 
-	for (i = 0; i < 3; i++)
-		vxlan_gpe_item.vni[2 - i] = vni_value >> (i * 8);
-	vxlan_gpe_item.flags = 0x0c;
+	memset(&vxlan_gpe_hdr, 0, sizeof(struct rte_vxlan_gpe_hdr));
+
+	vxlan_gpe_hdr.vx_vni = (RTE_BE32(vni_value)) >> 16;
+	vxlan_gpe_hdr.vx_flags = 0x0c;
 
-	memcpy(*header, &vxlan_gpe_item, sizeof(vxlan_gpe_item));
-	*header += sizeof(vxlan_gpe_item);
+	memcpy(*header, &vxlan_gpe_hdr, sizeof(vxlan_gpe_hdr));
+	*header += sizeof(vxlan_gpe_hdr);
 }
 
 static void
 add_gre_header(uint8_t **header, uint64_t data,
 	__rte_unused struct additional_para para)
 {
-	struct rte_flow_item_gre gre_item;
+	struct rte_gre_hdr gre_hdr;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE)))
 		return;
 
-	memset(&gre_item, 0, sizeof(struct rte_flow_item_gre));
+	memset(&gre_hdr, 0, sizeof(struct rte_gre_hdr));
 
-	gre_item.protocol = RTE_BE16(RTE_ETHER_TYPE_TEB);
+	gre_hdr.proto = RTE_BE16(RTE_ETHER_TYPE_TEB);
 
-	memcpy(*header, &gre_item, sizeof(gre_item));
-	*header += sizeof(gre_item);
+	memcpy(*header, &gre_hdr, sizeof(gre_hdr));
+	*header += sizeof(gre_hdr);
 }
 
 static void
 add_geneve_header(uint8_t **header, uint64_t data,
 	struct additional_para para)
 {
-	struct rte_flow_item_geneve geneve_item;
+	struct rte_geneve_hdr geneve_hdr;
 	uint32_t vni_value = para.counter;
 	uint8_t i;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE)))
 		return;
 
-	memset(&geneve_item, 0, sizeof(struct rte_flow_item_geneve));
+	/* Fixed value */
+	if (!para.unique_data)
+		vni_value = 1;
+
+	memset(&geneve_hdr, 0, sizeof(struct rte_geneve_hdr));
 
 	for (i = 0; i < 3; i++)
-		geneve_item.vni[2 - i] = vni_value >> (i * 8);
+		geneve_hdr.vni[2 - i] = vni_value >> (i * 8);
 
-	memcpy(*header, &geneve_item, sizeof(geneve_item));
-	*header += sizeof(geneve_item);
+	memcpy(*header, &geneve_hdr, sizeof(geneve_hdr));
+	*header += sizeof(geneve_hdr);
 }
 
 static void
 add_gtp_header(uint8_t **header, uint64_t data,
 	struct additional_para para)
 {
-	struct rte_flow_item_gtp gtp_item;
+	struct rte_gtp_hdr gtp_hdr;
+	uint32_t teid_value = para.counter;
 
 	if (!(data & FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP)))
 		return;
 
-	memset(&gtp_item, 0, sizeof(struct rte_flow_item_gtp));
+	/* Fixed value */
+	if (!para.unique_data)
+		teid_value = 1;
+
+	memset(&gtp_hdr, 0, sizeof(struct rte_gtp_hdr));
 
-	gtp_item.teid = RTE_BE32(para.counter);
-	gtp_item.msg_type = 255;
+	gtp_hdr.teid = RTE_BE32(teid_value);
+	gtp_hdr.msg_type = 255;
 
-	memcpy(*header, &gtp_item, sizeof(gtp_item));
-	*header += sizeof(gtp_item);
+	memcpy(*header, &gtp_hdr, sizeof(gtp_hdr));
+	*header += sizeof(gtp_hdr);
 }
 
 static const struct encap_decap_headers {
@@ -681,36 +767,36 @@ add_raw_encap(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	struct additional_para para)
 {
-	static struct action_raw_encap_data *action_encap_data;
+	static struct action_raw_encap_data *action_encap_data[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint64_t encap_data = para.encap_data;
 	uint8_t *header;
 	uint8_t i;
 
 	/* Avoid double allocation. */
-	if (action_encap_data == NULL)
-		action_encap_data = rte_malloc("encap_data",
+	if (action_encap_data[para.core_idx] == NULL)
+		action_encap_data[para.core_idx] = rte_malloc("encap_data",
 			sizeof(struct action_raw_encap_data), 0);
 
 	/* Check if allocation failed. */
-	if (action_encap_data == NULL)
+	if (action_encap_data[para.core_idx] == NULL)
 		rte_exit(EXIT_FAILURE, "No Memory available!");
 
-	*action_encap_data = (struct action_raw_encap_data) {
+	*action_encap_data[para.core_idx] = (struct action_raw_encap_data) {
 		.conf = (struct rte_flow_action_raw_encap) {
-			.data = action_encap_data->data,
+			.data = action_encap_data[para.core_idx]->data,
 		},
 		.data = {},
 	};
-	header = action_encap_data->data;
+	header = action_encap_data[para.core_idx]->data;
 
 	for (i = 0; i < RTE_DIM(headers); i++)
 		headers[i].funct(&header, encap_data, para);
 
-	action_encap_data->conf.size = header -
-		action_encap_data->data;
+	action_encap_data[para.core_idx]->conf.size = header -
+		action_encap_data[para.core_idx]->data;
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
-	actions[actions_counter].conf = &action_encap_data->conf;
+	actions[actions_counter].conf = &action_encap_data[para.core_idx]->conf;
 }
 
 static void
@@ -718,42 +804,112 @@ add_raw_decap(struct rte_flow_action *actions,
 	uint8_t actions_counter,
 	struct additional_para para)
 {
-	static struct action_raw_decap_data *action_decap_data;
+	static struct action_raw_decap_data *action_decap_data[RTE_MAX_LCORE] __rte_cache_aligned;
 	uint64_t decap_data = para.decap_data;
 	uint8_t *header;
 	uint8_t i;
 
 	/* Avoid double allocation. */
-	if (action_decap_data == NULL)
-		action_decap_data = rte_malloc("decap_data",
+	if (action_decap_data[para.core_idx] == NULL)
+		action_decap_data[para.core_idx] = rte_malloc("decap_data",
 			sizeof(struct action_raw_decap_data), 0);
 
 	/* Check if allocation failed. */
-	if (action_decap_data == NULL)
+	if (action_decap_data[para.core_idx] == NULL)
 		rte_exit(EXIT_FAILURE, "No Memory available!");
 
-	*action_decap_data = (struct action_raw_decap_data) {
+	*action_decap_data[para.core_idx] = (struct action_raw_decap_data) {
 		.conf = (struct rte_flow_action_raw_decap) {
-			.data = action_decap_data->data,
+			.data = action_decap_data[para.core_idx]->data,
 		},
 		.data = {},
 	};
 
-	header = action_decap_data->data;
+	header = action_decap_data[para.core_idx]->data;
 
 	for (i = 0; i < RTE_DIM(headers); i++)
 		headers[i].funct(&header, decap_data, para);
 
-	action_decap_data->conf.size = header -
-		action_decap_data->data;
+	action_decap_data[para.core_idx]->conf.size = header -
+		action_decap_data[para.core_idx]->data;
 
 	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_RAW_DECAP;
-	actions[actions_counter].conf = &action_decap_data->conf;
+	actions[actions_counter].conf = &action_decap_data[para.core_idx]->conf;
+}
+
+static void
+add_vxlan_encap(struct rte_flow_action *actions,
+	uint8_t actions_counter,
+	struct additional_para para)
+{
+	static struct rte_flow_action_vxlan_encap vxlan_encap[RTE_MAX_LCORE] __rte_cache_aligned;
+	static struct rte_flow_item items[5];
+	static struct rte_flow_item_eth item_eth;
+	static struct rte_flow_item_ipv4 item_ipv4;
+	static struct rte_flow_item_udp item_udp;
+	static struct rte_flow_item_vxlan item_vxlan;
+	uint32_t ip_dst = para.counter;
+
+	/* Fixed value */
+	if (!para.unique_data)
+		ip_dst = 1;
+
+	items[0].spec = &item_eth;
+	items[0].mask = &item_eth;
+	items[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+	item_ipv4.hdr.src_addr = RTE_IPV4(127, 0, 0, 1);
+	item_ipv4.hdr.dst_addr = RTE_BE32(ip_dst);
+	item_ipv4.hdr.version_ihl = RTE_IPV4_VHL_DEF;
+	items[1].spec = &item_ipv4;
+	items[1].mask = &item_ipv4;
+	items[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+	item_udp.hdr.dst_port = RTE_BE16(RTE_VXLAN_DEFAULT_PORT);
+	items[2].spec = &item_udp;
+	items[2].mask = &item_udp;
+	items[2].type = RTE_FLOW_ITEM_TYPE_UDP;
+
+	item_vxlan.vni[2] = 1;
+	items[3].spec = &item_vxlan;
+	items[3].mask = &item_vxlan;
+	items[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
+
+	items[4].type = RTE_FLOW_ITEM_TYPE_END;
+
+	vxlan_encap[para.core_idx].definition = items;
+
+	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
+	actions[actions_counter].conf = &vxlan_encap[para.core_idx];
+}
+
+static void
+add_vxlan_decap(struct rte_flow_action *actions,
+	uint8_t actions_counter,
+	__rte_unused struct additional_para para)
+{
+	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
+}
+
+static void
+add_meter(struct rte_flow_action *actions,
+	uint8_t actions_counter,
+	struct additional_para para)
+{
+	static struct rte_flow_action_meter
+		meters[RTE_MAX_LCORE] __rte_cache_aligned;
+
+	meters[para.core_idx].mtr_id = para.counter;
+	actions[actions_counter].type = RTE_FLOW_ACTION_TYPE_METER;
+	actions[actions_counter].conf = &meters[para.core_idx];
 }
 
 void
 fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 	uint32_t counter, uint16_t next_table, uint16_t hairpinq,
-	uint64_t encap_data, uint64_t decap_data)
+	uint64_t encap_data, uint64_t decap_data, uint8_t core_idx,
+	bool unique_data)
 {
 	struct additional_para additional_para_data;
 	uint8_t actions_counter = 0;
@@ -775,6 +931,8 @@ fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 		.counter = counter,
 		.encap_data = encap_data,
 		.decap_data = decap_data,
+		.core_idx = core_idx,
+		.unique_data = unique_data,
 	};
 
 	if (hairpinq != 0) {
@@ -949,6 +1107,24 @@ fill_actions(struct rte_flow_action *actions, uint64_t *flow_actions,
 			),
 			.funct = add_raw_decap,
 		},
+		{
+			.mask = FLOW_ACTION_MASK(
+				RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
+			),
+			.funct = add_vxlan_encap,
+		},
+		{
+			.mask = FLOW_ACTION_MASK(
+				RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
+			),
+			.funct = add_vxlan_decap,
+		},
+		{
+			.mask = FLOW_ACTION_MASK(
+				RTE_FLOW_ACTION_TYPE_METER
+			),
+			.funct = add_meter,
+		},
 	};
 
 	for (j = 0; j < MAX_ACTIONS_NUM; j++) {
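
Editor's note, not part of the patch: the recurring change above is one pattern, per-lcore static storage indexed by para.core_idx, so that each insertion thread fills its own action configuration instead of racing on a single shared static object. A minimal standalone sketch of the idea follows; worker_fill() and struct action_conf are hypothetical names used only for illustration, and the array-level __rte_cache_aligned mirrors the patch's declarations.

#include <stdint.h>
#include <rte_common.h>  /* __rte_cache_aligned */
#include <rte_lcore.h>   /* RTE_MAX_LCORE */

struct action_conf {
	uint32_t value;
};

/* One slot per lcore; the array is cache aligned, as in the patch. */
static struct action_conf confs[RTE_MAX_LCORE] __rte_cache_aligned;

static const struct action_conf *
worker_fill(uint8_t core_idx, uint32_t counter)
{
	/* Each worker writes only its own slot, so no locking is needed,
	 * and the returned pointer stays valid because the storage is
	 * static rather than stack-allocated. */
	confs[core_idx].value = counter;
	return &confs[core_idx];
}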