From 6e6b34fb26ddc8cf3762bcd0bbb99d9864741f7b Mon Sep 17 00:00:00 2001
From: Maciej Gajdzica
Date: Fri, 20 Feb 2015 17:09:23 +0100
Subject: [PATCH] app/test: add unit tests for link bonding mode 6

Added four unit tests checking the behaviour of link bonding mode 6
(adaptive load balancing). Also modified virtual_pmd so that packets
which should be received by rx_burst can be provided up front, and
packets transmitted by tx_burst can be inspected afterwards. In
packet_burst_generator.c the function creating the Ethernet header is
modified to accept ether_type as a parameter, and a function creating
an ARP header is added. Other unit tests are updated to fix the
resulting compilation errors.

Signed-off-by: Maciej Gajdzica
Acked-by: Declan Doherty
---
 app/test/packet_burst_generator.c |  41 ++-
 app/test/packet_burst_generator.h |  11 +-
 app/test/test_link_bonding.c      | 439 +++++++++++++++++++++++++++++-
 app/test/test_pmd_perf.c          |   3 +-
 app/test/virtual_pmd.c            | 109 +++++---
 app/test/virtual_pmd.h            |   5 +-
 6 files changed, 533 insertions(+), 75 deletions(-)

diff --git a/app/test/packet_burst_generator.c b/app/test/packet_burst_generator.c
index e9d059c98f..b46eed70f8 100644
--- a/app/test/packet_burst_generator.c
+++ b/app/test/packet_burst_generator.c
@@ -80,11 +80,10 @@ copy_buf_to_pkt(void *buf, unsigned len, struct rte_mbuf *pkt, unsigned offset)
 		copy_buf_to_pkt_segs(buf, len, pkt, offset);
 }
 
-
 void
 initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
-		struct ether_addr *dst_mac, uint8_t ipv4, uint8_t vlan_enabled,
-		uint16_t van_id)
+		struct ether_addr *dst_mac, uint16_t ether_type,
+		uint8_t vlan_enabled, uint16_t van_id)
 {
 	ether_addr_copy(dst_mac, &eth_hdr->d_addr);
 	ether_addr_copy(src_mac, &eth_hdr->s_addr);
@@ -95,19 +94,27 @@ initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac,
 
 		eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
 
-		if (ipv4)
-			vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-		else
-			vhdr->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
-
+		vhdr->eth_proto = rte_cpu_to_be_16(ether_type);
 		vhdr->vlan_tci = van_id;
 	} else {
-		if (ipv4)
-			eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
-		else
-			eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv6);
+		eth_hdr->ether_type = rte_cpu_to_be_16(ether_type);
 	}
+}
+void
+initialize_arp_header(struct arp_hdr *arp_hdr, struct ether_addr *src_mac,
+		struct ether_addr *dst_mac, uint32_t src_ip, uint32_t dst_ip,
+		uint32_t opcode)
+{
+	arp_hdr->arp_hrd = rte_cpu_to_be_16(ARP_HRD_ETHER);
+	arp_hdr->arp_pro = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
+	arp_hdr->arp_hln = ETHER_ADDR_LEN;
+	arp_hdr->arp_pln = sizeof(uint32_t);
+	arp_hdr->arp_op = rte_cpu_to_be_16(opcode);
+	ether_addr_copy(src_mac, &arp_hdr->arp_data.arp_sha);
+	arp_hdr->arp_data.arp_sip = src_ip;
+	ether_addr_copy(dst_mac, &arp_hdr->arp_data.arp_tha);
+	arp_hdr->arp_data.arp_tip = dst_ip;
 }
 
 uint16_t
@@ -265,9 +272,19 @@ nomore_mbuf:
 		if (ipv4) {
 			pkt->vlan_tci = ETHER_TYPE_IPv4;
 			pkt->l3_len = sizeof(struct ipv4_hdr);
+
+			if (vlan_enabled)
+				pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
+			else
+				pkt->ol_flags = PKT_RX_IPV4_HDR;
 		} else {
 			pkt->vlan_tci = ETHER_TYPE_IPv6;
 			pkt->l3_len = sizeof(struct ipv6_hdr);
+
+			if (vlan_enabled)
+				pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
+			else
+				pkt->ol_flags = PKT_RX_IPV6_HDR;
 		}
 
 		pkts_burst[nb_pkt] = pkt;
diff --git a/app/test/packet_burst_generator.h b/app/test/packet_burst_generator.h
index 666cc8e597..edc104417b 100644
--- a/app/test/packet_burst_generator.h
+++ b/app/test/packet_burst_generator.h
@@ -40,6 +40,7 @@ extern "C" {
 #include
 #include
+#include <rte_arp.h>
#include #include @@ -50,11 +51,15 @@ extern "C" { #define PACKET_BURST_GEN_PKT_LEN 60 #define PACKET_BURST_GEN_PKT_LEN_128 128 - void initialize_eth_header(struct ether_hdr *eth_hdr, struct ether_addr *src_mac, - struct ether_addr *dst_mac, uint8_t ipv4, uint8_t vlan_enabled, - uint16_t van_id); + struct ether_addr *dst_mac, uint16_t ether_type, + uint8_t vlan_enabled, uint16_t van_id); + +void +initialize_arp_header(struct arp_hdr *arp_hdr, struct ether_addr *src_mac, + struct ether_addr *dst_mac, uint32_t src_ip, uint32_t dst_ip, + uint32_t opcode); uint16_t initialize_udp_header(struct udp_hdr *udp_hdr, uint16_t src_port, diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c index ef8207ebf0..7adc6da64f 100644 --- a/app/test/test_link_bonding.c +++ b/app/test/test_link_bonding.c @@ -1313,17 +1313,22 @@ generate_test_burst(struct rte_mbuf **pkts_burst, uint16_t burst_size, uint8_t vlan, uint8_t ipv4, uint8_t toggle_dst_mac, uint8_t toggle_ip_addr, uint8_t toggle_udp_port) { - uint16_t pktlen, generated_burst_size; + uint16_t pktlen, generated_burst_size, ether_type; void *ip_hdr; + if (ipv4) + ether_type = ETHER_TYPE_IPv4; + else + ether_type = ETHER_TYPE_IPv6; + if (toggle_dst_mac) initialize_eth_header(test_params->pkt_eth_hdr, (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_1, - ipv4, vlan, vlan_id); + ether_type, vlan, vlan_id); else initialize_eth_header(test_params->pkt_eth_hdr, (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, - ipv4, vlan, vlan_id); + ether_type, vlan, vlan_id); if (toggle_udp_port) @@ -2094,7 +2099,8 @@ test_activebackup_tx_burst(void) "Failed to initialize bonded device with slaves"); initialize_eth_header(test_params->pkt_eth_hdr, - (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, 1, 0, 0); + (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, + ETHER_TYPE_IPv4, 0, 0); pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port, dst_port_0, 16); pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr, @@ -2637,7 +2643,8 @@ test_balance_l2_tx_burst(void) "Failed to set balance xmit policy."); initialize_eth_header(test_params->pkt_eth_hdr, - (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, 1, 0, 0); + (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, + ETHER_TYPE_IPv4, 0, 0); pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port, dst_port_0, 16); pktlen = initialize_ipv4_header(test_params->pkt_ipv4_hdr, src_addr, @@ -2651,7 +2658,8 @@ test_balance_l2_tx_burst(void) "failed to generate packet burst"); initialize_eth_header(test_params->pkt_eth_hdr, - (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_1, 1, 0, 0); + (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_1, + ETHER_TYPE_IPv4, 0, 0); /* Generate a burst 2 of packets to transmit */ TEST_ASSERT_EQUAL(generate_packet_burst(test_params->mbuf_pool, &pkts_burst[1][0], @@ -3488,7 +3496,8 @@ test_broadcast_tx_burst(void) "Failed to intialise bonded device"); initialize_eth_header(test_params->pkt_eth_hdr, - (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, 1, 0, 0); + (struct ether_addr *)src_mac, (struct ether_addr *)dst_mac_0, + ETHER_TYPE_IPv4, 0, 0); pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port, dst_port_0, 16); @@ -4041,6 +4050,23 @@ testsuite_teardown(void) return remove_slaves_and_stop_bonded_device(); } +static void +free_virtualpmd_tx_queue(void) +{ + int i, slave_port, to_free_cnt; + struct rte_mbuf *pkts_to_free[MAX_PKT_BURST]; + + 
/* Free tx queue of virtual pmd */ + for (slave_port = 0; slave_port < test_params->bonded_slave_count; + slave_port++) { + to_free_cnt = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_port], + pkts_to_free, MAX_PKT_BURST); + for (i = 0; i < to_free_cnt; i++) + rte_pktmbuf_free(pkts_to_free[i]); + } +} + static int test_tlb_tx_burst(void) { @@ -4068,11 +4094,11 @@ test_tlb_tx_burst(void) if (i % 2 == 0) { initialize_eth_header(test_params->pkt_eth_hdr, (struct ether_addr *)src_mac, - (struct ether_addr *)dst_mac_0, 1, 0, 0); + (struct ether_addr *)dst_mac_0, ETHER_TYPE_IPv4, 0, 0); } else { initialize_eth_header(test_params->pkt_eth_hdr, (struct ether_addr *)test_params->default_slave_mac, - (struct ether_addr *)dst_mac_0, 1, 0, 0); + (struct ether_addr *)dst_mac_0, ETHER_TYPE_IPv4, 0, 0); } pktlen = initialize_udp_header(test_params->pkt_udp_hdr, src_port, dst_port_0, 16); @@ -4086,6 +4112,8 @@ test_tlb_tx_burst(void) burst_size); nb_tx2 += nb_tx; + free_virtualpmd_tx_queue(); + TEST_ASSERT_EQUAL(nb_tx, burst_size, "number of packet not equal burst size"); @@ -4474,14 +4502,13 @@ test_tlb_verify_slave_link_status_change_failover(void) rte_eth_stats_get(test_params->slave_port_ids[0], &port_stats); TEST_ASSERT_EQUAL(port_stats.opackets, (int8_t)0, - "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[0]); + "(%d) port_stats.opackets not as expected\n", + test_params->slave_port_ids[0]); rte_eth_stats_get(test_params->slave_port_ids[1], &port_stats); TEST_ASSERT_NOT_EQUAL(port_stats.opackets, (int8_t)0, - "(%d) port_stats.opackets not as expected\n", - test_params->slave_port_ids[1]); - + "(%d) port_stats.opackets not as expected\n", + test_params->slave_port_ids[1]); rte_eth_stats_get(test_params->slave_port_ids[2], &port_stats); TEST_ASSERT_NOT_EQUAL(port_stats.opackets, (int8_t)0, @@ -4534,6 +4561,386 @@ test_tlb_verify_slave_link_status_change_failover(void) return remove_slaves_and_stop_bonded_device(); } +#define TEST_ALB_SLAVE_COUNT 2 + +static uint8_t mac_client1[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 1}; +static uint8_t mac_client2[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 2}; +static uint8_t mac_client3[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 3}; +static uint8_t mac_client4[] = {0x00, 0xAA, 0x55, 0xFF, 0xCC, 4}; + +static uint32_t ip_host = IPV4_ADDR(192, 168, 0, 0); +static uint32_t ip_client1 = IPV4_ADDR(192, 168, 0, 1); +static uint32_t ip_client2 = IPV4_ADDR(192, 168, 0, 2); +static uint32_t ip_client3 = IPV4_ADDR(192, 168, 0, 3); +static uint32_t ip_client4 = IPV4_ADDR(192, 168, 0, 4); + +static int +test_alb_change_mac_in_reply_sent(void) +{ + struct rte_mbuf *pkt; + struct rte_mbuf *pkts_sent[MAX_PKT_BURST]; + + struct ether_hdr *eth_pkt; + struct arp_hdr *arp_pkt; + + int slave_idx, nb_pkts, pkt_idx; + int retval = 0; + + struct ether_addr bond_mac, client_mac; + struct ether_addr *slave_mac1, *slave_mac2; + + TEST_ASSERT_SUCCESS( + initialize_bonded_device_with_slaves(BONDING_MODE_ALB, + 0, TEST_ALB_SLAVE_COUNT, 1), + "Failed to initialize_bonded_device_with_slaves."); + + /* Flush tx queue */ + rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; + slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, + MAX_PKT_BURST); + } + + ether_addr_copy( + rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + &bond_mac); + + /* + * Generating four packets with different mac and ip addresses and 
sending + * them through the bonding port. + */ + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client1, + ARP_OP_REPLY); + rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client2, + ARP_OP_REPLY); + rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client3, + ARP_OP_REPLY); + rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &bond_mac, &client_mac, ip_host, ip_client4, + ARP_OP_REPLY); + rte_eth_tx_burst(test_params->bonded_port_id, 0, &pkt, 1); + + slave_mac1 = + rte_eth_devices[test_params->slave_port_ids[0]].data->mac_addrs; + slave_mac2 = + rte_eth_devices[test_params->slave_port_ids[1]].data->mac_addrs; + + /* + * Checking if packets are properly distributed on bonding ports. Packets + * 0 and 2 should be sent on port 0 and packets 1 and 3 on port 1. 
+ */ + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, + MAX_PKT_BURST); + + for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { + eth_pkt = rte_pktmbuf_mtod(pkts_sent[pkt_idx], struct ether_hdr *); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + + if (slave_idx%2 == 0) { + if (!is_same_ether_addr(slave_mac1, &arp_pkt->arp_data.arp_sha)) { + retval = -1; + goto test_end; + } + } else { + if (!is_same_ether_addr(slave_mac2, &arp_pkt->arp_data.arp_sha)) { + retval = -1; + goto test_end; + } + } + } + } + +test_end: + retval += remove_slaves_and_stop_bonded_device(); + return retval; +} + +static int +test_alb_reply_from_client(void) +{ + struct ether_hdr *eth_pkt; + struct arp_hdr *arp_pkt; + + struct rte_mbuf *pkt; + struct rte_mbuf *pkts_sent[MAX_PKT_BURST]; + + int slave_idx, nb_pkts, pkt_idx, nb_pkts_sum = 0; + int retval = 0; + + struct ether_addr bond_mac, client_mac; + struct ether_addr *slave_mac1, *slave_mac2; + + TEST_ASSERT_SUCCESS( + initialize_bonded_device_with_slaves(BONDING_MODE_ALB, + 0, TEST_ALB_SLAVE_COUNT, 1), + "Failed to initialize_bonded_device_with_slaves."); + + /* Flush tx queue */ + rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, + MAX_PKT_BURST); + } + + ether_addr_copy( + rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + &bond_mac); + + /* + * Generating four packets with different mac and ip addresses and placing + * them in the rx queue to be received by the bonding driver on rx_burst. 
+ */ + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host, + ARP_OP_REPLY); + virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client2, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client2, ip_host, + ARP_OP_REPLY); + virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client3, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client3, ip_host, + ARP_OP_REPLY); + virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + 1); + + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client4, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_ARP, 0, + 0); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client4, ip_host, + ARP_OP_REPLY); + virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + 1); + + /* + * Issue rx_burst and tx_burst to force bonding driver to send update ARP + * packets to every client in alb table. + */ + rte_eth_rx_burst(test_params->bonded_port_id, 0, pkts_sent, MAX_PKT_BURST); + rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + + slave_mac1 = rte_eth_devices[test_params->slave_port_ids[0]].data->mac_addrs; + slave_mac2 = rte_eth_devices[test_params->slave_port_ids[1]].data->mac_addrs; + + /* + * Checking if update ARP packets were properly send on slave ports. 
+ */ + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, MAX_PKT_BURST); + nb_pkts_sum += nb_pkts; + + for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { + eth_pkt = rte_pktmbuf_mtod(pkts_sent[pkt_idx], struct ether_hdr *); + arp_pkt = (struct arp_hdr *)((char *)eth_pkt + sizeof(struct ether_hdr)); + + if (slave_idx%2 == 0) { + if (!is_same_ether_addr(slave_mac1, &arp_pkt->arp_data.arp_sha)) { + retval = -1; + goto test_end; + } + } else { + if (!is_same_ether_addr(slave_mac2, &arp_pkt->arp_data.arp_sha)) { + retval = -1; + goto test_end; + } + } + } + } + + /* Check if proper number of packets was send */ + if (nb_pkts_sum < 4) { + retval = -1; + goto test_end; + } + +test_end: + retval += remove_slaves_and_stop_bonded_device(); + return retval; +} + +static int +test_alb_receive_vlan_reply(void) +{ + struct ether_hdr *eth_pkt; + struct vlan_hdr *vlan_pkt; + struct arp_hdr *arp_pkt; + + struct rte_mbuf *pkt; + struct rte_mbuf *pkts_sent[MAX_PKT_BURST]; + + int slave_idx, nb_pkts, pkt_idx; + int retval = 0; + + struct ether_addr bond_mac, client_mac; + + TEST_ASSERT_SUCCESS( + initialize_bonded_device_with_slaves(BONDING_MODE_ALB, + 0, TEST_ALB_SLAVE_COUNT, 1), + "Failed to initialize_bonded_device_with_slaves."); + + /* Flush tx queue */ + rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, + MAX_PKT_BURST); + } + + ether_addr_copy( + rte_eth_devices[test_params->bonded_port_id].data->mac_addrs, + &bond_mac); + + /* + * Generating packet with double VLAN header and placing it in the rx queue. + */ + pkt = rte_pktmbuf_alloc(test_params->mbuf_pool); + memcpy(client_mac.addr_bytes, mac_client1, ETHER_ADDR_LEN); + eth_pkt = rte_pktmbuf_mtod(pkt, struct ether_hdr *); + initialize_eth_header(eth_pkt, &bond_mac, &client_mac, ETHER_TYPE_VLAN, 0, + 0); + vlan_pkt = (struct vlan_hdr *)((char *)(eth_pkt + 1)); + vlan_pkt->vlan_tci = rte_cpu_to_be_16(1); + vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_VLAN); + vlan_pkt = vlan_pkt+1; + vlan_pkt->vlan_tci = rte_cpu_to_be_16(2); + vlan_pkt->eth_proto = rte_cpu_to_be_16(ETHER_TYPE_ARP); + arp_pkt = (struct arp_hdr *)((char *)(vlan_pkt + 1)); + initialize_arp_header(arp_pkt, &client_mac, &bond_mac, ip_client1, ip_host, + ARP_OP_REPLY); + virtual_ethdev_add_mbufs_to_rx_queue(test_params->slave_port_ids[0], &pkt, + 1); + + rte_eth_rx_burst(test_params->bonded_port_id, 0, pkts_sent, MAX_PKT_BURST); + rte_eth_tx_burst(test_params->bonded_port_id, 0, NULL, 0); + + /* + * Checking if VLAN headers in generated ARP Update packet are correct. 
+ */ + for (slave_idx = 0; slave_idx < test_params->bonded_slave_count; slave_idx++) { + nb_pkts = virtual_ethdev_get_mbufs_from_tx_queue( + test_params->slave_port_ids[slave_idx], pkts_sent, + MAX_PKT_BURST); + + for (pkt_idx = 0; pkt_idx < nb_pkts; pkt_idx++) { + eth_pkt = rte_pktmbuf_mtod(pkts_sent[pkt_idx], struct ether_hdr *); + vlan_pkt = (struct vlan_hdr *)((char *)(eth_pkt + 1)); + if (vlan_pkt->vlan_tci != rte_cpu_to_be_16(1)) { + retval = -1; + goto test_end; + } + if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) { + retval = -1; + goto test_end; + } + vlan_pkt = vlan_pkt+1; + if (vlan_pkt->vlan_tci != rte_cpu_to_be_16(2)) { + retval = -1; + goto test_end; + } + if (vlan_pkt->eth_proto != rte_cpu_to_be_16(ETHER_TYPE_ARP)) { + retval = -1; + goto test_end; + } + } + } + +test_end: + retval += remove_slaves_and_stop_bonded_device(); + return retval; +} + +static int +test_alb_ipv4_tx(void) +{ + int burst_size, retval, pkts_send; + struct rte_mbuf *pkt_burst[MAX_PKT_BURST]; + + retval = 0; + + TEST_ASSERT_SUCCESS( + initialize_bonded_device_with_slaves(BONDING_MODE_ALB, + 0, TEST_ALB_SLAVE_COUNT, 1), + "Failed to initialize_bonded_device_with_slaves."); + + burst_size = 32; + + /* Generate test bursts of packets to transmit */ + if (generate_test_burst(pkt_burst, burst_size, 0, 1, 0, 0, 0) != burst_size) { + retval = -1; + goto test_end; + } + + /* + * Checking if ipv4 traffic is transmitted via TLB policy. + */ + pkts_send = rte_eth_tx_burst( + test_params->bonded_port_id, 0, pkt_burst, burst_size); + if (pkts_send != burst_size) { + retval = -1; + goto test_end; + } + +test_end: + retval += remove_slaves_and_stop_bonded_device(); + return retval; +} static struct unit_test_suite link_bonding_test_suite = { .suite_name = "Link Bonding Unit Test Suite", @@ -4593,6 +5000,10 @@ static struct unit_test_suite link_bonding_test_suite = { TEST_CASE(test_tlb_verify_mac_assignment), TEST_CASE(test_tlb_verify_promiscuous_enable_disable), TEST_CASE(test_tlb_verify_slave_link_status_change_failover), + TEST_CASE(test_alb_change_mac_in_reply_sent), + TEST_CASE(test_alb_reply_from_client), + TEST_CASE(test_alb_receive_vlan_reply), + TEST_CASE(test_alb_ipv4_tx), #ifdef RTE_MBUF_REFCNT TEST_CASE(test_broadcast_tx_burst), TEST_CASE(test_broadcast_tx_burst_slave_tx_fail), diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c index bad9503ede..d6a4a45a19 100644 --- a/app/test/test_pmd_perf.c +++ b/app/test/test_pmd_perf.c @@ -235,8 +235,7 @@ init_traffic(struct rte_mempool *mp, initialize_eth_header(&pkt_eth_hdr, (struct ether_addr *)src_mac, - (struct ether_addr *)dst_mac, 1, 0, 0); - pkt_eth_hdr.ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4); + (struct ether_addr *)dst_mac, ETHER_TYPE_IPv4, 0, 0); pktlen = initialize_ipv4_header(&pkt_ipv4_hdr, IPV4_ADDR(10, 0, 0, 1), diff --git a/app/test/virtual_pmd.c b/app/test/virtual_pmd.c index 9fac95d774..cd9faf3da6 100644 --- a/app/test/virtual_pmd.c +++ b/app/test/virtual_pmd.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "virtual_pmd.h" @@ -46,8 +47,8 @@ static const char *virtual_ethdev_driver_name = "Virtual PMD"; struct virtual_ethdev_private { struct rte_eth_stats eth_stats; - struct rte_mbuf *rx_pkt_burst[MAX_PKT_BURST]; - int rx_pkt_burst_len; + struct rte_ring *rx_queue; + struct rte_ring *tx_queue; int tx_burst_fail_count; }; @@ -74,8 +75,16 @@ virtual_ethdev_start_fail(struct rte_eth_dev *eth_dev __rte_unused) } static void virtual_ethdev_stop(struct rte_eth_dev *eth_dev __rte_unused) { + struct rte_mbuf 
*pkt = NULL; + struct virtual_ethdev_private *prv = eth_dev->data->dev_private; + eth_dev->data->dev_link.link_status = 0; eth_dev->data->dev_started = 0; + while (rte_ring_dequeue(prv->rx_queue, (void **)&pkt) != -ENOENT) + rte_pktmbuf_free(pkt); + + while (rte_ring_dequeue(prv->tx_queue, (void **)&pkt) != -ENOENT) + rte_pktmbuf_free(pkt); } static void @@ -214,8 +223,10 @@ static void virtual_ethdev_stats_reset(struct rte_eth_dev *dev) { struct virtual_ethdev_private *dev_private = dev->data->dev_private; + struct rte_mbuf *pkt = NULL; - dev_private->rx_pkt_burst_len = 0; + while (rte_ring_dequeue(dev_private->tx_queue, (void **)&pkt) == -ENOBUFS) + rte_pktmbuf_free(pkt); /* Reset internal statistics */ memset(&dev_private->eth_stats, 0, sizeof(dev_private->eth_stats)); @@ -318,29 +329,23 @@ virtual_ethdev_rx_burst_success(void *queue __rte_unused, struct virtual_ethdev_queue *pq_map; struct virtual_ethdev_private *dev_private; - int i; + int rx_count, i; pq_map = (struct virtual_ethdev_queue *)queue; - vrtl_eth_dev = &rte_eth_devices[pq_map->port_id]; - dev_private = vrtl_eth_dev->data->dev_private; - if (dev_private->rx_pkt_burst_len > 0) { - if (dev_private->rx_pkt_burst_len < nb_pkts) { + rx_count = rte_ring_dequeue_burst(dev_private->rx_queue, (void **) bufs, + nb_pkts); - for (i = 0; i < dev_private->rx_pkt_burst_len; i++) { - bufs[i] = dev_private->rx_pkt_burst[i]; - dev_private->rx_pkt_burst[i] = NULL; - } + /* increments ipackets count */ + dev_private->eth_stats.ipackets += rx_count; - dev_private->eth_stats.ipackets = dev_private->rx_pkt_burst_len; - } - /* reset private burst values */ - dev_private->rx_pkt_burst_len = 0; - } + /* increments ibytes count */ + for (i = 0; i < rx_count; i++) + dev_private->eth_stats.ibytes += rte_pktmbuf_pkt_len(bufs[i]); - return dev_private->eth_stats.ipackets; + return rx_count; } static uint16_t @@ -359,26 +364,26 @@ virtual_ethdev_tx_burst_success(void *queue, struct rte_mbuf **bufs, struct rte_eth_dev *vrtl_eth_dev; struct virtual_ethdev_private *dev_private; - uint64_t obytes = 0; + int i; - for (i = 0; i < nb_pkts; i++) - obytes += rte_pktmbuf_pkt_len(bufs[i]); vrtl_eth_dev = &rte_eth_devices[tx_q->port_id]; dev_private = vrtl_eth_dev->data->dev_private; - if (vrtl_eth_dev->data->dev_link.link_status) { - /* increment opacket count */ - dev_private->eth_stats.opackets += nb_pkts; - dev_private->eth_stats.obytes += obytes; - /* free packets in burst */ - for (i = 0; i < nb_pkts; i++) - rte_pktmbuf_free(bufs[i]); + if (!vrtl_eth_dev->data->dev_link.link_status) + nb_pkts = 0; + else + nb_pkts = rte_ring_enqueue_burst(dev_private->tx_queue, (void **)bufs, + nb_pkts); - return nb_pkts; - } + /* increment opacket count */ + dev_private->eth_stats.opackets += nb_pkts; - return 0; + /* increment obytes count */ + for (i = 0; i < nb_pkts; i++) + dev_private->eth_stats.obytes += rte_pktmbuf_pkt_len(bufs[i]); + + return nb_pkts; } static uint16_t @@ -476,23 +481,28 @@ virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id, _rte_eth_dev_callback_process(vrtl_eth_dev, RTE_ETH_EVENT_INTR_LSC); } - - -void +int virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id, struct rte_mbuf **pkt_burst, int burst_length) { - struct virtual_ethdev_private *dev_private = NULL; struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id]; + struct virtual_ethdev_private *dev_private = + vrtl_eth_dev->data->dev_private; - int i; - - dev_private = vrtl_eth_dev->data->dev_private; + return rte_ring_enqueue_burst(dev_private->rx_queue, (void **)pkt_burst, + 
burst_length); +} - for (i = 0; i < burst_length; i++) - dev_private->rx_pkt_burst[i] = pkt_burst[i]; +int +virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id, + struct rte_mbuf **pkt_burst, int burst_length) +{ + struct virtual_ethdev_private *dev_private; + struct rte_eth_dev *vrtl_eth_dev = &rte_eth_devices[port_id]; - dev_private->rx_pkt_burst_len = burst_length; + dev_private = vrtl_eth_dev->data->dev_private; + return rte_ring_dequeue_burst(dev_private->tx_queue, (void **)pkt_burst, + burst_length); } static uint8_t @@ -510,7 +520,6 @@ get_number_of_sockets(void) return ++sockets; } - int virtual_ethdev_create(const char *name, struct ether_addr *mac_addr, uint8_t socket_id, uint8_t isr_support) @@ -522,6 +531,7 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr, struct eth_dev_ops *dev_ops = NULL; struct rte_pci_id *id_table = NULL; struct virtual_ethdev_private *dev_private = NULL; + char name_buf[RTE_RING_NAMESIZE]; /* now do all data allocation - for eth_dev structure, dummy pci driver @@ -555,6 +565,20 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr, if (dev_private == NULL) goto err; + memset(dev_private, 0, sizeof(*dev_private)); + + snprintf(name_buf, sizeof(name_buf), "%s_rxQ", name); + dev_private->rx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id, + 0); + if (dev_private->rx_queue == NULL) + goto err; + + snprintf(name_buf, sizeof(name_buf), "%s_txQ", name); + dev_private->tx_queue = rte_ring_create(name_buf, MAX_PKT_BURST, socket_id, + 0); + if (dev_private->tx_queue == NULL) + goto err; + /* reserve an ethdev entry */ eth_dev = rte_eth_dev_allocate(name); if (eth_dev == NULL) @@ -594,7 +618,6 @@ virtual_ethdev_create(const char *name, struct ether_addr *mac_addr, eth_dev->data->scattered_rx = 0; eth_dev->data->all_multicast = 0; - memset(dev_private, 0, sizeof(*dev_private)); eth_dev->data->dev_private = dev_private; eth_dev->dev_ops = dev_ops; diff --git a/app/test/virtual_pmd.h b/app/test/virtual_pmd.h index 2462853ec6..de001884de 100644 --- a/app/test/virtual_pmd.h +++ b/app/test/virtual_pmd.h @@ -54,10 +54,13 @@ void virtual_ethdev_simulate_link_status_interrupt(uint8_t port_id, uint8_t link_status); -void +int virtual_ethdev_add_mbufs_to_rx_queue(uint8_t port_id, struct rte_mbuf **pkts_burst, int burst_length); +int +virtual_ethdev_get_mbufs_from_tx_queue(uint8_t port_id, + struct rte_mbuf **pkt_burst, int burst_length); /** Control methods for the dev_ops functions pointer to control the behavior * of the Virtual PMD */ -- 2.20.1