From c3dfe188ba05d495c3e2909e1b401cc9ffa36ee8 Mon Sep 17 00:00:00 2001
From: Ouyang Changchun
Date: Wed, 28 May 2014 16:06:38 +0800
Subject: [PATCH] examples/vhost: zero copy mode

This patch supports user space vhost zero copy. It removes packet copying
between host and guest in RX/TX, and introduces an extra ring to store the
detached mbufs. At the initialization stage all mbufs are put into this
ring; when a guest starts, vhost gets the available buffer addresses
allocated by the guest for RX, translates them into host space addresses,
attaches them to mbufs and puts the attached mbufs into the mempool.
Queue starting and DMA refilling then get mbufs from the mempool and use
them to set the DMA addresses.

For TX, vhost gets the buffer addresses of the packets to be transmitted
from the guest, translates them to host space addresses, attaches them to
mbufs and puts those onto the TX queues. After TX finishes, it pulls the
mbufs out of the mempool, detaches them and puts them back into the extra
ring.

Signed-off-by: Ouyang Changchun
Tested-by: Waterman Cao
Acked-by: Thomas Monjalon
---
 examples/vhost/main.c       | 1476 +++++++++++++++++++++++++++++++++--
 examples/vhost/virtio-net.c |  186 ++++-
 examples/vhost/virtio-net.h |   23 +-
 3 files changed, 1623 insertions(+), 62 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index b86d57dcca..e911111ffc 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -48,6 +48,7 @@
 #include 
 #include 
 #include 
+#include 

 #include "main.h"
 #include "virtio-net.h"
@@ -69,6 +70,16 @@
 #define MBUF_CACHE_SIZE 128
 #define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

+/*
+ * No frame data buffer allocated from the host is required for the zero
+ * copy implementation: the guest allocates the frame data buffer and
+ * vhost uses it directly.
+ */
+#define VIRTIO_DESCRIPTOR_LEN_ZCP 1518
+#define MBUF_SIZE_ZCP (VIRTIO_DESCRIPTOR_LEN_ZCP + sizeof(struct rte_mbuf) \
+	+ RTE_PKTMBUF_HEADROOM)
+#define MBUF_CACHE_SIZE_ZCP 0
+
 /*
  * RX and TX Prefetch, Host, and Write-back threshold values should be
  * carefully set for optimal performance. Consult the network
@@ -108,6 +119,25 @@
 #define RTE_TEST_RX_DESC_DEFAULT 1024
 #define RTE_TEST_TX_DESC_DEFAULT 512

+/*
+ * These two macros need refining for the legacy and DPDK-based front
+ * ends: take the max vring avail descriptors/entries from the guest,
+ * subtract MAX_PKT_BURST, then round to a power of 2.
+ */
+/*
+ * The legacy front end has 128 descriptors:
+ * half for virtio headers, the other half for mbufs.
+ */
+#define RTE_TEST_RX_DESC_DEFAULT_ZCP 32 /* legacy: 32, DPDK virt FE: 128. */
+#define RTE_TEST_TX_DESC_DEFAULT_ZCP 64 /* legacy: 64, DPDK virt FE: 64. */
+
+/* Get the first 4 bytes in the mbuf headroom. */
+#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
+	+ sizeof(struct rte_mbuf)))
+
+/* True if x is a power of 2. */
+#define POWEROF2(x) ((((x)-1) & (x)) == 0)
+
 #define INVALID_PORT_ID 0xFF

 /* Max number of devices. Limited by vmdq. */
@@ -138,8 +168,42 @@ static uint32_t num_switching_cores = 0;
 static uint32_t num_queues = 0;
 uint32_t num_devices = 0;

+/*
+ * Enable zero copy: packet buffers then DMA directly to/from the guest's
+ * memory through the hw descriptors; disabled by default.
+ */
+static uint32_t zero_copy;
+
+/* Number of descriptors per ring to apply. */
+static uint32_t num_rx_descriptor = RTE_TEST_RX_DESC_DEFAULT_ZCP;
+static uint32_t num_tx_descriptor = RTE_TEST_TX_DESC_DEFAULT_ZCP;
+
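The scheme above boils down to pointing an mbuf at a guest-allocated
buffer instead of copying into a host-allocated one. A condensed sketch
of the attach step (the helper name is hypothetical; the complete logic,
with error handling, lives in attach_rxmbuf_zcp() further below, built on
the gpa_to_vva()/gpa_to_hpa() translations this patch adds):

static inline void
zcp_attach_sketch(struct virtio_net *dev, struct rte_mbuf *mbuf,
	struct vring_desc *desc)
{
	hpa_type addr_type;
	uint64_t vva = gpa_to_vva(dev, desc->addr); /* host virtual addr */
	uint64_t hpa = gpa_to_hpa(dev, desc->addr, desc->len, &addr_type);

	/* Point the mbuf at the guest frame buffer; no data is copied. */
	mbuf->buf_addr = (void *)(uintptr_t)(vva - RTE_PKTMBUF_HEADROOM);
	mbuf->buf_physaddr = hpa - RTE_PKTMBUF_HEADROOM;
	mbuf->pkt.data = (void *)(uintptr_t)vva;
	mbuf->pkt.data_len = desc->len;
}

+/* Max ring descriptors: ixgbe, i40e and e1000 all support 4096.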
*/ +#define MAX_RING_DESC 4096 + +struct vpool { + struct rte_mempool *pool; + struct rte_ring *ring; + uint32_t buf_size; +} vpool_array[MAX_QUEUES+MAX_QUEUES]; + /* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */ -static uint32_t enable_vm2vm = 1; +typedef enum { + VM2VM_DISABLED = 0, + VM2VM_SOFTWARE = 1, + VM2VM_HARDWARE = 2, + VM2VM_LAST +} vm2vm_type; +static vm2vm_type vm2vm_mode = VM2VM_SOFTWARE; + +/* The type of host physical address translated from guest physical address. */ +typedef enum { + PHYS_ADDR_CONTINUOUS = 0, + PHYS_ADDR_CROSS_SUBREG = 1, + PHYS_ADDR_INVALID = 2, + PHYS_ADDR_LAST +} hpa_type; + /* Enable stats. */ static uint32_t enable_stats = 0; /* Enable retries on RX. */ @@ -159,7 +223,7 @@ static uint32_t dev_index = 0; extern uint64_t VHOST_FEATURES; /* Default configuration for rx and tx thresholds etc. */ -static const struct rte_eth_rxconf rx_conf_default = { +static struct rte_eth_rxconf rx_conf_default = { .rx_thresh = { .pthresh = RX_PTHRESH, .hthresh = RX_HTHRESH, @@ -173,7 +237,7 @@ static const struct rte_eth_rxconf rx_conf_default = { * Controller and the DPDK ixgbe/igb PMD. Consider using other values for other * network controllers and/or network drivers. */ -static const struct rte_eth_txconf tx_conf_default = { +static struct rte_eth_txconf tx_conf_default = { .tx_thresh = { .pthresh = TX_PTHRESH, .hthresh = TX_HTHRESH, @@ -184,7 +248,7 @@ static const struct rte_eth_txconf tx_conf_default = { }; /* empty vmdq configuration structure. Filled in programatically */ -static const struct rte_eth_conf vmdq_conf_default = { +static struct rte_eth_conf vmdq_conf_default = { .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY, .split_hdr_size = 0, @@ -223,6 +287,7 @@ static unsigned lcore_ids[RTE_MAX_LCORE]; static uint8_t ports[RTE_MAX_ETHPORTS]; static unsigned num_ports = 0; /**< The number of ports specified in command line */ +static const uint16_t external_pkt_default_vlan_tag = 2000; const uint16_t vlan_tags[] = { 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, @@ -254,6 +319,9 @@ struct mbuf_table { /* TX queue for each data core. */ struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE]; +/* TX queue fori each virtio device for zero copy. */ +struct mbuf_table tx_queue_zcp[MAX_QUEUES]; + /* Vlan header struct used to insert vlan tags on TX. */ struct vlan_ethhdr { unsigned char h_dest[ETH_ALEN]; @@ -263,6 +331,20 @@ struct vlan_ethhdr { __be16 h_vlan_encapsulated_proto; }; +/* IPv4 Header */ +struct ipv4_hdr { + uint8_t version_ihl; /**< version and header length */ + uint8_t type_of_service; /**< type of service */ + uint16_t total_length; /**< length of packet */ + uint16_t packet_id; /**< packet ID */ + uint16_t fragment_offset; /**< fragmentation offset */ + uint8_t time_to_live; /**< time to live */ + uint8_t next_proto_id; /**< protocol ID */ + uint16_t hdr_checksum; /**< header checksum */ + uint32_t src_addr; /**< source address */ + uint32_t dst_addr; /**< destination address */ +} __attribute__((__packed__)); + /* Header lengths. 
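 */

The vpool_array declared above follows a fixed indexing convention used
throughout the zero copy paths: entry q holds the RX mempool/ring pair of
VMDQ queue q, and entry MAX_QUEUES + q holds the TX side for the same
queue. A pair of hypothetical accessors, shown only to document that
layout (the patch itself indexes the array directly):

static inline struct vpool *vpool_rx(uint32_t vmdq_rx_q)
{
	return &vpool_array[vmdq_rx_q];              /* RX pool/ring */
}

static inline struct vpool *vpool_tx(uint32_t vmdq_rx_q)
{
	return &vpool_array[MAX_QUEUES + vmdq_rx_q]; /* TX pool/ring */
}

/* Header lengths.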
*/ #define VLAN_HLEN 4 #define VLAN_ETH_HLEN 18 @@ -270,9 +352,11 @@ struct vlan_ethhdr { /* Per-device statistics struct */ struct device_statistics { uint64_t tx_total; - rte_atomic64_t rx_total; + rte_atomic64_t rx_total_atomic; + uint64_t rx_total; uint64_t tx; - rte_atomic64_t rx; + rte_atomic64_t rx_atomic; + uint64_t rx; } __rte_cache_aligned; struct device_statistics dev_statistics[MAX_DEVICES]; @@ -289,6 +373,8 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices) memset(&conf, 0, sizeof(conf)); conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices; conf.nb_pool_maps = num_devices; + conf.enable_loop_back = + vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back; for (i = 0; i < conf.nb_pool_maps; i++) { conf.pool_map[i].vlan_id = vlan_tags[ i ]; @@ -302,9 +388,9 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices) } /* - * Validate the device number according to the max pool number gotten form dev_info - * If the device number is invalid, give the error message and return -1. - * Each device must have its own pool. + * Validate the device number according to the max pool number gotten form + * dev_info. If the device number is invalid, give the error message and + * return -1. Each device must have its own pool. */ static inline int validate_num_devices(uint32_t max_nb_devices) @@ -321,12 +407,12 @@ validate_num_devices(uint32_t max_nb_devices) * coming from the mbuf_pool passed as parameter */ static inline int -port_init(uint8_t port, struct rte_mempool *mbuf_pool) +port_init(uint8_t port) { struct rte_eth_dev_info dev_info; struct rte_eth_conf port_conf; - uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count(); - const uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT, tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; + uint16_t rx_rings, tx_rings; + uint16_t rx_ring_size, tx_ring_size; int retval; uint16_t q; @@ -337,6 +423,16 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) num_devices = dev_info.max_vmdq_pools; num_queues = dev_info.max_rx_queues; + if (zero_copy) { + rx_ring_size = num_rx_descriptor; + tx_ring_size = num_tx_descriptor; + tx_rings = dev_info.max_tx_queues; + } else { + rx_ring_size = RTE_TEST_RX_DESC_DEFAULT; + tx_ring_size = RTE_TEST_TX_DESC_DEFAULT; + tx_rings = (uint16_t)rte_lcore_count(); + } + retval = validate_num_devices(MAX_DEVICES); if (retval < 0) return retval; @@ -358,7 +454,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) for (q = 0; q < rx_rings; q ++) { retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, rte_eth_dev_socket_id(port), &rx_conf_default, - mbuf_pool); + vpool_array[q].pool); if (retval < 0) return retval; } @@ -371,8 +467,10 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) /* Start the device. 
 */
 	retval  = rte_eth_dev_start(port);
-	if (retval < 0)
+	if (retval < 0) {
+		RTE_LOG(ERR, DATA, "Failed to start the device.\n");
 		return retval;
+	}

 	rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
 	RTE_LOG(INFO, PORT, "Max virtio devices supported: %u\n", num_devices);
@@ -457,16 +555,26 @@ parse_num_opt(const char *q_arg, uint32_t max_valid_value)
 static void
 us_vhost_usage(const char *prgname)
 {
-	RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --rx_retry [0|1] --mergeable [0|1] --stats [0-N] --dev-basename --dev-index [0-N] --nb-devices ND\n"
+	RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK\n"
+	"		--vm2vm [0|1|2]\n"
+	"		--rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
+	"		--dev-basename --dev-index [0-N]\n"
+	"		--nb-devices ND\n"
 	"		-p PORTMASK: Set mask for ports to be used by application\n"
-	"		--vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
+	"		--vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
 	"		--rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destination queue is full\n"
 	"		--rx-retry-delay [0-N]: timeout (in microseconds) between retries on RX. Only takes effect if rx retries are enabled\n"
 	"		--rx-retry-num [0-N]: the number of retries on rx. Only takes effect if rx retries are enabled\n"
 	"		--mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
 	"		--stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n"
 	"		--dev-basename: The basename to be used for the character device.\n"
-	"		--dev-index [0-N]: Defaults to zero if not used. Index is appended to basename.\n",
+	"		--dev-index [0-N]: Defaults to zero if not used. Index is appended to basename.\n"
+	"		--zero-copy [0|1]: disable(default)/enable rx/tx "
+	"zero copy\n"
+	"		--rx-desc-num [0-N]: the number of descriptors on rx, "
+	"used only when zero copy is enabled.\n"
+	"		--tx-desc-num [0-N]: the number of descriptors on tx, "
+	"used only when zero copy is enabled.\n",
 	       prgname);
 }

@@ -489,7 +597,10 @@ us_vhost_parse_args(int argc, char **argv)
 		{"stats", required_argument, NULL, 0},
 		{"dev-basename", required_argument, NULL, 0},
 		{"dev-index", required_argument, NULL, 0},
-		{NULL, 0, 0, 0}
+		{"zero-copy", required_argument, NULL, 0},
+		{"rx-desc-num", required_argument, NULL, 0},
+		{"tx-desc-num", required_argument, NULL, 0},
+		{NULL, 0, 0, 0},
 	};

 	/* Parse command line */
@@ -507,14 +618,17 @@ us_vhost_parse_args(int argc, char **argv)
 		case 0:
 			/* Enable/disable vm2vm comms. */
-			if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
-				ret = parse_num_opt(optarg, 1);
+			if (!strncmp(long_option[option_index].name, "vm2vm",
+				MAX_LONG_OPT_SZ)) {
+				ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
 				if (ret == -1) {
-					RTE_LOG(INFO, CONFIG, "Invalid argument for vm2vm [0|1]\n");
+					RTE_LOG(INFO, CONFIG,
+						"Invalid argument for "
+						"vm2vm [0|1|2]\n");
 					us_vhost_usage(prgname);
 					return -1;
 				} else {
-					enable_vm2vm = ret;
+					vm2vm_mode = (vm2vm_type)ret;
 				}
 			}

@@ -595,8 +709,64 @@ us_vhost_parse_args(int argc, char **argv)
 					RTE_LOG(INFO, CONFIG, "Invalid argument for character device index [0..N]\n");
 					us_vhost_usage(prgname);
 					return -1;
-				} else {
+				} else
 					dev_index = ret;
+			}
+
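For example, a zero copy run combines the new options with hardware vm2vm
(the software mode is rejected under zero copy later in this function).
The EAL core/memory options and the binary name here are illustrative
only and depend on the build and machine:

	./build/vhost-switch -c 0xf -n 4 -- -p 0x1 --zero-copy 1 \
		--vm2vm 2 --rx-desc-num 32 --tx-desc-num 64

+			/* Enable/disable rx/tx zero copy.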
*/ + if (!strncmp(long_option[option_index].name, + "zero-copy", MAX_LONG_OPT_SZ)) { + ret = parse_num_opt(optarg, 1); + if (ret == -1) { + RTE_LOG(INFO, CONFIG, + "Invalid argument" + " for zero-copy [0|1]\n"); + us_vhost_usage(prgname); + return -1; + } else + zero_copy = ret; + + if (zero_copy) { +#ifdef RTE_MBUF_SCATTER_GATHER + RTE_LOG(ERR, CONFIG, "Before running " + "zero copy vhost APP, please " + "disable RTE_MBUF_SCATTER_GATHER\n" + "in config file and then rebuild DPDK " + "core lib!\n" + "Otherwise please disable zero copy " + "flag in command line!\n"); + return -1; +#endif + } + } + + /* Specify the descriptor number on RX. */ + if (!strncmp(long_option[option_index].name, + "rx-desc-num", MAX_LONG_OPT_SZ)) { + ret = parse_num_opt(optarg, MAX_RING_DESC); + if ((ret == -1) || (!POWEROF2(ret))) { + RTE_LOG(INFO, CONFIG, + "Invalid argument for rx-desc-num[0-N]," + "power of 2 required.\n"); + us_vhost_usage(prgname); + return -1; + } else { + num_rx_descriptor = ret; + } + } + + /* Specify the descriptor number on TX. */ + if (!strncmp(long_option[option_index].name, + "tx-desc-num", MAX_LONG_OPT_SZ)) { + ret = parse_num_opt(optarg, MAX_RING_DESC); + if ((ret == -1) || (!POWEROF2(ret))) { + RTE_LOG(INFO, CONFIG, + "Invalid argument for tx-desc-num [0-N]," + "power of 2 required.\n"); + us_vhost_usage(prgname); + return -1; + } else { + num_tx_descriptor = ret; } } @@ -620,6 +790,13 @@ us_vhost_parse_args(int argc, char **argv) return -1; } + if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) { + RTE_LOG(INFO, PORT, + "Vhost zero copy doesn't support software vm2vm," + "please specify 'vm2vm 2' to use hardware vm2vm.\n"); + return -1; + } + return 0; } @@ -700,6 +877,41 @@ gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa) return vhost_va; } +/* + * Function to convert guest physical addresses to vhost physical addresses. + * This is used to convert virtio buffer addresses. + */ +static inline uint64_t __attribute__((always_inline)) +gpa_to_hpa(struct virtio_net *dev, uint64_t guest_pa, + uint32_t buf_len, hpa_type *addr_type) +{ + struct virtio_memory_regions_hpa *region; + uint32_t regionidx; + uint64_t vhost_pa = 0; + + *addr_type = PHYS_ADDR_INVALID; + + for (regionidx = 0; regionidx < dev->mem->nregions_hpa; regionidx++) { + region = &dev->mem->regions_hpa[regionidx]; + if ((guest_pa >= region->guest_phys_address) && + (guest_pa <= region->guest_phys_address_end)) { + vhost_pa = region->host_phys_addr_offset + guest_pa; + if (likely((guest_pa + buf_len - 1) + <= region->guest_phys_address_end)) + *addr_type = PHYS_ADDR_CONTINUOUS; + else + *addr_type = PHYS_ADDR_CROSS_SUBREG; + break; + } + } + + LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| HPA %p\n", + dev->device_fh, (void *)(uintptr_t)guest_pa, + (void *)(uintptr_t)vhost_pa); + + return vhost_pa; +} + /* * This function adds buffers to the virtio devices RX virtqueue. Buffers can * be received from the physical port or from another virtio device. A packet @@ -894,7 +1106,6 @@ link_vmdq(struct virtio_net *dev, struct rte_mbuf *m) /* vlan_tag currently uses the device_id. */ dev->vlan_tag = vlan_tags[dev->device_fh]; - dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices); /* Print out VMDQ registration info. 
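 */

	/*
	 * A reference note on the hpa_type values produced by gpa_to_hpa()
	 * above, since every zero copy path below branches on them:
	 *
	 *	hpa_type t;
	 *	uint64_t hpa = gpa_to_hpa(dev, desc->addr, desc->len, &t);
	 *
	 * PHYS_ADDR_CONTINUOUS:   the whole buffer is host-physically
	 *                         contiguous and can be DMAed in place;
	 * PHYS_ADDR_CROSS_SUBREG: the buffer spans two host-contiguous
	 *                         sub-regions and cannot be attached;
	 * PHYS_ADDR_INVALID:      the address lies outside every guest
	 *                         memory region.
	 */

	/* Print out VMDQ registration info.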
*/ RTE_LOG(INFO, DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n", @@ -991,8 +1202,12 @@ virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m) /*send the packet to the local virtio device*/ ret = virtio_dev_rx(dev_ll->dev, &m, 1); if (enable_stats) { - rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1); - rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret); + rte_atomic64_add( + &dev_statistics[dev_ll->dev->device_fh].rx_total_atomic, + 1); + rte_atomic64_add( + &dev_statistics[dev_ll->dev->device_fh].rx_atomic, + ret); dev_statistics[dev->device_fh].tx_total++; dev_statistics[dev->device_fh].tx += ret; } @@ -1017,12 +1232,47 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool * struct vlan_ethhdr *vlan_hdr; struct rte_mbuf **m_table; struct rte_mbuf *mbuf; - unsigned len, ret; + unsigned len, ret, offset = 0; const uint16_t lcore_id = rte_lcore_id(); + struct virtio_net_data_ll *dev_ll = ll_root_used; + struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data; /*check if destination is local VM*/ - if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) { + if ((vm2vm_mode == VM2VM_SOFTWARE) && (virtio_tx_local(dev, m) == 0)) return; + + if (vm2vm_mode == VM2VM_HARDWARE) { + while (dev_ll != NULL) { + if ((dev_ll->dev->ready == DEVICE_RX) + && ether_addr_cmp(&(pkt_hdr->d_addr), + &dev_ll->dev->mac_address)) { + /* + * Drop the packet if the TX packet is + * destined for the TX device. + */ + if (dev_ll->dev->device_fh == dev->device_fh) { + LOG_DEBUG(DATA, + "(%"PRIu64") TX: Source and destination" + " MAC addresses are the same. Dropping " + "packet.\n", + dev_ll->dev->device_fh); + return; + } + offset = 4; + vlan_tag = + (uint16_t) + vlan_tags[(uint16_t)dev_ll->dev->device_fh]; + + LOG_DEBUG(DATA, + "(%"PRIu64") TX: pkt to local VM device id:" + "(%"PRIu64") vlan tag: %d.\n", + dev->device_fh, dev_ll->dev->device_fh, + vlan_tag); + + break; + } + dev_ll = dev_ll->next; + } } LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh); @@ -1038,7 +1288,7 @@ virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool * return; } - mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN; + mbuf->pkt.data_len = m->pkt.data_len + VLAN_HLEN + offset; mbuf->pkt.pkt_len = mbuf->pkt.data_len; /* Copy ethernet header to mbuf. */ @@ -1262,8 +1512,11 @@ switch_worker(__attribute__((unused)) void *arg) if (rx_count) { ret_count = virtio_dev_rx(dev, pkts_burst, rx_count); if (enable_stats) { - rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count); - rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count); + rte_atomic64_add( + &dev_statistics[dev_ll->dev->device_fh].rx_total_atomic, + rx_count); + rte_atomic64_add( + &dev_statistics[dev_ll->dev->device_fh].rx_atomic, ret_count); } while (likely(rx_count)) { rx_count--; @@ -1286,11 +1539,866 @@ switch_worker(__attribute__((unused)) void *arg) } /* - * Add an entry to a used linked list. A free entry must first be found in the free linked list - * using get_data_ll_free_entry(); + * This function gets available ring number for zero copy rx. + * Only one thread will call this funciton for a paticular virtio device, + * so, it is designed as non-thread-safe function. 
+ */
+static inline uint32_t __attribute__((always_inline))
+get_available_ring_num_zcp(struct virtio_net *dev)
+{
+	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
+	uint16_t avail_idx;
+
+	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+	return (uint32_t)(avail_idx - vq->last_used_idx_res);
+}
+
+/*
+ * This function gets the available ring indexes for zero copy rx,
+ * retrying 'burst_rx_retry_num' times until it gets enough of them.
+ * Only one thread calls this function for a particular virtio device,
+ * so it is designed as a non-thread-safe function.
+ */
+static inline uint32_t __attribute__((always_inline))
+get_available_ring_index_zcp(struct virtio_net *dev,
+	uint16_t *res_base_idx, uint32_t count)
+{
+	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
+	uint16_t avail_idx;
+	uint32_t retry = 0;
+	uint16_t free_entries;
+
+	*res_base_idx = vq->last_used_idx_res;
+	avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+	free_entries = (avail_idx - *res_base_idx);
+
+	LOG_DEBUG(DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
+		"avail idx: %d, "
+		"res base idx:%d, free entries:%d\n",
+		dev->device_fh, avail_idx, *res_base_idx,
+		free_entries);
+
+	/*
+	 * If retry is enabled and the queue is full then we wait
+	 * and retry to avoid packet loss.
+	 */
+	if (enable_retry && unlikely(count > free_entries)) {
+		for (retry = 0; retry < burst_rx_retry_num; retry++) {
+			rte_delay_us(burst_rx_delay_time);
+			avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+			free_entries = (avail_idx - *res_base_idx);
+			if (count <= free_entries)
+				break;
+		}
+	}
+
+	/* Check that we have enough buffers. */
+	if (unlikely(count > free_entries))
+		count = free_entries;
+
+	if (unlikely(count == 0)) {
+		LOG_DEBUG(DATA,
+			"(%"PRIu64") Fail in get_available_ring_index_zcp: "
+			"avail idx: %d, res base idx:%d, free entries:%d\n",
+			dev->device_fh, avail_idx,
+			*res_base_idx, free_entries);
+		return 0;
+	}
+
+	vq->last_used_idx_res = *res_base_idx + count;
+
+	return count;
+}
+
+/*
+ * This function puts a descriptor back on the used list.
+ */
+static inline void __attribute__((always_inline))
+put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
+{
+	uint16_t res_cur_idx = vq->last_used_idx;
+	vq->used->ring[res_cur_idx & (vq->size - 1)].id = (uint32_t)desc_idx;
+	vq->used->ring[res_cur_idx & (vq->size - 1)].len = 0;
+	rte_compiler_barrier();
+	*(volatile uint16_t *)&vq->used->idx += 1;
+	vq->last_used_idx += 1;
+
+	/* Kick the guest if necessary. */
+	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+		eventfd_write((int)vq->kickfd, 1);
+}
+
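A subtlety in the two functions above: avail->idx and last_used_idx_res
are free-running uint16_t counters, so their difference stays correct
across index wraparound. A tiny self-contained illustration (the values
are made up):

	uint16_t avail_idx = 0x0002;	/* has wrapped past 0xffff */
	uint16_t res_base = 0xfffe;	/* reserved before the wrap */
	uint16_t free_entries = avail_idx - res_base;	/* == 4 */

+/*
+ * This function gets an available descriptor from the virtio vring and an
+ * un-attached mbuf from vpool->ring, then attaches them together. It must
+ * adjust the offsets of buff_addr and phys_addr according to the PMD
+ * implementation, otherwise the frame data may be put at the wrong
+ * location in the mbuf.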
+ */ +static inline void __attribute__((always_inline)) +attach_rxmbuf_zcp(struct virtio_net *dev) +{ + uint16_t res_base_idx, desc_idx; + uint64_t buff_addr, phys_addr; + struct vhost_virtqueue *vq; + struct vring_desc *desc; + struct rte_mbuf *mbuf = NULL; + struct vpool *vpool; + hpa_type addr_type; + + vpool = &vpool_array[dev->vmdq_rx_q]; + vq = dev->virtqueue[VIRTIO_RXQ]; + + do { + if (unlikely(get_available_ring_index_zcp(dev, &res_base_idx, + 1) != 1)) + return; + desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)]; + + desc = &vq->desc[desc_idx]; + if (desc->flags & VRING_DESC_F_NEXT) { + desc = &vq->desc[desc->next]; + buff_addr = gpa_to_vva(dev, desc->addr); + phys_addr = gpa_to_hpa(dev, desc->addr, desc->len, + &addr_type); + } else { + buff_addr = gpa_to_vva(dev, + desc->addr + vq->vhost_hlen); + phys_addr = gpa_to_hpa(dev, + desc->addr + vq->vhost_hlen, + desc->len, &addr_type); + } + + if (unlikely(addr_type == PHYS_ADDR_INVALID)) { + RTE_LOG(ERR, DATA, "(%"PRIu64") Invalid frame buffer" + " address found when attaching RX frame buffer" + " address!\n", dev->device_fh); + put_desc_to_used_list_zcp(vq, desc_idx); + continue; + } + + /* + * Check if the frame buffer address from guest crosses + * sub-region or not. + */ + if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) { + RTE_LOG(ERR, DATA, + "(%"PRIu64") Frame buffer address cross " + "sub-regioin found when attaching RX frame " + "buffer address!\n", + dev->device_fh); + put_desc_to_used_list_zcp(vq, desc_idx); + continue; + } + } while (unlikely(phys_addr == 0)); + + rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf); + if (unlikely(mbuf == NULL)) { + LOG_DEBUG(DATA, + "(%"PRIu64") in attach_rxmbuf_zcp: " + "ring_sc_dequeue fail.\n", + dev->device_fh); + put_desc_to_used_list_zcp(vq, desc_idx); + return; + } + + if (unlikely(vpool->buf_size > desc->len)) { + LOG_DEBUG(DATA, + "(%"PRIu64") in attach_rxmbuf_zcp: frame buffer " + "length(%d) of descriptor idx: %d less than room " + "size required: %d\n", + dev->device_fh, desc->len, desc_idx, vpool->buf_size); + put_desc_to_used_list_zcp(vq, desc_idx); + rte_ring_sp_enqueue(vpool->ring, (void *)mbuf); + return; + } + + mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM); + mbuf->pkt.data = (void *)(uintptr_t)(buff_addr); + mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM; + mbuf->pkt.data_len = desc->len; + MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx; + + LOG_DEBUG(DATA, + "(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, " + "descriptor idx:%d\n", + dev->device_fh, res_base_idx, desc_idx); + + __rte_mbuf_raw_free(mbuf); + + return; +} + +/* + * Detach an attched packet mbuf - + * - restore original mbuf address and length values. + * - reset pktmbuf data and data_len to their default values. + * All other fields of the given packet mbuf will be left intact. + * + * @param m + * The attached packet mbuf. + */ +static inline void pktmbuf_detach_zcp(struct rte_mbuf *m) +{ + const struct rte_mempool *mp = m->pool; + void *buf = RTE_MBUF_TO_BADDR(m); + uint32_t buf_ofs; + uint32_t buf_len = mp->elt_size - sizeof(*m); + m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m); + + m->buf_addr = buf; + m->buf_len = (uint16_t)buf_len; + + buf_ofs = (RTE_PKTMBUF_HEADROOM <= m->buf_len) ? + RTE_PKTMBUF_HEADROOM : m->buf_len; + m->pkt.data = (char *) m->buf_addr + buf_ofs; + + m->pkt.data_len = 0; +} + +/* + * This function is called after packets have been transimited. It fetchs mbuf + * from vpool->pool, detached it and put into vpool->ring. 
It also updates the
+ * used index and kicks the guest if necessary.
+ */
+static inline uint32_t __attribute__((always_inline))
+txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
+{
+	struct rte_mbuf *mbuf;
+	struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
+	uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
+	uint32_t index = 0;
+	uint32_t mbuf_count = rte_mempool_count(vpool->pool);
+
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
+		"clean is: %d\n",
+		dev->device_fh, mbuf_count);
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
+		"clean is : %d\n",
+		dev->device_fh, rte_ring_count(vpool->ring));
+
+	for (index = 0; index < mbuf_count; index++) {
+		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
+		if (likely(RTE_MBUF_INDIRECT(mbuf)))
+			pktmbuf_detach_zcp(mbuf);
+		rte_ring_sp_enqueue(vpool->ring, mbuf);
+
+		/* Update used index buffer information. */
+		vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
+		vq->used->ring[used_idx].len = 0;
+
+		used_idx = (used_idx + 1) & (vq->size - 1);
+	}
+
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
+		"clean is: %d\n",
+		dev->device_fh, rte_mempool_count(vpool->pool));
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
+		"clean is : %d\n",
+		dev->device_fh, rte_ring_count(vpool->ring));
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: before updated "
+		"vq->last_used_idx:%d\n",
+		dev->device_fh, vq->last_used_idx);
+
+	vq->last_used_idx += mbuf_count;
+
+	LOG_DEBUG(DATA,
+		"(%"PRIu64") in txmbuf_clean_zcp: after updated "
+		"vq->last_used_idx:%d\n",
+		dev->device_fh, vq->last_used_idx);
+
+	rte_compiler_barrier();
+
+	*(volatile uint16_t *)&vq->used->idx += mbuf_count;
+
+	/* Kick guest if required. */
+	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+		eventfd_write((int)vq->kickfd, 1);
+
+	return 0;
+}
+
+/*
+ * This function is called when a virtio device is destroyed.
+ * It fetches mbufs from vpool->pool, detaches them and puts them into
+ * vpool->ring.
+ */
+static void mbuf_destroy_zcp(struct vpool *vpool)
+{
+	struct rte_mbuf *mbuf = NULL;
+	uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
+
+	LOG_DEBUG(CONFIG,
+		"in mbuf_destroy_zcp: mbuf count in mempool before "
+		"mbuf_destroy_zcp is: %d\n",
+		mbuf_count);
+	LOG_DEBUG(CONFIG,
+		"in mbuf_destroy_zcp: mbuf count in ring before "
+		"mbuf_destroy_zcp is : %d\n",
+		rte_ring_count(vpool->ring));
+
+	for (index = 0; index < mbuf_count; index++) {
+		mbuf = __rte_mbuf_raw_alloc(vpool->pool);
+		if (likely(mbuf != NULL)) {
+			if (likely(RTE_MBUF_INDIRECT(mbuf)))
+				pktmbuf_detach_zcp(mbuf);
+			rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
+		}
+	}
+
+	LOG_DEBUG(CONFIG,
+		"in mbuf_destroy_zcp: mbuf count in mempool after "
+		"mbuf_destroy_zcp is: %d\n",
+		rte_mempool_count(vpool->pool));
+	LOG_DEBUG(CONFIG,
+		"in mbuf_destroy_zcp: mbuf count in ring after "
+		"mbuf_destroy_zcp is : %d\n",
+		rte_ring_count(vpool->ring));
+}
+
+/*
+ * This function fills the used ring of the RX virtqueue with the
+ * descriptors the given packets were DMAed into, prepends a virtio
+ * header to each packet and updates the used index.
+ */
+static inline uint32_t __attribute__((always_inline))
+virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
+	uint32_t count)
+{
+	struct vhost_virtqueue *vq;
+	struct vring_desc *desc;
+	struct rte_mbuf *buff;
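+	/*
+	 * The owning descriptor index travels with each zero-copy mbuf in
+	 * the first 4 bytes of its headroom: attach_rxmbuf_zcp() stored it
+	 * there via MBUF_HEADROOM_UINT32(), and the loop below reads it
+	 * back, head[i] = MBUF_HEADROOM_UINT32(pkts[i]), to know which
+	 * used ring entry each packet must fill.
+	 */
+	/* The virtio_hdr is initialised to 0.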
*/ + struct virtio_net_hdr_mrg_rxbuf virtio_hdr + = {{0, 0, 0, 0, 0, 0}, 0}; + uint64_t buff_hdr_addr = 0; + uint32_t head[MAX_PKT_BURST], packet_len = 0; + uint32_t head_idx, packet_success = 0; + uint16_t res_cur_idx; + + LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh); + + if (count == 0) + return 0; + + vq = dev->virtqueue[VIRTIO_RXQ]; + count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count; + + res_cur_idx = vq->last_used_idx; + LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n", + dev->device_fh, res_cur_idx, res_cur_idx + count); + + /* Retrieve all of the head indexes first to avoid caching issues. */ + for (head_idx = 0; head_idx < count; head_idx++) + head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]); + + /*Prefetch descriptor index. */ + rte_prefetch0(&vq->desc[head[packet_success]]); + + while (packet_success != count) { + /* Get descriptor from available ring */ + desc = &vq->desc[head[packet_success]]; + + buff = pkts[packet_success]; + LOG_DEBUG(DATA, + "(%"PRIu64") in dev_rx_zcp: update the used idx for " + "pkt[%d] descriptor idx: %d\n", + dev->device_fh, packet_success, + MBUF_HEADROOM_UINT32(buff)); + + PRINT_PACKET(dev, + (uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr) + + RTE_PKTMBUF_HEADROOM), + rte_pktmbuf_data_len(buff), 0); + + /* Buffer address translation for virtio header. */ + buff_hdr_addr = gpa_to_vva(dev, desc->addr); + packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen; + + /* + * If the descriptors are chained the header and data are + * placed in separate buffers. + */ + if (desc->flags & VRING_DESC_F_NEXT) { + desc->len = vq->vhost_hlen; + desc = &vq->desc[desc->next]; + desc->len = rte_pktmbuf_data_len(buff); + } else { + desc->len = packet_len; + } + + /* Update used ring with desc information */ + vq->used->ring[res_cur_idx & (vq->size - 1)].id + = head[packet_success]; + vq->used->ring[res_cur_idx & (vq->size - 1)].len + = packet_len; + res_cur_idx++; + packet_success++; + + /* A header is required per buffer. */ + rte_memcpy((void *)(uintptr_t)buff_hdr_addr, + (const void *)&virtio_hdr, vq->vhost_hlen); + + PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1); + + if (likely(packet_success < count)) { + /* Prefetch descriptor index. */ + rte_prefetch0(&vq->desc[head[packet_success]]); + } + } + + rte_compiler_barrier(); + + LOG_DEBUG(DATA, + "(%"PRIu64") in dev_rx_zcp: before update used idx: " + "vq.last_used_idx: %d, vq->used->idx: %d\n", + dev->device_fh, vq->last_used_idx, vq->used->idx); + + *(volatile uint16_t *)&vq->used->idx += count; + vq->last_used_idx += count; + + LOG_DEBUG(DATA, + "(%"PRIu64") in dev_rx_zcp: after update used idx: " + "vq.last_used_idx: %d, vq->used->idx: %d\n", + dev->device_fh, vq->last_used_idx, vq->used->idx); + + /* Kick the guest if necessary. */ + if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) + eventfd_write((int)vq->kickfd, 1); + + return count; +} + +/* + * This function routes the TX packet to the correct interface. + * This may be a local device or the physical port. 
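+ */

The vlan tag decision this routine makes under vm2vm hardware mode can be
condensed into the following sketch (the helper name is illustrative and
not part of the patch; dest_dev stands for the device whose MAC address
matched the packet, or NULL for an external destination):

static inline uint16_t
select_tx_vlan_tag_sketch(struct virtio_net *dest_dev)
{
	/* Leaving the host: use the reserved tag so VMDQ pool selection
	 * cannot mistake the packet for a vm2vm one. */
	if (dest_dev == NULL)
		return external_pkt_default_vlan_tag;

	/* VM to VM via NIC loopback: tag with the destination's vlan. */
	return vlan_tags[(uint16_t)dest_dev->device_fh];
}

+/*
+ * virtio_tx_route_zcp: the routing and tagging sketched above, plus mbuf
+ * allocation from the TX vpool and the burst transmit itself.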
+ */ +static inline void __attribute__((always_inline)) +virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m, + uint32_t desc_idx, uint8_t need_copy) +{ + struct mbuf_table *tx_q; + struct rte_mbuf **m_table; + struct rte_mbuf *mbuf = NULL; + unsigned len, ret, offset = 0; + struct vpool *vpool; + struct virtio_net_data_ll *dev_ll = ll_root_used; + struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data; + uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh]; + + /*Add packet to the port tx queue*/ + tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q]; + len = tx_q->len; + + /* Allocate an mbuf and populate the structure. */ + vpool = &vpool_array[MAX_QUEUES + (uint16_t)dev->vmdq_rx_q]; + rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf); + if (unlikely(mbuf == NULL)) { + struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ]; + RTE_LOG(ERR, DATA, + "(%"PRIu64") Failed to allocate memory for mbuf.\n", + dev->device_fh); + put_desc_to_used_list_zcp(vq, desc_idx); + return; + } + + if (vm2vm_mode == VM2VM_HARDWARE) { + /* Avoid using a vlan tag from any vm for external pkt, such as + * vlan_tags[dev->device_fh], oterwise, it conflicts when pool + * selection, MAC address determines it as an external pkt + * which should go to network, while vlan tag determine it as + * a vm2vm pkt should forward to another vm. Hardware confuse + * such a ambiguous situation, so pkt will lost. + */ + vlan_tag = external_pkt_default_vlan_tag; + while (dev_ll != NULL) { + if (likely(dev_ll->dev->ready == DEVICE_RX) && + ether_addr_cmp(&(pkt_hdr->d_addr), + &dev_ll->dev->mac_address)) { + + /* + * Drop the packet if the TX packet is destined + * for the TX device. + */ + if (unlikely(dev_ll->dev->device_fh + == dev->device_fh)) { + LOG_DEBUG(DATA, + "(%"PRIu64") TX: Source and destination" + "MAC addresses are the same. Dropping " + "packet.\n", + dev_ll->dev->device_fh); + MBUF_HEADROOM_UINT32(mbuf) + = (uint32_t)desc_idx; + __rte_mbuf_raw_free(mbuf); + return; + } + + /* + * Packet length offset 4 bytes for HW vlan + * strip when L2 switch back. + */ + offset = 4; + vlan_tag = + (uint16_t) + vlan_tags[(uint16_t)dev_ll->dev->device_fh]; + + LOG_DEBUG(DATA, + "(%"PRIu64") TX: pkt to local VM device id:" + "(%"PRIu64") vlan tag: %d.\n", + dev->device_fh, dev_ll->dev->device_fh, + vlan_tag); + + break; + } + dev_ll = dev_ll->next; + } + } + + mbuf->pkt.nb_segs = m->pkt.nb_segs; + mbuf->pkt.next = m->pkt.next; + mbuf->pkt.data_len = m->pkt.data_len + offset; + mbuf->pkt.pkt_len = mbuf->pkt.data_len; + if (unlikely(need_copy)) { + /* Copy the packet contents to the mbuf. */ + rte_memcpy((void *)((uint8_t *)mbuf->pkt.data), + (const void *) ((uint8_t *)m->pkt.data), + m->pkt.data_len); + } else { + mbuf->pkt.data = m->pkt.data; + mbuf->buf_physaddr = m->buf_physaddr; + mbuf->buf_addr = m->buf_addr; + } + mbuf->ol_flags = PKT_TX_VLAN_PKT; + mbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag; + mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr); + mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr); + MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx; + + tx_q->m_table[len] = mbuf; + len++; + + LOG_DEBUG(DATA, + "(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n", + dev->device_fh, + mbuf->pkt.nb_segs, + (mbuf->pkt.next == NULL) ? 
"null" : "non-null"); + + if (enable_stats) { + dev_statistics[dev->device_fh].tx_total++; + dev_statistics[dev->device_fh].tx++; + } + + if (unlikely(len == MAX_PKT_BURST)) { + m_table = (struct rte_mbuf **)tx_q->m_table; + ret = rte_eth_tx_burst(ports[0], + (uint16_t)tx_q->txq_id, m_table, (uint16_t) len); + + /* + * Free any buffers not handled by TX and update + * the port stats. + */ + if (unlikely(ret < len)) { + do { + rte_pktmbuf_free(m_table[ret]); + } while (++ret < len); + } + + len = 0; + txmbuf_clean_zcp(dev, vpool); + } + + tx_q->len = len; + + return; +} + +/* + * This function TX all available packets in virtio TX queue for one + * virtio-net device. If it is first packet, it learns MAC address and + * setup VMDQ. + */ +static inline void __attribute__((always_inline)) +virtio_dev_tx_zcp(struct virtio_net *dev) +{ + struct rte_mbuf m; + struct vhost_virtqueue *vq; + struct vring_desc *desc; + uint64_t buff_addr = 0, phys_addr; + uint32_t head[MAX_PKT_BURST]; + uint32_t i; + uint16_t free_entries, packet_success = 0; + uint16_t avail_idx; + uint8_t need_copy = 0; + hpa_type addr_type; + + vq = dev->virtqueue[VIRTIO_TXQ]; + avail_idx = *((volatile uint16_t *)&vq->avail->idx); + + /* If there are no available buffers then return. */ + if (vq->last_used_idx_res == avail_idx) + return; + + LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh); + + /* Prefetch available ring to retrieve head indexes. */ + rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]); + + /* Get the number of free entries in the ring */ + free_entries = (avail_idx - vq->last_used_idx_res); + + /* Limit to MAX_PKT_BURST. */ + free_entries + = (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries; + + LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n", + dev->device_fh, free_entries); + + /* Retrieve all of the head indexes first to avoid caching issues. */ + for (i = 0; i < free_entries; i++) + head[i] + = vq->avail->ring[(vq->last_used_idx_res + i) + & (vq->size - 1)]; + + vq->last_used_idx_res += free_entries; + + /* Prefetch descriptor index. */ + rte_prefetch0(&vq->desc[head[packet_success]]); + rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]); + + while (packet_success < free_entries) { + desc = &vq->desc[head[packet_success]]; + + /* Discard first buffer as it is the virtio header */ + desc = &vq->desc[desc->next]; + + /* Buffer address translation. */ + buff_addr = gpa_to_vva(dev, desc->addr); + phys_addr = gpa_to_hpa(dev, desc->addr, desc->len, &addr_type); + + if (likely(packet_success < (free_entries - 1))) + /* Prefetch descriptor index. */ + rte_prefetch0(&vq->desc[head[packet_success + 1]]); + + if (unlikely(addr_type == PHYS_ADDR_INVALID)) { + RTE_LOG(ERR, DATA, + "(%"PRIu64") Invalid frame buffer address found" + "when TX packets!\n", + dev->device_fh); + packet_success++; + continue; + } + + /* Prefetch buffer address. */ + rte_prefetch0((void *)(uintptr_t)buff_addr); + + /* + * Setup dummy mbuf. This is copied to a real mbuf if + * transmitted out the physical port. + */ + m.pkt.data_len = desc->len; + m.pkt.nb_segs = 1; + m.pkt.next = NULL; + m.pkt.data = (void *)(uintptr_t)buff_addr; + m.buf_addr = m.pkt.data; + m.buf_physaddr = phys_addr; + + /* + * Check if the frame buffer address from guest crosses + * sub-region or not. 
+ */ + if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) { + RTE_LOG(ERR, DATA, + "(%"PRIu64") Frame buffer address cross " + "sub-regioin found when attaching TX frame " + "buffer address!\n", + dev->device_fh); + need_copy = 1; + } else + need_copy = 0; + + PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0); + + /* + * If this is the first received packet we need to learn + * the MAC and setup VMDQ + */ + if (unlikely(dev->ready == DEVICE_MAC_LEARNING)) { + if (dev->remove || (link_vmdq(dev, &m) == -1)) { + /* + * Discard frame if device is scheduled for + * removal or a duplicate MAC address is found. + */ + packet_success += free_entries; + vq->last_used_idx += packet_success; + break; + } + } + + virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy); + packet_success++; + } +} + +/* + * This function is called by each data core. It handles all RX/TX registered + * with the core. For TX the specific lcore linked list is used. For RX, MAC + * addresses are compared with all devices in the main linked list. + */ +static int +switch_worker_zcp(__attribute__((unused)) void *arg) +{ + struct virtio_net *dev = NULL; + struct rte_mbuf *pkts_burst[MAX_PKT_BURST]; + struct virtio_net_data_ll *dev_ll; + struct mbuf_table *tx_q; + volatile struct lcore_ll_info *lcore_ll; + const uint64_t drain_tsc + = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S + * BURST_TX_DRAIN_US; + uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0; + unsigned ret; + const uint16_t lcore_id = rte_lcore_id(); + uint16_t count_in_ring, rx_count = 0; + + RTE_LOG(INFO, DATA, "Procesing on Core %u started\n", lcore_id); + + lcore_ll = lcore_info[lcore_id].lcore_ll; + prev_tsc = 0; + + while (1) { + cur_tsc = rte_rdtsc(); + + /* TX burst queue drain */ + diff_tsc = cur_tsc - prev_tsc; + if (unlikely(diff_tsc > drain_tsc)) { + /* + * Get mbuf from vpool.pool and detach mbuf and + * put back into vpool.ring. + */ + dev_ll = lcore_ll->ll_root_used; + while ((dev_ll != NULL) && (dev_ll->dev != NULL)) { + /* Get virtio device ID */ + dev = dev_ll->dev; + + if (likely(!dev->remove)) { + tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q]; + if (tx_q->len) { + LOG_DEBUG(DATA, + "TX queue drained after timeout" + " with burst size %u\n", + tx_q->len); + + /* + * Tx any packets in the queue + */ + ret = rte_eth_tx_burst( + ports[0], + (uint16_t)tx_q->txq_id, + (struct rte_mbuf **) + tx_q->m_table, + (uint16_t)tx_q->len); + if (unlikely(ret < tx_q->len)) { + do { + rte_pktmbuf_free( + tx_q->m_table[ret]); + } while (++ret < tx_q->len); + } + tx_q->len = 0; + + txmbuf_clean_zcp(dev, + &vpool_array[MAX_QUEUES+dev->vmdq_rx_q]); + } + } + dev_ll = dev_ll->next; + } + prev_tsc = cur_tsc; + } + + rte_prefetch0(lcore_ll->ll_root_used); + + /* + * Inform the configuration core that we have exited the linked + * list and that no devices are in use if requested. + */ + if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL) + lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL; + + /* Process devices */ + dev_ll = lcore_ll->ll_root_used; + + while ((dev_ll != NULL) && (dev_ll->dev != NULL)) { + dev = dev_ll->dev; + if (unlikely(dev->remove)) { + dev_ll = dev_ll->next; + unlink_vmdq(dev); + dev->ready = DEVICE_SAFE_REMOVE; + continue; + } + + if (likely(dev->ready == DEVICE_RX)) { + uint32_t index = dev->vmdq_rx_q; + uint16_t i; + count_in_ring + = rte_ring_count(vpool_array[index].ring); + uint16_t free_entries + = (uint16_t)get_available_ring_num_zcp(dev); + + /* + * Attach all mbufs in vpool.ring and put back + * into vpool.pool. 
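+				 */
+				/*
+				 * The refill below is bounded three ways:
+				 * by what the guest has posted
+				 * (free_entries), by the spare mbufs
+				 * available (count_in_ring) and by one
+				 * burst (MAX_PKT_BURST).
+				 */
+				/* Attach as many mbufs as the bound allows.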
+ */ + for (i = 0; + i < RTE_MIN(free_entries, + RTE_MIN(count_in_ring, MAX_PKT_BURST)); + i++) + attach_rxmbuf_zcp(dev); + + /* Handle guest RX */ + rx_count = rte_eth_rx_burst(ports[0], + (uint16_t)dev->vmdq_rx_q, pkts_burst, + MAX_PKT_BURST); + + if (rx_count) { + ret_count = virtio_dev_rx_zcp(dev, + pkts_burst, rx_count); + if (enable_stats) { + dev_statistics[dev->device_fh].rx_total + += rx_count; + dev_statistics[dev->device_fh].rx + += ret_count; + } + while (likely(rx_count)) { + rx_count--; + pktmbuf_detach_zcp( + pkts_burst[rx_count]); + rte_ring_sp_enqueue( + vpool_array[index].ring, + (void *)pkts_burst[rx_count]); + } + } + } + + if (likely(!dev->remove)) + /* Handle guest TX */ + virtio_dev_tx_zcp(dev); + + /* Move to the next device in the list */ + dev_ll = dev_ll->next; + } + } + + return 0; +} + + +/* + * Add an entry to a used linked list. A free entry must first be found + * in the free linked list using get_data_ll_free_entry(); */ static void -add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev) +add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, + struct virtio_net_data_ll *ll_dev) { struct virtio_net_data_ll *ll = *ll_root_addr; @@ -1311,14 +2419,16 @@ add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_da } /* - * Remove an entry from a used linked list. The entry must then be added to the free linked list - * using put_data_ll_free_entry(). + * Remove an entry from a used linked list. The entry must then be added to + * the free linked list using put_data_ll_free_entry(). */ static void -rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last) +rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, + struct virtio_net_data_ll *ll_dev, + struct virtio_net_data_ll *ll_dev_last) { struct virtio_net_data_ll *ll = *ll_root_addr; - + if (unlikely((ll == NULL) || (ll_dev == NULL))) return; @@ -1353,7 +2463,8 @@ get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr) * Place an entry back on to the free linked list. */ static void -put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev) +put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, + struct virtio_net_data_ll *ll_dev) { struct virtio_net_data_ll *ll_free = *ll_root_addr; @@ -1465,7 +2576,9 @@ destroy_device (volatile struct virtio_net *dev) } if (ll_lcore_dev_cur == NULL) { - RTE_LOG(ERR, CONFIG, "Failed to find the dev to be destroy.\n"); + RTE_LOG(ERR, CONFIG, + "(%"PRIu64") Failed to find the dev to be destroy.\n", + dev->device_fh); return; } @@ -1509,6 +2622,45 @@ destroy_device (volatile struct virtio_net *dev) lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--; RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh); + + if (zero_copy) { + struct vpool *vpool = &vpool_array[dev->vmdq_rx_q]; + + /* Stop the RX queue. */ + if (rte_eth_dev_rx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) { + LOG_DEBUG(CONFIG, + "(%"PRIu64") In destroy_device: Failed to stop " + "rx queue:%d\n", + dev->device_fh, + dev->vmdq_rx_q); + } + + LOG_DEBUG(CONFIG, + "(%"PRIu64") in destroy_device: Start put mbuf in " + "mempool back to ring for RX queue: %d\n", + dev->device_fh, dev->vmdq_rx_q); + + mbuf_destroy_zcp(vpool); + + /* Stop the TX queue. 
*/ + if (rte_eth_dev_tx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) { + LOG_DEBUG(CONFIG, + "(%"PRIu64") In destroy_device: Failed to " + "stop tx queue:%d\n", + dev->device_fh, dev->vmdq_rx_q); + } + + vpool = &vpool_array[dev->vmdq_rx_q + MAX_QUEUES]; + + LOG_DEBUG(CONFIG, + "(%"PRIu64") destroy_device: Start put mbuf in mempool " + "back to ring for TX queue: %d, dev:(%"PRIu64")\n", + dev->device_fh, (dev->vmdq_rx_q + MAX_QUEUES), + dev->device_fh); + + mbuf_destroy_zcp(vpool); + } + } /* @@ -1532,6 +2684,78 @@ new_device (struct virtio_net *dev) } ll_dev->dev = dev; add_data_ll_entry(&ll_root_used, ll_dev); + ll_dev->dev->vmdq_rx_q + = ll_dev->dev->device_fh * (num_queues / num_devices); + + if (zero_copy) { + uint32_t index = ll_dev->dev->vmdq_rx_q; + uint32_t count_in_ring, i; + struct mbuf_table *tx_q; + + count_in_ring = rte_ring_count(vpool_array[index].ring); + + LOG_DEBUG(CONFIG, + "(%"PRIu64") in new_device: mbuf count in mempool " + "before attach is: %d\n", + dev->device_fh, + rte_mempool_count(vpool_array[index].pool)); + LOG_DEBUG(CONFIG, + "(%"PRIu64") in new_device: mbuf count in ring " + "before attach is : %d\n", + dev->device_fh, count_in_ring); + + /* + * Attach all mbufs in vpool.ring and put back intovpool.pool. + */ + for (i = 0; i < count_in_ring; i++) + attach_rxmbuf_zcp(dev); + + LOG_DEBUG(CONFIG, "(%"PRIu64") in new_device: mbuf count in " + "mempool after attach is: %d\n", + dev->device_fh, + rte_mempool_count(vpool_array[index].pool)); + LOG_DEBUG(CONFIG, "(%"PRIu64") in new_device: mbuf count in " + "ring after attach is : %d\n", + dev->device_fh, + rte_ring_count(vpool_array[index].ring)); + + tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q]; + tx_q->txq_id = dev->vmdq_rx_q; + + if (rte_eth_dev_tx_queue_start(ports[0], dev->vmdq_rx_q) != 0) { + struct vpool *vpool = &vpool_array[dev->vmdq_rx_q]; + + LOG_DEBUG(CONFIG, + "(%"PRIu64") In new_device: Failed to start " + "tx queue:%d\n", + dev->device_fh, dev->vmdq_rx_q); + + mbuf_destroy_zcp(vpool); + return -1; + } + + if (rte_eth_dev_rx_queue_start(ports[0], dev->vmdq_rx_q) != 0) { + struct vpool *vpool = &vpool_array[dev->vmdq_rx_q]; + + LOG_DEBUG(CONFIG, + "(%"PRIu64") In new_device: Failed to start " + "rx queue:%d\n", + dev->device_fh, dev->vmdq_rx_q); + + /* Stop the TX queue. 
*/ + if (rte_eth_dev_tx_queue_stop(ports[0], + dev->vmdq_rx_q) != 0) { + LOG_DEBUG(CONFIG, + "(%"PRIu64") In new_device: Failed to " + "stop tx queue:%d\n", + dev->device_fh, dev->vmdq_rx_q); + } + + mbuf_destroy_zcp(vpool); + return -1; + } + + } /*reset ready flag*/ dev->ready = DEVICE_MAC_LEARNING; @@ -1549,6 +2773,7 @@ new_device (struct virtio_net *dev) ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free); if (ll_dev == NULL) { RTE_LOG(INFO, DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh); + dev->ready = DEVICE_SAFE_REMOVE; destroy_device(dev); return -1; } @@ -1606,8 +2831,15 @@ print_stats(void) tx_total = dev_statistics[device_fh].tx_total; tx = dev_statistics[device_fh].tx; tx_dropped = tx_total - tx; - rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total); - rx = rte_atomic64_read(&dev_statistics[device_fh].rx); + if (zero_copy == 0) { + rx_total = rte_atomic64_read( + &dev_statistics[device_fh].rx_total_atomic); + rx = rte_atomic64_read( + &dev_statistics[device_fh].rx_atomic); + } else { + rx_total = dev_statistics[device_fh].rx_total; + rx = dev_statistics[device_fh].rx; + } rx_dropped = rx_total - rx; printf("\nStatistics for device %"PRIu32" ------------------------------" @@ -1631,6 +2863,43 @@ print_stats(void) } } +static void +setup_mempool_tbl(int socket, uint32_t index, char *pool_name, + char *ring_name, uint32_t nb_mbuf) +{ + uint16_t roomsize = VIRTIO_DESCRIPTOR_LEN_ZCP + RTE_PKTMBUF_HEADROOM; + vpool_array[index].pool + = rte_mempool_create(pool_name, nb_mbuf, MBUF_SIZE_ZCP, + MBUF_CACHE_SIZE_ZCP, sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, (void *)(uintptr_t)roomsize, + rte_pktmbuf_init, NULL, socket, 0); + if (vpool_array[index].pool != NULL) { + vpool_array[index].ring + = rte_ring_create(ring_name, + rte_align32pow2(nb_mbuf + 1), + socket, RING_F_SP_ENQ | RING_F_SC_DEQ); + if (likely(vpool_array[index].ring != NULL)) { + LOG_DEBUG(CONFIG, + "in setup_mempool_tbl: mbuf count in " + "mempool is: %d\n", + rte_mempool_count(vpool_array[index].pool)); + LOG_DEBUG(CONFIG, + "in setup_mempool_tbl: mbuf count in " + "ring is: %d\n", + rte_ring_count(vpool_array[index].ring)); + } else { + rte_exit(EXIT_FAILURE, "ring_create(%s) failed", + ring_name); + } + + /* Need consider head room. */ + vpool_array[index].buf_size = roomsize - RTE_PKTMBUF_HEADROOM; + } else { + rte_exit(EXIT_FAILURE, "mempool_create(%s) failed", pool_name); + } +} + + /* * Main function, does initialisation and calls the per-lcore functions. The CUSE * device is also registered here to handle the IOCTLs. @@ -1638,11 +2907,11 @@ print_stats(void) int MAIN(int argc, char *argv[]) { - struct rte_mempool *mbuf_pool; + struct rte_mempool *mbuf_pool = NULL; unsigned lcore_id, core_id = 0; unsigned nb_ports, valid_num_ports; int ret; - uint8_t portid; + uint8_t portid, queue_id = 0; static pthread_t tid; /* init EAL */ @@ -1687,16 +2956,71 @@ MAIN(int argc, char *argv[]) return -1; } - /* Create the mbuf pool. */ - mbuf_pool = rte_mempool_create("MBUF_POOL", NUM_MBUFS_PER_PORT * valid_num_ports, - MBUF_SIZE, MBUF_CACHE_SIZE, - sizeof(struct rte_pktmbuf_pool_private), - rte_pktmbuf_pool_init, NULL, - rte_pktmbuf_init, NULL, - rte_socket_id(), 0); - if (mbuf_pool == NULL) - rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); + if (zero_copy == 0) { + /* Create the mbuf pool. 
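 */

A sizing note for the zero copy branch further below: each VMDQ queue
gets a private mempool/ring pair from setup_mempool_tbl(), with nb_mbuf
equal to the descriptor count plus one cache and one burst per switching
core. As a worked example, with the 32-descriptor RX default, a cache of
MBUF_CACHE_SIZE_ZCP = 0, two switching cores and a MAX_PKT_BURST of 32
(assumed here, as elsewhere in this example app):

	nb_mbuf = 32 + 2 * 0 + 2 * 32 = 96 mbufs per RX queue

	/* Create the mbuf pool.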
*/ + mbuf_pool = rte_mempool_create( + "MBUF_POOL", + NUM_MBUFS_PER_PORT + * valid_num_ports, + MBUF_SIZE, MBUF_CACHE_SIZE, + sizeof(struct rte_pktmbuf_pool_private), + rte_pktmbuf_pool_init, NULL, + rte_pktmbuf_init, NULL, + rte_socket_id(), 0); + if (mbuf_pool == NULL) + rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n"); + + for (queue_id = 0; queue_id < MAX_QUEUES + 1; queue_id++) + vpool_array[queue_id].pool = mbuf_pool; + + if (vm2vm_mode == VM2VM_HARDWARE) { + /* Enable VT loop back to let L2 switch to do it. */ + vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1; + LOG_DEBUG(CONFIG, + "Enable loop back for L2 switch in vmdq.\n"); + } + } else { + uint32_t nb_mbuf; + char pool_name[RTE_MEMPOOL_NAMESIZE]; + char ring_name[RTE_MEMPOOL_NAMESIZE]; + + rx_conf_default.start_rx_per_q = (uint8_t)zero_copy; + rx_conf_default.rx_drop_en = 0; + tx_conf_default.start_tx_per_q = (uint8_t)zero_copy; + nb_mbuf = num_rx_descriptor + + num_switching_cores * MBUF_CACHE_SIZE_ZCP + + num_switching_cores * MAX_PKT_BURST; + + for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) { + rte_snprintf(pool_name, sizeof(pool_name), + "rxmbuf_pool_%u", queue_id); + rte_snprintf(ring_name, sizeof(ring_name), + "rxmbuf_ring_%u", queue_id); + setup_mempool_tbl(rte_socket_id(), queue_id, + pool_name, ring_name, nb_mbuf); + } + + nb_mbuf = num_tx_descriptor + + num_switching_cores * MBUF_CACHE_SIZE_ZCP + + num_switching_cores * MAX_PKT_BURST; + + for (queue_id = 0; queue_id < MAX_QUEUES; queue_id++) { + rte_snprintf(pool_name, sizeof(pool_name), + "txmbuf_pool_%u", queue_id); + rte_snprintf(ring_name, sizeof(ring_name), + "txmbuf_ring_%u", queue_id); + setup_mempool_tbl(rte_socket_id(), + (queue_id + MAX_QUEUES), + pool_name, ring_name, nb_mbuf); + } + if (vm2vm_mode == VM2VM_HARDWARE) { + /* Enable VT loop back to let L2 switch to do it. */ + vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1; + LOG_DEBUG(CONFIG, + "Enable loop back for L2 switch in vmdq.\n"); + } + } /* Set log level. */ rte_set_log_level(LOG_LEVEL); @@ -1704,11 +3028,13 @@ MAIN(int argc, char *argv[]) for (portid = 0; portid < nb_ports; portid++) { /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { - RTE_LOG(INFO, PORT, "Skipping disabled port %d\n", portid); + RTE_LOG(INFO, PORT, + "Skipping disabled port %d\n", portid); continue; } - if (port_init(portid, mbuf_pool) != 0) - rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n"); + if (port_init(portid) != 0) + rte_exit(EXIT_FAILURE, + "Cannot initialize network ports\n"); } /* Initialise all linked lists. */ @@ -1723,8 +3049,42 @@ MAIN(int argc, char *argv[]) pthread_create(&tid, NULL, (void*)print_stats, NULL ); /* Launch all data cores. */ - RTE_LCORE_FOREACH_SLAVE(lcore_id) { - rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id); + if (zero_copy == 0) { + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + rte_eal_remote_launch(switch_worker, + mbuf_pool, lcore_id); + } + } else { + uint32_t count_in_mempool, index, i; + for (index = 0; index < 2*MAX_QUEUES; index++) { + /* For all RX and TX queues. */ + count_in_mempool + = rte_mempool_count(vpool_array[index].pool); + + /* + * Transfer all un-attached mbufs from vpool.pool + * to vpoo.ring. 
+ */ + for (i = 0; i < count_in_mempool; i++) { + struct rte_mbuf *mbuf + = __rte_mbuf_raw_alloc( + vpool_array[index].pool); + rte_ring_sp_enqueue(vpool_array[index].ring, + (void *)mbuf); + } + + LOG_DEBUG(CONFIG, + "in MAIN: mbuf count in mempool at initial " + "is: %d\n", count_in_mempool); + LOG_DEBUG(CONFIG, + "in MAIN: mbuf count in ring at initial is :" + " %d\n", + rte_ring_count(vpool_array[index].ring)); + } + + RTE_LCORE_FOREACH_SLAVE(lcore_id) + rte_eal_remote_launch(switch_worker_zcp, NULL, + lcore_id); } /* Register CUSE device to handle IOCTLs. */ diff --git a/examples/vhost/virtio-net.c b/examples/vhost/virtio-net.c index b7b39be0f1..8a3e3332f1 100644 --- a/examples/vhost/virtio-net.c +++ b/examples/vhost/virtio-net.c @@ -46,6 +46,7 @@ #include #include #include +#include #include "main.h" #include "virtio-net.h" @@ -326,7 +327,7 @@ add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev) while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1))) ll_dev = ll_dev->next; - new_ll_dev->dev.device_fh++; + new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1; new_ll_dev->next = ll_dev->next; ll_dev->next = new_ll_dev; } @@ -346,6 +347,8 @@ cleanup_device(struct virtio_net *dev) /* Unmap QEMU memory file if mapped. */ if (dev->mem) { munmap((void*)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size); + if (dev->mem->regions_hpa) + free(dev->mem->regions_hpa); free(dev->mem); } @@ -589,6 +592,154 @@ set_features(struct vhost_device_ctx ctx, uint64_t *pu) return 0; } +/* + * Calculate the region count of physical continous regions for one particular + * region of whose vhost virtual address is continous. The particular region + * start from vva_start, with size of 'size' in argument. + */ +static uint32_t check_hpa_regions(uint64_t vva_start, uint64_t size) +{ + uint32_t i, nregions = 0, page_size = PAGE_SIZE; + uint64_t cur_phys_addr = 0, next_phys_addr = 0; + if (vva_start % page_size) { + LOG_DEBUG(CONFIG, + "in check_countinous: vva start(%p) mod page_size(%d) " + "has remainder\n", + (void *)(uintptr_t)vva_start, page_size); + return 0; + } + if (size % page_size) { + LOG_DEBUG(CONFIG, + "in check_countinous: " + "size((%"PRIu64")) mod page_size(%d) has remainder\n", + size, page_size); + return 0; + } + for (i = 0; i < size - page_size; i = i + page_size) { + cur_phys_addr + = rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i)); + next_phys_addr = rte_mem_virt2phy( + (void *)(uintptr_t)(vva_start + i + page_size)); + if ((cur_phys_addr + page_size) != next_phys_addr) { + ++nregions; + LOG_DEBUG(CONFIG, + "in check_continuous: hva addr:(%p) is not " + "continuous with hva addr:(%p), diff:%d\n", + (void *)(uintptr_t)(vva_start + (uint64_t)i), + (void *)(uintptr_t)(vva_start + (uint64_t)i + + page_size), page_size); + LOG_DEBUG(CONFIG, + "in check_continuous: hpa addr:(%p) is not " + "continuous with hpa addr:(%p), " + "diff:(%"PRIu64")\n", + (void *)(uintptr_t)cur_phys_addr, + (void *)(uintptr_t)next_phys_addr, + (next_phys_addr-cur_phys_addr)); + } + } + return nregions; +} + +/* + * Divide each region whose vhost virtual address is continous into a few + * sub-regions, make sure the physical address within each sub-region are + * continous. And fill offset(to GPA) and size etc. information of each + * sub-region into regions_hpa. 
+ */ +static uint32_t fill_hpa_memory_regions(void *memory) +{ + uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = PAGE_SIZE; + uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start; + struct virtio_memory *virtio_memory = (struct virtio_memory *)memory; + struct virtio_memory_regions_hpa *mem_region_hpa + = virtio_memory->regions_hpa; + + if (mem_region_hpa == NULL) + return 0; + + for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) { + vva_start = virtio_memory->regions[regionidx].guest_phys_address + + virtio_memory->regions[regionidx].address_offset; + mem_region_hpa[regionidx_hpa].guest_phys_address + = virtio_memory->regions[regionidx].guest_phys_address; + mem_region_hpa[regionidx_hpa].host_phys_addr_offset = + rte_mem_virt2phy((void *)(uintptr_t)(vva_start)) + - mem_region_hpa[regionidx_hpa].guest_phys_address; + LOG_DEBUG(CONFIG, + "in fill_hpa_regions: guest phys addr start[%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].guest_phys_address)); + LOG_DEBUG(CONFIG, + "in fill_hpa_regions: host phys addr start[%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].host_phys_addr_offset)); + for (i = 0, k = 0; + i < virtio_memory->regions[regionidx].memory_size + - page_size; + i += page_size) { + cur_phys_addr = rte_mem_virt2phy( + (void *)(uintptr_t)(vva_start + i)); + next_phys_addr = rte_mem_virt2phy( + (void *)(uintptr_t)(vva_start + + i + page_size)); + if ((cur_phys_addr + page_size) != next_phys_addr) { + mem_region_hpa[regionidx_hpa].guest_phys_address_end = + mem_region_hpa[regionidx_hpa].guest_phys_address + + k + page_size; + mem_region_hpa[regionidx_hpa].memory_size + = k + page_size; + LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest " + "phys addr end [%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].guest_phys_address_end)); + LOG_DEBUG(CONFIG, + "in fill_hpa_regions: guest phys addr " + "size [%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].memory_size)); + mem_region_hpa[regionidx_hpa + 1].guest_phys_address + = mem_region_hpa[regionidx_hpa].guest_phys_address_end; + ++regionidx_hpa; + mem_region_hpa[regionidx_hpa].host_phys_addr_offset = + next_phys_addr + - mem_region_hpa[regionidx_hpa].guest_phys_address; + LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest" + " phys addr start[%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].guest_phys_address)); + LOG_DEBUG(CONFIG, + "in fill_hpa_regions: host phys addr " + "start[%d]:(%p)\n", + regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].host_phys_addr_offset)); + k = 0; + } else { + k += page_size; + } + } + mem_region_hpa[regionidx_hpa].guest_phys_address_end + = mem_region_hpa[regionidx_hpa].guest_phys_address + + k + page_size; + mem_region_hpa[regionidx_hpa].memory_size = k + page_size; + LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest phys addr end " + "[%d]:(%p)\n", regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].guest_phys_address_end)); + LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest phys addr size " + "[%d]:(%p)\n", regionidx_hpa, + (void *)(uintptr_t) + (mem_region_hpa[regionidx_hpa].memory_size)); + ++regionidx_hpa; + } + return regionidx_hpa; +} + /* * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE * This function creates and populates the memory structure for the device. 
@@ -681,16 +832,45 @@ set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_
 		}
 	}
 	mem->nregions = valid_regions;
+	mem->nregions_hpa = mem->nregions;
 	dev->mem = mem;
 	/*
 	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
 	 * corresponding to a QEMU guest physical address.
 	 */
-	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++)
+	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
 		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address + dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
+		dev->mem->nregions_hpa
+			+= check_hpa_regions(
+				dev->mem->regions[regionidx].guest_phys_address
+				+ dev->mem->regions[regionidx].address_offset,
+				dev->mem->regions[regionidx].memory_size);
+	}
+	if (dev->mem->regions_hpa != NULL) {
+		free(dev->mem->regions_hpa);
+		dev->mem->regions_hpa = NULL;
+	}
+
+	dev->mem->regions_hpa = (struct virtio_memory_regions_hpa *) calloc(1,
+		(sizeof(struct virtio_memory_regions_hpa)
+		* dev->mem->nregions_hpa));
+	if (dev->mem->regions_hpa == NULL) {
+		RTE_LOG(ERR, CONFIG,
+			"(%"PRIu64") Failed to allocate memory for "
+			"dev->mem->regions_hpa.\n", dev->device_fh);
+		return -1;
+	}
+	if (fill_hpa_memory_regions(
+		(void *)dev->mem) != dev->mem->nregions_hpa) {
+		RTE_LOG(ERR, CONFIG,
+			"in set_mem_table: hpa memory regions number mismatch: "
+			"[%d]\n", dev->mem->nregions_hpa);
+		return -1;
+	}
+
 	return 0;
 }
@@ -918,7 +1098,7 @@ set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
 	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
 		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) && ((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
-			notify_ops->new_device(dev);
+			return notify_ops->new_device(dev);
 	/* Otherwise we remove it. */
 	} else if (file->fd == VIRTIO_DEV_STOPPED) {
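The regions_hpa table that set_mem_table() now builds is what the zero-copy data path consults to turn a guest physical address into a host physical address for DMA. A minimal sketch of such a lookup is shown below; the helper name is illustrative and not part of the patch, which additionally classifies results with the PHYS_ADDR_* hpa_type values, and it assumes the virtio-net.h types that follow are in scope:

	#include <stdint.h>
	#include "virtio-net.h"

	/*
	 * Illustrative lookup: scan the sub-region table and apply the
	 * per-sub-region offset. Returns 0 when no sub-region covers
	 * guest_pa (the PHYS_ADDR_INVALID case).
	 */
	static uint64_t
	gpa_to_hpa_sketch(const struct virtio_memory *mem, uint64_t guest_pa)
	{
		uint32_t i;

		for (i = 0; i < mem->nregions_hpa; i++) {
			const struct virtio_memory_regions_hpa *r =
					&mem->regions_hpa[i];

			if (guest_pa >= r->guest_phys_address &&
					guest_pa < r->guest_phys_address_end)
				return guest_pa + r->host_phys_addr_offset;
		}
		return 0;
	}
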
diff --git a/examples/vhost/virtio-net.h b/examples/vhost/virtio-net.h
index 3e677e7b65..fbc2a1621c 100644
--- a/examples/vhost/virtio-net.h
+++ b/examples/vhost/virtio-net.h
@@ -40,6 +40,7 @@
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
+#define PAGE_SIZE 4096
 /* Enum for virtqueue management. */
 enum {VIRTIO_RXQ, VIRTIO_TXQ, VIRTIO_QNUM};
@@ -99,6 +100,21 @@ struct virtio_memory_regions {
 	uint64_t address_offset; /* Offset of region for address translation. */
 };
+/*
+ * Information relating to memory regions, including offsets to
+ * addresses in host physical space.
+ */
+struct virtio_memory_regions_hpa {
+	/* Base guest physical address of region. */
+	uint64_t guest_phys_address;
+	/* End guest physical address of region. */
+	uint64_t guest_phys_address_end;
+	/* Size of region. */
+	uint64_t memory_size;
+	/* Offset of region for gpa to hpa translation. */
+	uint64_t host_phys_addr_offset;
+};
+
 /*
 * Memory structure includes region and mapping information.
 */
@@ -107,7 +123,12 @@ struct virtio_memory {
 	uint64_t mapped_address; /* Mapped address of memory file base in our applications memory space. */
 	uint64_t mapped_size; /* Total size of memory file. */
 	uint32_t nregions; /* Number of memory regions. */
-	struct virtio_memory_regions regions[0]; /* Memory region information. */
+	/* Number of memory regions for gpa to hpa translation. */
+	uint32_t nregions_hpa;
+	/* Memory region information for gpa to hpa translation. */
+	struct virtio_memory_regions_hpa *regions_hpa;
+	/* Memory region information. */
+	struct virtio_memory_regions regions[0];
 };

 /*
-- 
2.20.1
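A closing note on the virtio_memory layout above: regions[0] keeps the zero-length-array idiom, so the fixed region table shares a single allocation with the structure itself, whereas regions_hpa must be a separate allocation because nregions_hpa is only known after the page walk in set_mem_table(). A minimal sketch of that allocation pattern, with illustrative names (alloc_virtio_memory is not part of the patch):

	#include <stdlib.h>
	#include "virtio-net.h"

	/* Illustrative helper: allocate both region tables. */
	static struct virtio_memory *
	alloc_virtio_memory(uint32_t nregions, uint32_t nregions_hpa)
	{
		/* Header and fixed region table share one allocation. */
		struct virtio_memory *mem = calloc(1, sizeof(*mem)
			+ sizeof(struct virtio_memory_regions) * nregions);
		if (mem == NULL)
			return NULL;
		mem->nregions = nregions;

		/*
		 * The HPA table is separate: its size is only known after
		 * check_hpa_regions() has walked the pages.
		 */
		mem->nregions_hpa = nregions_hpa;
		mem->regions_hpa = calloc(nregions_hpa,
			sizeof(struct virtio_memory_regions_hpa));
		if (mem->regions_hpa == NULL) {
			free(mem);
			return NULL;
		}
		return mem;
	}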