#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
+#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
uint32_t desc_idx;
};
+/*
+ * A structure to hold some fields needed in zero copy code path,
+ * mainly for associating an mbuf with the right desc_idx.
+ */
+struct zcopy_mbuf {
+ struct rte_mbuf *mbuf;	/* mbuf whose data area points into guest memory */
+ uint32_t desc_idx;	/* index of the vring desc this mbuf was built from */
+ uint16_t in_use;	/* nonzero while the mbuf is outstanding — presumably cleared on free; confirm against users */
+
+ TAILQ_ENTRY(zcopy_mbuf) next;	/* linkage for zcopy_mbuf_list below */
+};
+TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
+
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
struct vring_used *used;
uint32_t size;
- /* Last index used on the available ring */
- volatile uint16_t last_used_idx;
+ uint16_t last_avail_idx;
+ uint16_t last_used_idx;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
+
+ uint16_t nr_zmbuf;
+ uint16_t zmbuf_size;
+ uint16_t last_zmbuf_idx;
+ struct zcopy_mbuf *zmbufs;
+ struct zcopy_mbuf_list zmbuf_list;
} __rte_cache_aligned;
/* Old kernels have no such macro defined */
#define VIRTIO_F_VERSION_1 32
#endif
+/*
+ * One physically-contiguous guest memory chunk: maps a guest-physical
+ * range onto a host-physical range. Scanned linearly by gpa_to_hpa().
+ */
+struct guest_page {
+ uint64_t guest_phys_addr;	/* start of the range in guest physical space */
+ uint64_t host_phys_addr;	/* corresponding start in host physical space */
+ uint64_t size;	/* length of the contiguous range in bytes */
+};
+
/**
* Device structure contains all configuration information relating
* to the device.
/* to tell if we need broadcast rarp packet */
rte_atomic16_t broadcast_rarp;
uint32_t virt_qp_nb;
+ int dequeue_zero_copy;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ];
uint64_t log_addr;
struct ether_addr mac;
+ uint32_t nr_guest_pages;
+ uint32_t max_guest_pages;
+ struct guest_page *guest_pages;
} __rte_cache_aligned;
/**
* Information relating to memory regions including offsets to
* addresses in QEMUs memory file.
*/
-struct virtio_memory_regions {
- uint64_t guest_phys_address;
- uint64_t guest_phys_address_end;
- uint64_t memory_size;
- uint64_t userspace_address;
- uint64_t address_offset;
+struct virtio_memory_region {
+ uint64_t guest_phys_addr;	/* region start, guest physical space */
+ uint64_t guest_user_addr;	/* region start, guest (QEMU) virtual space */
+ uint64_t host_user_addr;	/* region start in our virtual space; used by gpa_to_vva() */
+ uint64_t size;	/* region length in bytes */
+ void *mmap_addr;	/* local mmap() base — may differ from host_user_addr by an offset; confirm against setup code */
+ uint64_t mmap_size;	/* length of the local mapping */
+ int fd;	/* presumably the memory fd received from the master; verify at map site */
 };
* Memory structure includes region and mapping information.
*/
struct virtio_memory {
-	/* Base QEMU userspace address of the memory file. */
- uint64_t base_address;
- uint64_t mapped_address;
- uint64_t mapped_size;
 uint32_t nregions;
- struct virtio_memory_regions regions[0];
+ /* zero-length array (GNU extension): struct is allocated with
+  * nregions trailing virtio_memory_region entries. */
+ struct virtio_memory_region regions[0];
};
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-/**
- * Function to convert guest physical addresses to vhost virtual addresses.
- * This is used to convert guest virtio buffer addresses.
- */
+/* Convert guest physical Address to host virtual address */
static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
+gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
+{
+ struct virtio_memory_region *reg;
+ uint32_t i;
+
+ /* Linear scan over regions; first region containing gpa wins. */
+ for (i = 0; i < dev->mem->nregions; i++) {
+ reg = &dev->mem->regions[i];
+ if (gpa >= reg->guest_phys_addr &&
+ gpa < reg->guest_phys_addr + reg->size) {
+ return gpa - reg->guest_phys_addr +
+ reg->host_user_addr;
+ }
+ }
+
+ /* 0 == "no region contains gpa"; callers must treat 0 as failure. */
+ return 0;
+}
+
+/* Convert guest physical address to host physical address */
+static inline phys_addr_t __attribute__((always_inline))
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
- struct virtio_memory_regions *region;
- uint32_t regionidx;
- uint64_t vhost_va = 0;
-
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_va = region->address_offset + guest_pa;
- break;
+ uint32_t i;
+ struct guest_page *page;
+
+ /* Find the guest_page whose [guest_phys_addr, +size) range fully
+  * contains [gpa, gpa + size); the buffer must not straddle pages. */
+ for (i = 0; i < dev->nr_guest_pages; i++) {
+ page = &dev->guest_pages[i];
+
+ /* NOTE(review): "gpa + size <" rejects a buffer ending exactly
+  * at the page boundary; "<=" looks intended — confirm. */
+ if (gpa >= page->guest_phys_addr &&
+ gpa + size < page->guest_phys_addr + page->size) {
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
 }
 }
- return vhost_va;
+
+ /* 0 == lookup failure (no page fully contains the buffer). */
+ return 0;
}
struct virtio_net_device_ops const *notify_ops;
int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
+void vhost_enable_dequeue_zero_copy(int vid);
/*
* Backend-specific cleanup. Defined by vhost-cuse and vhost-user.