-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
*/
#ifndef _VHOST_NET_CDEV_H_
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <sys/socket.h>
+#include <linux/if.h>
#include <rte_log.h>
+#include <rte_ether.h>
+#include <rte_rwlock.h>
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
+/* Used to indicate that the device is ready to operate */
+#define VIRTIO_DEV_READY 2
/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
+/*
+ * Structure that describes one batched memory copy.
+ */
+struct batch_copy_elem {
+ void *dst;
+ void *src;
+ uint32_t len;
+ uint64_t log_addr;
+};
+
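+/*
+ * Illustrative note: the data path appends small copies to
+ * vq->batch_copy_elems and bumps batch_copy_nb_elems while it walks the
+ * descriptors; a flush pass then performs the copies (and, when dirty
+ * logging is enabled, the matching vhost_log_write() calls) in one batch.
+ */
+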
/**
* Structure contains variables relevant to RX/TX virtqueues.
*/
uint32_t size;
uint16_t last_avail_idx;
- volatile uint16_t last_used_idx;
+ uint16_t last_used_idx;
+ /* Used index at which we last notified the front end. */
+ uint16_t signalled_used;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
/* Backend value to determine if device should be started or stopped */
int backend;
+ /* Vring enabled/disabled, controlled by the frontend */
+ int enabled;
+ /* Set once the ring addresses have been successfully translated */
+ int access_ok;
+ /* Serializes data-path access against ring invalidation */
+ rte_spinlock_t access_lock;
+
/* Used to notify the guest (trigger interrupt) */
int callfd;
/* Currently unused as polling mode is enabled */
int kickfd;
- int enabled;
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
uint16_t last_zmbuf_idx;
struct zcopy_mbuf *zmbufs;
struct zcopy_mbuf_list zmbuf_list;
+
+ /* Shadow used ring: used entries are staged here, then flushed in batch */
+ struct vring_used_elem *shadow_used_ring;
+ uint16_t shadow_used_idx;
+ /* Vring addresses as supplied by the frontend */
+ struct vhost_vring_addr ring_addrs;
+
+ /* Deferred copies accumulated by the data path (see batch_copy_elem) */
+ struct batch_copy_elem *batch_copy_elems;
+ uint16_t batch_copy_nb_elems;
+
+ /* IOTLB: locks, entry pool, translation cache and pending-miss list */
+ rte_rwlock_t iotlb_lock;
+ rte_rwlock_t iotlb_pending_lock;
+ struct rte_mempool *iotlb_pool;
+ TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+ int iotlb_cache_nr;
+ TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;
-/* Old kernels have no such macro defined */
+/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif
+#ifndef VIRTIO_NET_F_MQ
+ #define VIRTIO_NET_F_MQ 22
+#endif
-/*
- * Make an extra wrapper for VIRTIO_NET_F_MQ and
- * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
- * introduced since kernel v3.8. This makes our
- * code buildable for older kernel.
- */
-#ifdef VIRTIO_NET_F_MQ
- #define VHOST_MAX_QUEUE_PAIRS VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
- #define VHOST_SUPPORTS_MQ (1ULL << VIRTIO_NET_F_MQ)
-#else
- #define VHOST_MAX_QUEUE_PAIRS 1
- #define VHOST_SUPPORTS_MQ 0
-#endif
+#define VHOST_MAX_VRING 0x100
+#define VHOST_MAX_QUEUE_PAIRS 0x80
+
+#ifndef VIRTIO_NET_F_MTU
+ #define VIRTIO_NET_F_MTU 3
+#endif
+
+#ifndef VIRTIO_F_ANY_LAYOUT
+ #define VIRTIO_F_ANY_LAYOUT 27
+#endif
+
+/* Declare IOMMU-related bits for older kernels */
+#ifndef VIRTIO_F_IOMMU_PLATFORM
+
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+struct vhost_iotlb_msg {
+ __u64 iova;
+ __u64 size;
+ __u64 uaddr;
+#define VHOST_ACCESS_RO 0x1
+#define VHOST_ACCESS_WO 0x2
+#define VHOST_ACCESS_RW 0x3
+ __u8 perm;
+#define VHOST_IOTLB_MISS 1
+#define VHOST_IOTLB_UPDATE 2
+#define VHOST_IOTLB_INVALIDATE 3
+#define VHOST_IOTLB_ACCESS_FAIL 4
+ __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+ int type;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
#endif
/*
#define VIRTIO_F_VERSION_1 32
#endif
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+/* Features supported by this builtin vhost-user net driver. */
+#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
+ (1ULL << VIRTIO_F_ANY_LAYOUT) | \
+ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+ (1ULL << VIRTIO_NET_F_CTRL_RX) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+ (1ULL << VIRTIO_NET_F_MQ) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VHOST_F_LOG_ALL) | \
+ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
+ (1ULL << VIRTIO_NET_F_GSO) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_HOST_UFO) | \
+ (1ULL << VIRTIO_NET_F_HOST_ECN) | \
+ (1ULL << VIRTIO_NET_F_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+ (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
+ (1ULL << VIRTIO_RING_F_EVENT_IDX) | \
+ (1ULL << VIRTIO_NET_F_MTU) | \
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM))
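+
+/*
+ * Illustrative note: the bits above are only what this backend can offer;
+ * the set actually negotiated with the frontend lands in dev->features,
+ * which the data path then tests, e.g.:
+ *     if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) { ... }
+ */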
+
struct guest_page {
uint64_t guest_phys_addr;
uint64_t host_phys_addr;
*/
struct virtio_net {
/* Frontend (QEMU) memory and memory region information */
- struct virtio_memory *mem;
+ struct rte_vhost_memory *mem;
uint64_t features;
uint64_t protocol_features;
int vid;
uint16_t vhost_hlen;
/* to tell if we need broadcast rarp packet */
rte_atomic16_t broadcast_rarp;
- uint32_t virt_qp_nb;
+ uint32_t nr_vring;
int dequeue_zero_copy;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
uint64_t log_base;
uint64_t log_addr;
struct ether_addr mac;
+ uint16_t mtu;
+
+ struct vhost_device_ops const *notify_ops;
uint32_t nr_guest_pages;
uint32_t max_guest_pages;
struct guest_page *guest_pages;
+
+ /* Channel for backend-initiated (slave) requests, e.g. IOTLB misses */
+ int slave_req_fd;
} __rte_cache_aligned;
-/**
- * Information relating to memory regions including offsets to
- * addresses in QEMUs memory file.
- */
-struct virtio_memory_region {
- uint64_t guest_phys_addr;
- uint64_t guest_user_addr;
- uint64_t host_user_addr;
- uint64_t size;
- void *mmap_addr;
- uint64_t mmap_size;
- int fd;
-};
+#define VHOST_LOG_PAGE 4096
-/**
- * Memory structure includes region and mapping information.
+/*
+ * Atomically set a bit in memory.
*/
-struct virtio_memory {
- uint32_t nregions;
- struct virtio_memory_region regions[0];
-};
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+ /* Byte-wide atomic OR; nr is a bit index within the byte at *addr. */
+ __sync_fetch_and_or_1(addr, (1U << nr));
+}
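+
+/*
+ * Mark a guest page dirty in the log bitmap: one bit per VHOST_LOG_PAGE
+ * (4 KiB) page, with page N at byte N / 8, bit N % 8.
+ */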
+static __rte_always_inline void
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+ vhost_set_bit(page % 8, &log_base[page / 8]);
+}
+
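+/*
+ * Log a guest write of len bytes at guest physical address addr, dirtying
+ * every page the range touches. Worked example: addr = 0x5000, len = 0x2000
+ * dirties pages 5 and 6 (the loop exits once page * 4096 >= 0x7000).
+ */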
+static __rte_always_inline void
+vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+ uint64_t page;
+
+ if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+ !dev->log_base || !len))
+ return;
+
+ if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+ return;
+
+ /* To make sure guest memory updates are committed before logging */
+ rte_smp_wmb();
+
+ page = addr / VHOST_LOG_PAGE;
+ while (page * VHOST_LOG_PAGE < addr + len) {
+ vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+ page += 1;
+ }
+}
+
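+/*
+ * Log an update to the used ring; offset is relative to the start of the
+ * used ring, whose guest physical address is vq->log_guest_addr.
+ */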
+static __rte_always_inline void
+vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t offset, uint64_t len)
+{
+ vhost_log_write(dev, vq->log_guest_addr + offset, len);
+}
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
-/* Convert guest physical Address to host virtual address */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
-{
- struct virtio_memory_region *reg;
- uint32_t i;
-
- for (i = 0; i < dev->mem->nregions; i++) {
- reg = &dev->mem->regions[i];
- if (gpa >= reg->guest_phys_addr &&
- gpa < reg->guest_phys_addr + reg->size) {
- return gpa - reg->guest_phys_addr +
- reg->host_user_addr;
- }
- }
-
- return 0;
-}
-
/* Convert guest physical address to host physical address */
-static inline phys_addr_t __attribute__((always_inline))
+static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
uint32_t i;
return 0;
}
-struct virtio_net_device_ops const *notify_ops;
struct virtio_net *get_device(int vid);
int vhost_new_device(void);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
-int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);
+void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
+void free_vq(struct vhost_virtqueue *vq);
+
+int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
+void vhost_enable_dequeue_zero_copy(int vid);
+
+struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
/*
- * Backend-specific cleanup. Defined by vhost-cuse and vhost-user.
+ * Backend-specific cleanup.
+ *
+ * TODO: fix it; we have one backend now
*/
void vhost_backend_cleanup(struct virtio_net *dev);
+uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+
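+/*
+ * Translate an I/O virtual address to a host virtual address. When
+ * VIRTIO_F_IOMMU_PLATFORM has not been negotiated, the IOVA is a plain
+ * guest physical address, so the memory-table lookup is enough; otherwise
+ * go through the IOTLB cache via __vhost_iova_to_vva().
+ */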
+static __rte_always_inline uint64_t
+vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t size, uint8_t perm)
+{
+ if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ return rte_vhost_gpa_to_vva(dev->mem, iova);
+
+ return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+}
+
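+/*
+ * With VIRTIO_RING_F_EVENT_IDX, used_event is stored right after the end
+ * of the avail ring, i.e. at avail->ring[size].
+ */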
+#define vhost_used_event(vr) \
+ (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Given an event_idx value from the other side, if we have just
+ * incremented the index from old to new_idx, should we trigger an
+ * event?
+ */
+static __rte_always_inline int
+vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+ return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
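+
+/*
+ * Worked example (16-bit wrap-around arithmetic): with event_idx = 5,
+ * old = 4, new_idx = 6: (uint16_t)(6 - 5 - 1) = 0 < (uint16_t)(6 - 4) = 2,
+ * so signal. With old = 6, new_idx = 7: 1 < 1 is false; the event index
+ * was already crossed earlier, so no signal is needed.
+ */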
+
+static __rte_always_inline void
+vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ /* Flush used->idx update before we read avail->flags. */
+ rte_mb();
+
+ /* Don't kick the guest until we have reached the index it specified. */
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
+ uint16_t old = vq->signalled_used;
+ uint16_t new = vq->last_used_idx;
+
+ LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
+ __func__,
+ vhost_used_event(vq),
+ old, new);
+ if (vhost_need_event(vhost_used_event(vq), new, old)
+ && (vq->callfd >= 0)) {
+ vq->signalled_used = vq->last_used_idx;
+ eventfd_write(vq->callfd, (eventfd_t) 1);
+ }
+ } else {
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+ && (vq->callfd >= 0))
+ eventfd_write(vq->callfd, (eventfd_t)1);
+ }
+}
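+
+/*
+ * Note: callers are expected to invoke this after updating used->idx, so
+ * the rte_mb() above orders that update against the read of avail->flags
+ * and used_event.
+ */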
+
#endif /* _VHOST_NET_CDEV_H_ */