#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
+#include <rte_malloc.h>
#include "rte_vhost.h"
#include "rte_vdpa.h"
struct zcopy_mbuf {
struct rte_mbuf *mbuf;
uint32_t desc_idx;
+ uint16_t desc_count;
uint16_t in_use;
TAILQ_ENTRY(zcopy_mbuf) next;
unsigned long val;
};
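+/*
+ * Shadow used element for the packed ring: descriptor id, written length
+ * and the number of descriptors in the chain.
+ */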
+struct vring_used_elem_packed {
+ uint16_t id;
+ uint32_t len;
+ uint32_t count;
+};
+
/**
* Structure that contains variables relevant to RX/TX virtqueues.
*/
struct vhost_virtqueue {
- struct vring_desc *desc;
- struct vring_avail *avail;
- struct vring_used *used;
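+ /* Which member of each union is valid depends on VIRTIO_F_RING_PACKED negotiation */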
+ union {
+ struct vring_desc *desc;
+ struct vring_packed_desc *desc_packed;
+ };
+ union {
+ struct vring_avail *avail;
+ struct vring_packed_desc_event *driver_event;
+ };
+ union {
+ struct vring_used *used;
+ struct vring_packed_desc_event *device_event;
+ };
uint32_t size;
uint16_t last_avail_idx;
uint16_t last_used_idx;
/* Last used index we notified to the front end. */
uint16_t signalled_used;
+ bool signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)
/* Physical address of used ring, for logging */
uint64_t log_guest_addr;
+ /* inflight shared memory info */
+ union {
+ struct rte_vhost_inflight_info_split *inflight_split;
+ struct rte_vhost_inflight_info_packed *inflight_packed;
+ };
+ struct rte_vhost_resubmit_info *resubmit_inflight;
+ uint64_t global_counter;
+
uint16_t nr_zmbuf;
uint16_t zmbuf_size;
uint16_t last_zmbuf_idx;
struct zcopy_mbuf *zmbufs;
struct zcopy_mbuf_list zmbuf_list;
- struct vring_used_elem *shadow_used_ring;
+ union {
+ struct vring_used_elem *shadow_used_split;
+ struct vring_used_elem_packed *shadow_used_packed;
+ };
uint16_t shadow_used_idx;
struct vhost_vring_addr ring_addrs;
struct batch_copy_elem *batch_copy_elems;
uint16_t batch_copy_nb_elems;
+ bool used_wrap_counter;
+ bool avail_wrap_counter;
struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
uint16_t log_cache_nb_elem;
#define VIRTIO_F_RING_PACKED 34
-#define VRING_DESC_F_NEXT 1
-#define VRING_DESC_F_WRITE 2
-#define VRING_DESC_F_INDIRECT 4
-
-#define VRING_DESC_F_AVAIL (1ULL << 7)
-#define VRING_DESC_F_USED (1ULL << 15)
-
struct vring_packed_desc {
uint64_t addr;
uint32_t len;
uint16_t id;
uint16_t flags;
};
+
+struct vring_packed_desc_event {
+ uint16_t off_wrap;
+ uint16_t flags;
+};
#endif
+/*
+ * Declare the packed ring defines below unconditionally,
+ * as the kernel header might use different names.
+ */
+#define VRING_DESC_F_AVAIL (1ULL << 7)
+#define VRING_DESC_F_USED (1ULL << 15)
+
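+/*
+ * Packed ring notification suppression modes exposed through the
+ * driver/device event structures: always enable, always disable, or
+ * notify for a specific descriptor (off_wrap).
+ */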
+#define VRING_EVENT_F_ENABLE 0x0
+#define VRING_EVENT_F_DISABLE 0x1
+#define VRING_EVENT_F_DESC 0x2
+
/*
* Available and used descs are in the same order
*/
(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
(1ULL << VIRTIO_NET_F_MTU) | \
(1ULL << VIRTIO_F_IN_ORDER) | \
- (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_F_RING_PACKED))
struct guest_page {
uint64_t size;
};
-/**
- * function prototype for the vhost backend to handler specific vhost user
- * messages prior to the master message handling
- *
- * @param vid
- * vhost device id
- * @param msg
- * Message pointer.
- * @param require_reply
- * If the handler requires sending a reply, this varaible shall be written 1,
- * otherwise 0.
- * @param skip_master
- * If the handler requires skipping the master message handling, this variable
- * shall be written 1, otherwise 0.
- * @return
- * 0 on success, -1 on failure
- */
-typedef int (*vhost_msg_pre_handle)(int vid, void *msg,
- uint32_t *require_reply, uint32_t *skip_master);
-
-/**
- * function prototype for the vhost backend to handler specific vhost user
- * messages after the master message handling is done
- *
- * @param vid
- * vhost device id
- * @param msg
- * Message pointer.
- * @param require_reply
- * If the handler requires sending a reply, this varaible shall be written 1,
- * otherwise 0.
- * @return
- * 0 on success, -1 on failure
- */
-typedef int (*vhost_msg_post_handle)(int vid, void *msg,
- uint32_t *require_reply);
-
-/**
- * pre and post vhost user message handlers
- */
-struct vhost_user_extern_ops {
- vhost_msg_pre_handle pre_msg_handle;
- vhost_msg_post_handle post_msg_handle;
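+/*
+ * Shared memory region used to track in-flight descriptors, so that
+ * they can be resubmitted after a backend crash or restart.
+ */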
+struct inflight_mem_info {
+ int fd;
+ void *addr;
+ uint64_t size;
};
/**
rte_atomic16_t broadcast_rarp;
uint32_t nr_vring;
int dequeue_zero_copy;
+ int extbuf;
+ int linearbuf;
struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+ struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
char ifname[IF_NAME_SZ];
uint64_t log_size;
uint64_t log_base;
uint64_t log_addr;
- struct ether_addr mac;
+ struct rte_ether_addr mac;
uint16_t mtu;
struct vhost_device_ops const *notify_ops;
int slave_req_fd;
rte_spinlock_t slave_req_lock;
+ int postcopy_ufd;
+ int postcopy_listening;
+
/*
* Device id to identify a specific backend device.
* It's set to -1 for the default software implementation.
*/
int vdpa_dev_id;
- /* private data for virtio device */
+ /* context data for the external message handlers */
void *extern_data;
/* pre and post vhost user message handlers for the device */
- struct vhost_user_extern_ops extern_ops;
+ struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
static __rte_always_inline bool
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
- wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
-}
+ uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
-#define VHOST_LOG_PAGE 4096
-
-/*
- * Atomically set a bit in memory.
- */
-static __rte_always_inline void
-vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
-{
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
- /*
- * __sync_ built-ins are deprecated, but __atomic_ ones
- * are sub-optimized in older GCC versions.
- */
- __sync_fetch_and_or_1(addr, (1U << nr));
-#else
- __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
-#endif
+ return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
+ wrap_counter != !!(flags & VRING_DESC_F_USED);
}
-static __rte_always_inline void
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
- vhost_set_bit(page % 8, &log_base[page / 8]);
-}
+void __vhost_log_cache_write(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+ struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
- uint64_t page;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base || !len))
- return;
-
- if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
- return;
-
- /* To make sure guest memory updates are committed before logging */
- rte_smp_wmb();
-
- page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
- page += 1;
- }
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_write(dev, addr, len);
}
static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
- unsigned long *log_base;
- int i;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base))
- return;
-
- log_base = (unsigned long *)(uintptr_t)dev->log_base;
-
- /*
- * It is expected a write memory barrier has been issued
- * before this function is called.
- */
-
- for (i = 0; i < vq->log_cache_nb_elem; i++) {
- struct log_cache_entry *elem = vq->log_cache + i;
-
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
- /*
- * '__sync' builtins are deprecated, but '__atomic' ones
- * are sub-optimized in older GCC versions.
- */
- __sync_fetch_and_or(log_base + elem->offset, elem->val);
-#else
- __atomic_fetch_or(log_base + elem->offset, elem->val,
- __ATOMIC_RELAXED);
-#endif
- }
-
- rte_smp_wmb();
-
- vq->log_cache_nb_elem = 0;
-}
-
-static __rte_always_inline void
-vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
- uint64_t page)
-{
- uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
- uint32_t offset = page / (sizeof(unsigned long) << 3);
- int i;
-
- for (i = 0; i < vq->log_cache_nb_elem; i++) {
- struct log_cache_entry *elem = vq->log_cache + i;
-
- if (elem->offset == offset) {
- elem->val |= (1UL << bit_nr);
- return;
- }
- }
-
- if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
- /*
- * No more room for a new log cache entry,
- * so write the dirty log map directly.
- */
- rte_smp_wmb();
- vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-
- return;
- }
-
- vq->log_cache[i].offset = offset;
- vq->log_cache[i].val = (1UL << bit_nr);
- vq->log_cache_nb_elem++;
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_cache_sync(dev, vq);
}
static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t addr, uint64_t len)
{
- uint64_t page;
-
- if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
- !dev->log_base || !len))
- return;
-
- if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
- return;
-
- page = addr / VHOST_LOG_PAGE;
- while (page * VHOST_LOG_PAGE < addr + len) {
- vhost_log_cache_page(dev, vq, page);
- page += 1;
- }
+ if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+ __vhost_log_cache_write(dev, vq, addr, len);
}
static __rte_always_inline void
vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
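+/*
+ * When VIRTIO_F_IOMMU_PLATFORM is negotiated, the address to log is an
+ * IOVA that must be translated to a guest physical address first; the
+ * *_iova variants below handle both cases.
+ */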
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_cache_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len)
+{
+ if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+ return;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ __vhost_log_write_iova(dev, vq, iova, len);
+ else
+ __vhost_log_write(dev, iova, len);
+}
+
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1
return 0;
}
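+/*
+ * Translate a host virtual address to the corresponding guest physical
+ * address; returns 0 when the range does not fit in any guest memory
+ * region.
+ */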
+static __rte_always_inline uint64_t
+hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
+{
+ struct rte_vhost_mem_region *r;
+ uint32_t i;
+
+ if (unlikely(!dev || !dev->mem))
+ return 0;
+
+ for (i = 0; i < dev->mem->nregions; i++) {
+ r = &dev->mem->regions[i];
+
+ if (vva >= r->host_user_addr &&
+ vva + len < r->host_user_addr + r->size) {
+ return r->guest_phys_addr + vva - r->host_user_addr;
+ }
+ }
+ return 0;
+}
+
static __rte_always_inline struct virtio_net *
get_device(int vid)
{
void vhost_destroy_device_notify(struct virtio_net *dev);
void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
-void free_vq(struct vhost_virtqueue *vq);
+void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
void vhost_attach_vdpa_device(int vid, int did);
-void vhost_detach_vdpa_device(int vid);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
+void vhost_enable_extbuf(int vid);
+void vhost_enable_linearbuf(int vid);
struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint64_t iova, uint64_t *len, uint8_t perm);
+void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
+#define vhost_avail_event(vr) \
+ (*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
}
static __rte_always_inline void
-vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
+vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
/* Flush used->idx update before we read avail->flags. */
rte_smp_mb();
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
uint16_t old = vq->signalled_used;
uint16_t new = vq->last_used_idx;
+ bool signalled_used_valid = vq->signalled_used_valid;
+
+ vq->signalled_used = new;
+ vq->signalled_used_valid = true;
VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
__func__,
vhost_used_event(vq),
old, new);
- if (vhost_need_event(vhost_used_event(vq), new, old)
- && (vq->callfd >= 0)) {
- vq->signalled_used = vq->last_used_idx;
+
+ if ((vhost_need_event(vhost_used_event(vq), new, old) &&
+ (vq->callfd >= 0)) ||
+ unlikely(!signalled_used_valid)) {
eventfd_write(vq->callfd, (eventfd_t) 1);
+ if (dev->notify_ops->guest_notified)
+ dev->notify_ops->guest_notified(dev->vid);
}
} else {
/* Kick the guest if necessary. */
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
- && (vq->callfd >= 0))
+ && (vq->callfd >= 0)) {
eventfd_write(vq->callfd, (eventfd_t)1);
+ if (dev->notify_ops->guest_notified)
+ dev->notify_ops->guest_notified(dev->vid);
+ }
+ }
+}
+
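+/*
+ * Notify the guest for a packed ring: without EVENT_IDX (or when
+ * driver_event is not in DESC mode) the driver_event flags alone decide
+ * whether to kick; in DESC mode the off_wrap offset and wrap bit are
+ * unwrapped into the same index space as last_used_idx before calling
+ * vhost_need_event().
+ */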
+static __rte_always_inline void
+vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ uint16_t old, new, off, off_wrap;
+ bool signalled_used_valid, kick = false;
+
+ /* Flush used desc update. */
+ rte_smp_mb();
+
+ if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
+ if (vq->driver_event->flags !=
+ VRING_EVENT_F_DISABLE)
+ kick = true;
+ goto kick;
+ }
+
+ old = vq->signalled_used;
+ new = vq->last_used_idx;
+ vq->signalled_used = new;
+ signalled_used_valid = vq->signalled_used_valid;
+ vq->signalled_used_valid = true;
+
+ if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
+ if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
+ kick = true;
+ goto kick;
}
+
+ if (unlikely(!signalled_used_valid)) {
+ kick = true;
+ goto kick;
+ }
+
+ rte_smp_rmb();
+
+ off_wrap = vq->driver_event->off_wrap;
+ off = off_wrap & ~(1 << 15);
+
+ if (new <= old)
+ old -= vq->size;
+
+ if (vq->used_wrap_counter != off_wrap >> 15)
+ off -= vq->size;
+
+ if (vhost_need_event(off, new, old))
+ kick = true;
+kick:
+ if (kick) {
+ eventfd_write(vq->callfd, (eventfd_t)1);
+ if (dev->notify_ops->guest_notified)
+ dev->notify_ops->guest_notified(dev->vid);
+ }
+}
+
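+/* Free the local copy of an indirect descriptor table. */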
+static __rte_always_inline void
+free_ind_table(void *idesc)
+{
+ rte_free(idesc);
+}
+
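+/*
+ * Reset buf_addr/buf_iova of each segment to their default location right
+ * after the mbuf header and private data, as dequeue zero-copy makes them
+ * point into guest memory.
+ */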
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+ uint32_t mbuf_size, priv_size;
+
+ while (m) {
+ priv_size = rte_pktmbuf_priv_size(m->pool);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ /* start of buffer is after mbuf structure and priv data */
+
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m = m->next;
+ }
+}
+
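+/* A zero-copy mbuf chain is consumed once no segment holds an extra reference. */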
+static __rte_always_inline bool
+mbuf_is_consumed(struct rte_mbuf *m)
+{
+ while (m) {
+ if (rte_mbuf_refcnt_read(m) > 1)
+ return false;
+ m = m->next;
+ }
+
+ return true;
+}
+
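+/* Mark a zero-copy mbuf slot as unused so it can be reused. */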
+static __rte_always_inline void
+put_zmbuf(struct zcopy_mbuf *zmbuf)
+{
+ zmbuf->in_use = 0;
}
#endif /* _VHOST_NET_CDEV_H_ */