#include <stdlib.h>
#include <string.h>
#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
+#include <sys/syscall.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
+#ifdef RTE_LIBRTE_VHOST_POSTCOPY
+#include <linux/userfaultfd.h>
+#endif
#include <rte_common.h>
#include <rte_malloc.h>
[VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG",
[VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
[VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
+ [VHOST_USER_POSTCOPY_ADVISE] = "VHOST_USER_POSTCOPY_ADVISE",
+ [VHOST_USER_POSTCOPY_LISTEN] = "VHOST_USER_POSTCOPY_LISTEN",
+ [VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END",
};
-/* The possible results of a message handling function */
-enum vh_result {
- /* Message handling failed */
- VH_RESULT_ERR = -1,
- /* Message handling successful */
- VH_RESULT_OK = 0,
- /* Message handling successful and reply prepared */
- VH_RESULT_REPLY = 1,
-};
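+/* Forward declarations: the postcopy path in vhost_user_set_mem_table()
+ * replies on the main channel and waits for QEMU's ack mid-handling.
+ */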
+static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
+static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
static uint64_t
get_blk_size(int fd)
return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
+/*
+ * Reclaim all the outstanding zmbufs for a virtqueue.
+ */
+static void
+drain_zmbuf_list(struct vhost_virtqueue *vq)
+{
+ struct zcopy_mbuf *zmbuf, *next;
+
+ for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+ zmbuf != NULL; zmbuf = next) {
+ next = TAILQ_NEXT(zmbuf, next);
+
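+ /* Poll every 1 ms until the mbuf is no longer in use. */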
+ while (!mbuf_is_consumed(zmbuf->mbuf))
+ usleep(1000);
+
+ TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
+ rte_pktmbuf_free(zmbuf->mbuf);
+ put_zmbuf(zmbuf);
+ vq->nr_zmbuf -= 1;
+ }
+}
+
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
struct rte_vhost_mem_region *reg;
+ struct vhost_virtqueue *vq;
if (!dev || !dev->mem)
return;
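+ /* Wait for all zero-copy mbufs to be released before unmapping guest memory. */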
+ if (dev->dequeue_zero_copy) {
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
+ if (vq)
+ drain_zmbuf_list(vq);
+ }
+ }
+
for (i = 0; i < dev->mem->nregions; i++) {
reg = &dev->mem->regions[i];
if (reg->host_user_addr) {
close(dev->slave_req_fd);
dev->slave_req_fd = -1;
}
+
+ if (dev->postcopy_ufd >= 0) {
+ close(dev->postcopy_ufd);
+ dev->postcopy_ufd = -1;
+ }
+
+ dev->postcopy_listening = 0;
}
/*
*/
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
- struct VhostUserMsg *msg __rte_unused)
+ struct VhostUserMsg *msg __rte_unused,
+ int main_fd __rte_unused)
{
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
vhost_user_reset_owner(struct virtio_net **pdev,
- struct VhostUserMsg *msg __rte_unused)
+ struct VhostUserMsg *msg __rte_unused,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
vhost_destroy_device_notify(dev);
cleanup_device(dev, 0);
reset_device(dev);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
* The features that we support are requested.
*/
static int
-vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t features = 0;
msg->payload.u64 = features;
msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
* The queue number that we support is requested.
*/
static int
-vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint32_t queue_num = 0;
msg->payload.u64 = (uint64_t)queue_num;
msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
- return VH_RESULT_REPLY;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
* We receive the negotiated features supported by us and the virtio device.
*/
static int
-vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t features = msg->payload.u64;
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid negotiated features.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->flags & VIRTIO_DEV_RUNNING) {
if (dev->features == features)
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
/*
* Error out if master tries to change features while device is
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) features changed while device is running.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->notify_ops->features_changed)
if (vdpa_dev && vdpa_dev->ops->set_features)
vdpa_dev->ops->set_features(dev->vid);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
*/
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid virtqueue size %u\n", vq->size);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (dev->dequeue_zero_copy) {
if (!vq->shadow_used_packed) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
} else {
if (!vq->shadow_used_split) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for shadow used ring.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
}
if (!vq->batch_copy_elems) {
RTE_LOG(ERR, VHOST_CONFIG,
"failed to allocate memory for batching copy.\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
struct rte_vhost_mem_region *r;
uint32_t i;
+ if (unlikely(!dev || !dev->mem))
+ goto out_error;
+
/* Find the region where the address lives. */
for (i = 0; i < dev->mem->nregions; i++) {
r = &dev->mem->regions[i];
r->host_user_addr;
}
}
+out_error:
*len = 0;
return 0;
{
struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
struct vhost_vring_addr *addr = &vq->ring_addrs;
- uint64_t len;
+ uint64_t len, expected_len;
if (vq_is_packed(dev)) {
len = sizeof(struct vring_packed_desc) * vq->size;
addr = &vq->ring_addrs;
len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
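+ /* VIRTIO_RING_F_EVENT_IDX adds a trailing used_event field to the avail ring. */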
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
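+ /* ring_addr_to_vva() may shrink len to the actually-mapped size,
+ * so remember the expected value for the check below.
+ */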
vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->avail_user_addr, &len);
- if (vq->avail == 0 ||
- len != sizeof(struct vring_avail) +
- sizeof(uint16_t) * vq->size) {
+ if (vq->avail == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map avail ring.\n",
dev->vid);
len = sizeof(struct vring_used) +
sizeof(struct vring_used_elem) * vq->size;
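+ /* Likewise, EVENT_IDX adds a trailing avail_event field to the used ring. */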
+ if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+ len += sizeof(uint16_t);
+ expected_len = len;
vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
vq, addr->used_user_addr, &len);
- if (vq->used == 0 || len != sizeof(struct vring_used) +
- sizeof(struct vring_used_elem) * vq->size) {
+ if (vq->used == 0 || len != expected_len) {
RTE_LOG(DEBUG, VHOST_CONFIG,
"(%d) failed to map used ring.\n",
dev->vid);
* This function then converts these to our address space.
*/
static int
-vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq;
struct vhost_vring_addr *addr = &msg->payload.addr;
if (dev->mem == NULL)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
/* addr->index refers to the queue index. The txq 1, rxq is 0. */
vq = dev->virtqueue[msg->payload.addr.index];
(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
dev = translate_ring_addresses(dev, msg->payload.addr.index);
if (!dev)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
*pdev = dev;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
*/
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
- dev->virtqueue[msg->payload.state.index]->last_used_idx =
- msg->payload.state.num;
- dev->virtqueue[msg->payload.state.index]->last_avail_idx =
- msg->payload.state.num;
+ struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+ uint64_t val = msg->payload.state.num;
- return VH_RESULT_OK;
+ if (vq_is_packed(dev)) {
+ /*
+ * Bit[0:14]: avail index
+ * Bit[15]: avail wrap counter
+ */
+ vq->last_avail_idx = val & 0x7fff;
+ vq->avail_wrap_counter = !!(val & (0x1 << 15));
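+ /*
+ * Example: val = 0x8005 decodes to last_avail_idx = 5
+ * with the avail wrap counter set.
+ */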
+ /*
+ * Set the used index to the same value as the available one;
+ * they should be equal, since ring processing was stopped
+ * when the base was last retrieved (GET_VRING_BASE).
+ */
+ vq->last_used_idx = vq->last_avail_idx;
+ vq->used_wrap_counter = vq->avail_wrap_counter;
+ } else {
+ vq->last_used_idx = msg->payload.state.num;
+ vq->last_avail_idx = msg->payload.state.num;
+ }
+
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
uint64_t host_phys_addr, uint64_t size)
{
struct guest_page *page, *last_page;
+ struct guest_page *old_pages;
if (dev->nr_guest_pages == dev->max_guest_pages) {
dev->max_guest_pages *= 2;
+ old_pages = dev->guest_pages;
dev->guest_pages = realloc(dev->guest_pages,
dev->max_guest_pages * sizeof(*page));
if (!dev->guest_pages) {
RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
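+ /* realloc() keeps the old block on failure; free it to avoid a leak. */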
+ free(old_pages);
return -1;
}
}
}
static int
-vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd)
{
struct virtio_net *dev = *pdev;
- struct VhostUserMemory memory = msg->payload.memory;
+ struct VhostUserMemory *memory = &msg->payload.memory;
struct rte_vhost_mem_region *reg;
void *mmap_addr;
uint64_t mmap_size;
int populate;
int fd;
- if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
+ if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
RTE_LOG(ERR, VHOST_CONFIG,
- "too many memory regions (%u)\n", memory.nregions);
- return VH_RESULT_ERR;
+ "too many memory regions (%u)\n", memory->nregions);
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
+ if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
RTE_LOG(INFO, VHOST_CONFIG,
"(%d) memory regions not changed\n", dev->vid);
- for (i = 0; i < memory.nregions; i++)
+ for (i = 0; i < memory->nregions; i++)
close(msg->fds[i]);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
if (dev->mem) {
"(%d) failed to allocate memory "
"for dev->guest_pages\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
}
dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
- sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
+ sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- dev->mem->nregions = memory.nregions;
+ dev->mem->nregions = memory->nregions;
- for (i = 0; i < memory.nregions; i++) {
+ for (i = 0; i < memory->nregions; i++) {
fd = msg->fds[i];
reg = &dev->mem->regions[i];
- reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
- reg->guest_user_addr = memory.regions[i].userspace_addr;
- reg->size = memory.regions[i].memory_size;
+ reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
+ reg->guest_user_addr = memory->regions[i].userspace_addr;
+ reg->size = memory->regions[i].memory_size;
reg->fd = fd;
- mmap_offset = memory.regions[i].mmap_offset;
+ mmap_offset = memory->regions[i].mmap_offset;
/* Check for memory_size + mmap_offset overflow */
if (mmap_offset >= -reg->size) {
mmap_size,
alignment,
mmap_offset);
+
+ if (dev->postcopy_listening) {
+ /*
+ * We have no better way right now than sharing
+ * DPDK's virtual address with QEMU, so that QEMU can
+ * retrieve the region offset when handling userfaults.
+ */
+ memory->regions[i].userspace_addr =
+ reg->host_user_addr;
+ }
+ }
+ if (dev->postcopy_listening) {
+ /* Send the addresses back to qemu */
+ msg->fd_num = 0;
+ send_vhost_reply(main_fd, msg);
+
+ /* Wait for QEMU to acknowledge that it has received the
+ * addresses; no faults may be generated before it does.
+ */
+ VhostUserMsg ack_msg;
+ if (read_vhost_message(main_fd, &ack_msg) <= 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to read qemu ack on postcopy set-mem-table\n");
+ goto err_mmap;
+ }
+ if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Bad qemu ack on postcopy set-mem-table (%d)\n",
+ ack_msg.request.master);
+ goto err_mmap;
+ }
+
+ /* Now register the regions with userfaultfd, after which the memory can be used */
+ for (i = 0; i < memory->nregions; i++) {
+#ifdef RTE_LIBRTE_VHOST_POSTCOPY
+ reg = &dev->mem->regions[i];
+ struct uffdio_register reg_struct;
+
+ /*
+ * Register the whole mmapped area to ensure the
+ * registered range is aligned on a page boundary.
+ */
+ reg_struct.range.start =
+ (uint64_t)(uintptr_t)reg->mmap_addr;
+ reg_struct.range.len = reg->mmap_size;
+ reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
+
+ if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
+ &reg_struct)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Failed to register ufd for region %d: (ufd = %d) %s\n",
+ i, dev->postcopy_ufd,
+ strerror(errno));
+ goto err_mmap;
+ }
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "\t userfaultfd registered for range : %llx - %llx\n",
+ reg_struct.range.start,
+ reg_struct.range.start +
+ reg_struct.range.len - 1);
+#else
+ goto err_mmap;
+#endif
+ }
}
for (i = 0; i < dev->nr_vring; i++) {
vring_invalidate(dev, vq);
dev = translate_ring_addresses(dev, i);
- if (!dev)
- return VH_RESULT_ERR;
+ if (!dev) {
+ dev = *pdev;
+ goto err_mmap;
+ }
*pdev = dev;
}
dump_guest_pages(dev);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
err_mmap:
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
static bool
}
static int
-vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_vring_file file;
vq->callfd = file.fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
-vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_vring_file file;
/* Interpret ring addresses only when ring is started. */
dev = translate_ring_addresses(dev, file.index);
if (!dev)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
*pdev = dev;
* the ring starts already enabled. Otherwise, it is enabled via
* the SET_VRING_ENABLE message.
*/
- if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+ if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
vq->enabled = 1;
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(
+ dev->vid, file.index, 1);
+ }
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- rte_pktmbuf_free(zmbuf->mbuf);
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- }
+ drain_zmbuf_list(vq);
rte_free(vq->zmbufs);
}
*/
static int
vhost_user_get_vring_base(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
+ uint64_t val;
/* We have to stop the queue (virtio) if it is running. */
vhost_destroy_device_notify(dev);
dev->flags &= ~VIRTIO_DEV_READY;
dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
- /* Here we are safe to get the last avail index */
- msg->payload.state.num = vq->last_avail_idx;
+ /* Here we are safe to get the indexes */
+ if (vq_is_packed(dev)) {
+ /*
+ * Bit[0:14]: avail index
+ * Bit[15]: avail wrap counter
+ */
+ val = vq->last_avail_idx & 0x7fff;
+ val |= vq->avail_wrap_counter << 15;
+ msg->payload.state.num = val;
+ } else {
+ msg->payload.state.num = vq->last_avail_idx;
+ }
RTE_LOG(INFO, VHOST_CONFIG,
"vring base idx:%d file:%d\n", msg->payload.state.index,
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
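+ /* The cached signalled_used index is no longer valid once the ring is stopped. */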
+ vq->signalled_used_valid = false;
+
if (dev->dequeue_zero_copy)
free_zmbufs(vq);
if (vq_is_packed(dev)) {
vq->batch_copy_elems = NULL;
msg->size = sizeof(msg->payload.state);
+ msg->fd_num = 0;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
/*
*/
static int
vhost_user_set_vring_enable(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
int enable = (int)msg->payload.state.num;
dev->notify_ops->vring_state_changed(dev->vid,
index, enable);
+ /* On disable, processing of the rings has to be stopped. */
+ if (!enable && dev->dequeue_zero_copy)
+ drain_zmbuf_list(dev->virtqueue[index]);
+
dev->virtqueue[index]->enabled = enable;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
vhost_user_get_protocol_features(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t features, protocol_features;
msg->payload.u64 = protocol_features;
msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
static int
vhost_user_set_protocol_features(struct virtio_net **pdev,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint64_t protocol_features = msg->payload.u64;
- if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
+ uint64_t slave_protocol_features = 0;
+
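+ /* Validate against the per-device protocol features instead of the static build-time mask. */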
+ rte_vhost_driver_get_protocol_features(dev->ifname,
+ &slave_protocol_features);
+ if (protocol_features & ~slave_protocol_features) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) received invalid protocol features.\n",
dev->vid);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->protocol_features = protocol_features;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
-vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
int fd = msg->fds[0];
if (fd < 0) {
RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
if (msg->size != sizeof(VhostUserLog)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid log base msg size: %"PRId32" != %d\n",
msg->size, (int)sizeof(VhostUserLog));
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
size = msg->payload.log.mmap_size;
RTE_LOG(ERR, VHOST_CONFIG,
"log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
off, size);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
RTE_LOG(INFO, VHOST_CONFIG,
close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
/*
dev->log_base = dev->log_addr + off;
dev->log_size = size;
- msg->size = sizeof(msg->payload.u64);
+ /*
+ * The spec is not clear about it (yet), but QEMU doesn't expect
+ * any payload in the reply.
+ */
+ msg->size = 0;
+ msg->fd_num = 0;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
- struct VhostUserMsg *msg)
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
close(msg->fds[0]);
RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
/*
* a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
*/
static int
-vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
uint8_t *mac = (uint8_t *)&msg->payload.u64;
if (vdpa_dev && vdpa_dev->ops->migration_done)
vdpa_dev->ops->migration_done(dev->vid);
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
-vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
if (msg->payload.u64 < VIRTIO_MIN_MTU ||
RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
msg->payload.u64);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->mtu = msg->payload.u64;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
-vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
int fd = msg->fds[0];
RTE_LOG(ERR, VHOST_CONFIG,
"Invalid file descriptor for slave channel (%d)\n",
fd);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
dev->slave_req_fd = fd;
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
}
static int
}
static int
-vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
+vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
{
struct virtio_net *dev = *pdev;
struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
len = imsg->size;
vva = qva_to_vva(dev, imsg->uaddr, &len);
if (!vva)
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
default:
RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
imsg->type);
- return VH_RESULT_ERR;
+ return RTE_VHOST_MSG_RESULT_ERR;
}
- return VH_RESULT_OK;
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_set_postcopy_advise(struct virtio_net **pdev,
+ struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+#ifdef RTE_LIBRTE_VHOST_POSTCOPY
+ struct uffdio_api api_struct;
+
+ dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
+
+ if (dev->postcopy_ufd == -1) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Userfaultfd not available: %s\n",
+ strerror(errno));
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ api_struct.api = UFFD_API;
+ api_struct.features = 0;
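+ /* The UFFDIO_API handshake must succeed before any other userfaultfd ioctl. */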
+ if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
+ RTE_LOG(ERR, VHOST_CONFIG, "UFFDIO_API ioctl failure: %s\n",
+ strerror(errno));
+ close(dev->postcopy_ufd);
+ dev->postcopy_ufd = -1;
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ msg->fds[0] = dev->postcopy_ufd;
+ msg->fd_num = 1;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
+#else
+ dev->postcopy_ufd = -1;
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_ERR;
+#endif
+}
+
+static int
+vhost_user_set_postcopy_listen(struct virtio_net **pdev,
+ struct VhostUserMsg *msg __rte_unused,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ if (dev->mem && dev->mem->nregions) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Regions already registered at postcopy-listen\n");
+ return RTE_VHOST_MSG_RESULT_ERR;
+ }
+ dev->postcopy_listening = 1;
+
+ return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
+ int main_fd __rte_unused)
+{
+ struct virtio_net *dev = *pdev;
+
+ dev->postcopy_listening = 0;
+ if (dev->postcopy_ufd >= 0) {
+ close(dev->postcopy_ufd);
+ dev->postcopy_ufd = -1;
+ }
+
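+ /* Reply with a zero u64 payload to report success to the master. */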
+ msg->payload.u64 = 0;
+ msg->size = sizeof(msg->payload.u64);
+ msg->fd_num = 0;
+
+ return RTE_VHOST_MSG_RESULT_REPLY;
}
typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
- struct VhostUserMsg *msg);
+ struct VhostUserMsg *msg,
+ int main_fd);
static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
[VHOST_USER_NONE] = NULL,
[VHOST_USER_GET_FEATURES] = vhost_user_get_features,
[VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
[VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
[VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
+ [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
+ [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
+ [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
};
int ret;
ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
- msg->fds, VHOST_MEMORY_MAX_NREGIONS);
+ msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
if (ret <= 0)
return ret;
- if (msg && msg->size) {
+ if (msg->size) {
if (msg->size > sizeof(msg->payload)) {
RTE_LOG(ERR, VHOST_CONFIG,
"invalid msg size: %d\n", msg->size);
}
static int
-send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
+send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
if (!msg)
return 0;
return send_fd_message(sockfd, (char *)msg,
- VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
+ VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
}
static int
msg->flags |= VHOST_USER_VERSION;
msg->flags |= VHOST_USER_REPLY_MASK;
- return send_vhost_message(sockfd, msg, NULL, 0);
+ return send_vhost_message(sockfd, msg);
}
static int
-send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
- int *fds, int fd_num)
+send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
{
int ret;
if (msg->flags & VHOST_USER_NEED_REPLY)
rte_spinlock_lock(&dev->slave_req_lock);
- ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
+ ret = send_vhost_message(dev->slave_req_fd, msg);
if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
rte_spinlock_unlock(&dev->slave_req_lock);
int did = -1;
int ret;
int unlock_required = 0;
- uint32_t skip_master = 0;
+ bool handled;
int request;
dev = get_device(vid);
}
ret = read_vhost_message(fd, &msg);
- if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
+ if (ret <= 0) {
if (ret < 0)
RTE_LOG(ERR, VHOST_CONFIG,
"vhost read message failed\n");
- else if (ret == 0)
+ else
RTE_LOG(INFO, VHOST_CONFIG,
"vhost peer closed\n");
- else
- RTE_LOG(ERR, VHOST_CONFIG,
- "vhost read incorrect message\n");
return -1;
}
ret = 0;
- if (msg.request.master != VHOST_USER_IOTLB_MSG)
- RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request.master]);
- else
- RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request.master]);
+ request = msg.request.master;
+ if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
+ vhost_message_str[request]) {
+ if (request != VHOST_USER_IOTLB_MSG)
+ RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[request]);
+ else
+ RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+ vhost_message_str[request]);
+ } else {
+ RTE_LOG(DEBUG, VHOST_CONFIG, "External request %d\n", request);
+ }
ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
if (ret < 0) {
* inactive, so it is safe. Otherwise taking the access_lock
* would cause a dead lock.
*/
- switch (msg.request.master) {
+ switch (request) {
case VHOST_USER_SET_FEATURES:
case VHOST_USER_SET_PROTOCOL_FEATURES:
case VHOST_USER_SET_OWNER:
}
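+ /* Track whether any handler (external or built-in) consumed the request. */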
+ handled = false;
if (dev->extern_ops.pre_msg_handle) {
- uint32_t need_reply;
-
ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
- (void *)&msg, &need_reply, &skip_master);
- if (ret < 0)
- goto skip_to_reply;
-
- if (need_reply)
+ (void *)&msg);
+ switch (ret) {
+ case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(fd, &msg);
-
- if (skip_master)
+ /* Fall-through */
+ case RTE_VHOST_MSG_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_OK:
+ handled = true;
goto skip_to_post_handle;
+ case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+ default:
+ break;
+ }
}
- request = msg.request.master;
if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
if (!vhost_message_handlers[request])
goto skip_to_post_handle;
- ret = vhost_message_handlers[request](&dev, &msg);
+ ret = vhost_message_handlers[request](&dev, &msg, fd);
switch (ret) {
- case VH_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_ERR:
RTE_LOG(ERR, VHOST_CONFIG,
"Processing %s failed.\n",
vhost_message_str[request]);
+ handled = true;
break;
- case VH_RESULT_OK:
+ case RTE_VHOST_MSG_RESULT_OK:
RTE_LOG(DEBUG, VHOST_CONFIG,
"Processing %s succeeded.\n",
vhost_message_str[request]);
+ handled = true;
break;
- case VH_RESULT_REPLY:
+ case RTE_VHOST_MSG_RESULT_REPLY:
RTE_LOG(DEBUG, VHOST_CONFIG,
"Processing %s succeeded and needs reply.\n",
vhost_message_str[request]);
send_vhost_reply(fd, &msg);
+ handled = true;
+ break;
+ default:
break;
}
- } else {
- RTE_LOG(ERR, VHOST_CONFIG,
- "Requested invalid message type %d.\n", request);
- ret = VH_RESULT_ERR;
}
skip_to_post_handle:
- if (!ret && dev->extern_ops.post_msg_handle) {
- uint32_t need_reply;
-
- ret = (*dev->extern_ops.post_msg_handle)(
- dev->vid, (void *)&msg, &need_reply);
- if (ret < 0)
- goto skip_to_reply;
-
- if (need_reply)
+ if (ret != RTE_VHOST_MSG_RESULT_ERR &&
+ dev->extern_ops.post_msg_handle) {
+ ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
+ (void *)&msg);
+ switch (ret) {
+ case RTE_VHOST_MSG_RESULT_REPLY:
send_vhost_reply(fd, &msg);
+ /* Fall-through */
+ case RTE_VHOST_MSG_RESULT_ERR:
+ case RTE_VHOST_MSG_RESULT_OK:
+ handled = true;
+ break;
+ case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+ default:
+ break;
+ }
}
-skip_to_reply:
if (unlock_required)
vhost_user_unlock_all_queue_pairs(dev);
+ /* If the message was not handled at this stage, treat it as an error. */
+ if (!handled) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "vhost message (req: %d) was not handled.\n", request);
+ ret = RTE_VHOST_MSG_RESULT_ERR;
+ }
+
+ /*
+ * If the request required a reply that was already sent,
+ * this optional reply-ack will not be sent, as the
+ * VHOST_USER_NEED_REPLY flag was cleared in send_vhost_reply().
+ */
if (msg.flags & VHOST_USER_NEED_REPLY) {
- msg.payload.u64 = !!ret;
+ msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
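+ /* (Per reply-ack semantics: zero reports success, non-zero failure.) */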
msg.size = sizeof(msg.payload.u64);
+ msg.fd_num = 0;
send_vhost_reply(fd, &msg);
- } else if (ret) {
+ } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
RTE_LOG(ERR, VHOST_CONFIG,
"vhost message handling failed.\n");
return -1;
vdpa_dev = rte_vdpa_get_device(did);
if (vdpa_dev && virtio_is_ready(dev) &&
!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
- msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
+ msg.request.master == VHOST_USER_SET_VRING_CALL) {
if (vdpa_dev->ops->dev_conf)
vdpa_dev->ops->dev_conf(dev->vid);
dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
- if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "(%d) software relay is used for vDPA, performance may be low.\n",
- dev->vid);
- }
}
return 0;
},
};
- ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
+ ret = send_vhost_message(dev->slave_req_fd, &msg);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to send IOTLB miss message (%d)\n",
uint64_t offset,
uint64_t size)
{
- int *fdp = NULL;
- size_t fd_num = 0;
int ret;
struct VhostUserMsg msg = {
.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
if (fd < 0)
msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
else {
- fdp = &fd;
- fd_num = 1;
+ msg.fds[0] = fd;
+ msg.fd_num = 1;
}
- ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
+ ret = send_vhost_slave_message(dev, &msg);
if (ret < 0) {
RTE_LOG(ERR, VHOST_CONFIG,
"Failed to set host notifier (%d)\n", ret);
return process_slave_message_reply(dev, &msg);
}
-int vhost_user_host_notifier_ctrl(int vid, bool enable)
+int rte_vhost_host_notifier_ctrl(int vid, bool enable)
{
struct virtio_net *dev;
struct rte_vdpa_device *vdpa_dev;