#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
-#include <unistd.h>
#include <rte_common.h>
#include <rte_log.h>
static void
free_mem_region(struct virtio_net *dev)
{
struct orig_region_map *region;
unsigned int idx;
- uint64_t alignment;
if (!dev || !dev->mem)
return;
region = orig_region(dev->mem, dev->mem->nregions);
for (idx = 0; idx < dev->mem->nregions; idx++) {
if (region[idx].mapped_address) {
- alignment = region[idx].blksz;
- munmap((void *)(uintptr_t)
- RTE_ALIGN_FLOOR(
- region[idx].mapped_address, alignment),
- RTE_ALIGN_CEIL(
- region[idx].mapped_size, alignment));
+ munmap((void *)(uintptr_t)region[idx].mapped_address,
+ region[idx].mapped_size);
close(region[idx].fd);
}
}
}
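
/*
 * Editorial sketch (not part of this patch): the get_blk_size() helper
 * the hunks below rely on. For a hugetlbfs-backed fd, fstat() reports
 * the hugepage size in st_blksize, which is the alignment that mmap()
 * lengths are rounded to here.
 */
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;

	fstat(fd, &stat);
	return (uint64_t)stat.st_blksize;
}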
+void
+vhost_backend_cleanup(struct virtio_net *dev)
+{
+ if (dev->mem) {
+ free_mem_region(dev);
+ free(dev->mem);
+ dev->mem = NULL;
+ }
+}
+
int
user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
/* This is ugly */
mapped_size = memory.regions[idx].memory_size +
memory.regions[idx].mmap_offset;
+
+	/* mmap() without the MAP_ANONYMOUS flag must be called with a
+	 * length argument aligned to the hugepage size on older
+	 * long-term Linux kernels (e.g. 2.6.32 and 3.2.72), or it
+	 * will fail with EINVAL.
+	 *
+	 * To avoid that failure, make sure the length is kept aligned
+	 * in the caller.
+	 */
+ alignment = get_blk_size(pmsg->fds[idx]);
+ mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);
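+	/*
+	 * Worked example (editorial, assuming 2 MB hugepages): with
+	 * memory_size 0x1fe000 and mmap_offset 0x1000, mapped_size is
+	 * 0x1ff000; RTE_ALIGN_CEIL(0x1ff000, 0x200000) rounds it up to
+	 * 0x200000, so the older-kernel EINVAL case above is avoided.
+	 */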
+
	mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
		mapped_size,
		PROT_READ | PROT_WRITE, MAP_SHARED,
		pmsg->fds[idx],
		0);
RTE_LOG(INFO, VHOST_CONFIG,
- "mapped region %d fd:%d to %p sz:0x%"PRIx64" off:0x%"PRIx64"\n",
+ "mapped region %d fd:%d to:%p sz:0x%"PRIx64" "
+ "off:0x%"PRIx64" align:0x%"PRIx64"\n",
idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
- mapped_size, memory.regions[idx].mmap_offset);
+ mapped_size, memory.regions[idx].mmap_offset,
+ alignment);
	if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"mmap qemu guest failed.\n");
		goto err_mmap;
	}

	pregion_orig[idx].mapped_address = mapped_address;
pregion_orig[idx].mapped_size = mapped_size;
- pregion_orig[idx].blksz = get_blk_size(pmsg->fds[idx]);
+ pregion_orig[idx].blksz = alignment;
pregion_orig[idx].fd = pmsg->fds[idx];
mapped_address += memory.regions[idx].mmap_offset;
err_mmap:
while (idx--) {
- alignment = pregion_orig[idx].blksz;
- munmap((void *)(uintptr_t)RTE_ALIGN_FLOOR(
- pregion_orig[idx].mapped_address, alignment),
- RTE_ALIGN_CEIL(pregion_orig[idx].mapped_size,
- alignment));
+ munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address,
+ pregion_orig[idx].mapped_size);
close(pregion_orig[idx].fd);
}
free(dev->mem);
return -1;
}
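
/*
 * Editorial sketch: the eventfd sentinels assumed by the hunks below,
 * with the values defined in rte_virtio_net.h. "Invalid" means QEMU
 * passed no fd (VHOST_USER_VRING_NOFD_MASK set), while "uninitialized"
 * means the vring has not been set up yet.
 */
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)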
+static int
+vq_is_ready(struct vhost_virtqueue *vq)
+{
+ return vq && vq->desc &&
+ vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
+ vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
+}
+
static int
virtio_is_ready(struct virtio_net *dev)
{
struct vhost_virtqueue *rvq, *tvq;
+ uint32_t i;
- /* mq support in future.*/
- rvq = dev->virtqueue[VIRTIO_RXQ];
- tvq = dev->virtqueue[VIRTIO_TXQ];
- if (rvq && tvq && rvq->desc && tvq->desc &&
- (rvq->kickfd != -1) &&
- (rvq->callfd != -1) &&
- (tvq->kickfd != -1) &&
- (tvq->callfd != -1)) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "virtio is now ready for processing.\n");
- return 1;
+ for (i = 0; i < dev->virt_qp_nb; i++) {
+ rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
+ tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];
+
+ if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "virtio is not ready for processing.\n");
+ return 0;
+ }
}
+
RTE_LOG(INFO, VHOST_CONFIG,
- "virtio isn't ready for processing.\n");
- return 0;
+ "virtio is now ready for processing.\n");
+ return 1;
}
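
/*
 * Editorial note: with VIRTIO_QNUM == 2, VIRTIO_RXQ == 0 and
 * VIRTIO_TXQ == 1, queue pair i occupies virtqueue slots 2 * i and
 * 2 * i + 1, which is what the i * VIRTIO_QNUM indexing in
 * virtio_is_ready() walks.
 */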
void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
- file.fd = -1;
+ file.fd = VIRTIO_INVALID_EVENTFD;
else
file.fd = pmsg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring call idx:%d file:%d\n", file.index, file.fd);
- ops->set_vring_call(ctx, &file);
+ vhost_set_vring_call(ctx, &file);
}
void
user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct virtio_net *dev = get_device(ctx);
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
- file.fd = -1;
+ file.fd = VIRTIO_INVALID_EVENTFD;
else
file.fd = pmsg->fds[0];
RTE_LOG(INFO, VHOST_CONFIG,
"vring kick idx:%d file:%d\n", file.index, file.fd);
- ops->set_vring_kick(ctx, &file);
+ vhost_set_vring_kick(ctx, &file);
	if (virtio_is_ready(dev) &&
			!(dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->new_device(dev);
}

void
user_get_vring_base(struct vhost_device_ctx ctx,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);
/* Here we are safe to get the last used index */
- ops->get_vring_base(ctx, state->index, state);
+ vhost_get_vring_base(ctx, state->index, state);
RTE_LOG(INFO, VHOST_CONFIG,
"vring base idx:%d file:%d\n", state->index, state->num);
	/*
	 * Based on the current QEMU vhost-user implementation, this
	 * message is sent and only sent in vhost_vring_stop.
	 * TODO: clean up the vring; it isn't usable from this point on.
	 */
- if ((dev->virtqueue[VIRTIO_RXQ]->kickfd) >= 0) {
- close(dev->virtqueue[VIRTIO_RXQ]->kickfd);
- dev->virtqueue[VIRTIO_RXQ]->kickfd = -1;
- }
- if ((dev->virtqueue[VIRTIO_TXQ]->kickfd) >= 0) {
- close(dev->virtqueue[VIRTIO_TXQ]->kickfd);
- dev->virtqueue[VIRTIO_TXQ]->kickfd = -1;
- }
+ if (dev->virtqueue[state->index]->kickfd >= 0)
+ close(dev->virtqueue[state->index]->kickfd);
+
+ dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
return 0;
}
-void
-user_destroy_device(struct vhost_device_ctx ctx)
+/*
+ * When the virtio queues are ready to work, QEMU sends us this
+ * message to enable or disable the virtio queue pair.
+ */
+int
+user_set_vring_enable(struct vhost_device_ctx ctx,
+ struct vhost_vring_state *state)
{
struct virtio_net *dev = get_device(ctx);
+ int enable = (int)state->num;
- if (dev && (dev->flags & VIRTIO_DEV_RUNNING))
- notify_ops->destroy_device(dev);
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "set queue enable: %d to qp idx: %d\n",
+ enable, state->index);
- if (dev && dev->mem) {
- free_mem_region(dev);
- free(dev->mem);
- dev->mem = NULL;
+ if (notify_ops->vring_state_changed) {
+ notify_ops->vring_state_changed(dev, state->index, enable);
}
+
+ dev->virtqueue[state->index]->enabled = enable;
+
+ return 0;
}
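
/*
 * Editorial sketch: a minimal vring_state_changed callback, showing how
 * an application consumes the notification fired by
 * user_set_vring_enable() above. The body is illustrative; only the
 * signature follows struct virtio_net_device_ops.
 */
static int
vring_state_changed(struct virtio_net *dev __rte_unused,
		    uint16_t queue_id, int enable)
{
	RTE_LOG(INFO, VHOST_CONFIG, "queue %d is %s\n",
		queue_id, enable ? "enabled" : "disabled");
	return 0;
}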
void
user_set_protocol_features(struct vhost_device_ctx ctx,
	uint64_t protocol_features)
{
	struct virtio_net *dev = get_device(ctx);

	if (!dev)
		return;

	dev->protocol_features = protocol_features;
}
+
+int
+user_set_log_base(struct vhost_device_ctx ctx,
+ struct VhostUserMsg *msg)
+{
+ struct virtio_net *dev;
+ int fd = msg->fds[0];
+ uint64_t size, off;
+ void *addr;
+
+ dev = get_device(ctx);
+ if (!dev)
+ return -1;
+
+ if (fd < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
+ return -1;
+ }
+
+ if (msg->size != sizeof(VhostUserLog)) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "invalid log base msg size: %"PRId32" != %d\n",
+ msg->size, (int)sizeof(VhostUserLog));
+ return -1;
+ }
+
+ size = msg->payload.log.mmap_size;
+ off = msg->payload.log.mmap_offset;
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "log mmap size: %"PRId64", offset: %"PRId64"\n",
+ size, off);
+
+	/*
+	 * mmap from offset 0 to work around a hugepage mmap bug:
+	 * mmap() fails when the offset is not page-size aligned.
+	 */
+ addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (addr == MAP_FAILED) {
+ RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
+ return -1;
+ }
+
+ /* TODO: unmap on stop */
+ dev->log_base = (uint64_t)(uintptr_t)addr + off;
+ dev->log_size = size;
+
+ return 0;
+}
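
/*
 * Editorial sketch: how the mapped log area is consumed. Live-migration
 * dirty logging sets one bit per guest page in the shared bitmap; this
 * mirrors DPDK's vhost_log_page() helper, shown without the atomic OR
 * a concurrent implementation may need.
 */
static inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	log_base[page / 8] |= 1 << (page % 8);
}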
+
+/*
+ * A RARP packet is constructed and broadcast to notify switches of the
+ * new location of the migrated VM, so that packets from outside will
+ * not be lost after migration.
+ *
+ * However, we don't actually "send" a RARP packet here; instead, we
+ * set the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst()
+ * inject one.
+ */
+int
+user_send_rarp(struct vhost_device_ctx ctx, struct VhostUserMsg *msg)
+{
+ struct virtio_net *dev;
+ uint8_t *mac = (uint8_t *)&msg->payload.u64;
+
+ dev = get_device(ctx);
+ if (!dev)
+ return -1;
+
+ RTE_LOG(DEBUG, VHOST_CONFIG,
+ ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
+ memcpy(dev->mac.addr_bytes, mac, 6);
+
+ /*
+ * Set the flag to inject a RARP broadcast packet at
+ * rte_vhost_dequeue_burst().
+ *
+ * rte_smp_wmb() is for making sure the mac is copied
+ * before the flag is set.
+ */
+ rte_smp_wmb();
+ rte_atomic16_set(&dev->broadcast_rarp, 1);
+
+ return 0;
+}
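
/*
 * Editorial sketch: the dequeue-side counterpart assumed by the comment
 * above -- test-and-clear the flag once, and inject a RARP frame built
 * from dev->mac when it was set. make_rarp_packet() is assumed to exist
 * elsewhere in the series; the helper name and body are illustrative.
 */
static struct rte_mbuf *
maybe_inject_rarp(struct virtio_net *dev, struct rte_mempool *mbuf_pool)
{
	struct rte_mbuf *rarp_mbuf = NULL;

	/*
	 * rte_atomic16_cmpset() succeeds for exactly one caller that
	 * observes the flag set, so a single RARP frame is injected
	 * per SEND_RARP request.
	 */
	if (rte_atomic16_cmpset((volatile uint16_t *)&dev->broadcast_rarp.cnt,
				1, 0)) {
		rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
		if (rarp_mbuf == NULL)
			return NULL;
		if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
			rte_pktmbuf_free(rarp_mbuf);
			rarp_mbuf = NULL;
		}
	}

	return rarp_mbuf;
}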