X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost_user.c;h=4b4b7570bf2804564042d4df8a4e47fcd78adb2e;hb=06fc1159772de295ae2cb8d11f98ea6313288cc9;hp=1f6cba4b942572d7a997cb0f549487d5c31e46b7;hpb=5cd690e4fda97bc4465195ef2f500fffde63fb64;p=dpdk.git diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c index 1f6cba4b94..4b4b7570bf 100644 --- a/lib/librte_vhost/vhost_user.c +++ b/lib/librte_vhost/vhost_user.c @@ -1,34 +1,22 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. - * All rights reserved. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2016 Intel Corporation + */ + +/* Security model + * -------------- + * The vhost-user protocol connection is an external interface, so it must be + * robust against invalid inputs. * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: + * This is important because the vhost-user master is only one step removed + * from the guest. Malicious guests that have escaped will then launch further + * attacks from the vhost-user master. * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. + * Even in deployments where guests are trusted, a bug in the vhost-user master + * can still cause invalid messages to be sent. Such messages must not + * compromise the stability of the DPDK application by causing crashes, memory + * corruption, or other problematic behavior. * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * Do not assume received VhostUserMsg fields contain sensible values! */ #include @@ -183,7 +171,22 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features) return -1; } - if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) { + if (dev->flags & VIRTIO_DEV_RUNNING) { + if (dev->features == features) + return 0; + + /* + * Error out if master tries to change features while device is + * in running state. The exception being VHOST_F_LOG_ALL, which + * is enabled when the live-migration starts. 
+ */ + if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) { + RTE_LOG(ERR, VHOST_CONFIG, + "(%d) features changed while device is running.\n", + dev->vid); + return -1; + } + if (dev->notify_ops->features_changed) dev->notify_ops->features_changed(dev->vid, features); } @@ -195,12 +198,32 @@ vhost_user_set_features(struct virtio_net *dev, uint64_t features) } else { dev->vhost_hlen = sizeof(struct virtio_net_hdr); } - LOG_DEBUG(VHOST_CONFIG, + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mergeable RX buffers %s, virtio 1 %s\n", dev->vid, (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off", (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off"); + if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) && + !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) { + /* + * Remove all but first queue pair if MQ hasn't been + * negotiated. This is safe because the device is not + * running at this stage. + */ + while (dev->nr_vring > 2) { + struct vhost_virtqueue *vq; + + vq = dev->virtqueue[--dev->nr_vring]; + if (!vq) + continue; + + dev->virtqueue[dev->nr_vring] = NULL; + cleanup_vq(vq, 1); + free_vq(vq); + } + } + return 0; } @@ -215,6 +238,17 @@ vhost_user_set_vring_num(struct virtio_net *dev, vq->size = msg->payload.state.num; + /* VIRTIO 1.0, 2.4 Virtqueues says: + * + * Queue Size value is always a power of 2. The maximum Queue Size + * value is 32768. + */ + if ((vq->size & (vq->size - 1)) || vq->size > 32768) { + RTE_LOG(ERR, VHOST_CONFIG, + "invalid virtqueue size %u\n", vq->size); + return -1; + } + if (dev->dequeue_zero_copy) { vq->nr_zmbuf = 0; vq->last_zmbuf_idx = 0; @@ -227,6 +261,7 @@ vhost_user_set_vring_num(struct virtio_net *dev, "zero copy is force disabled\n"); dev->dequeue_zero_copy = 0; } + TAILQ_INIT(&vq->zmbuf_list); } vq->shadow_used_ring = rte_malloc(NULL, @@ -261,6 +296,9 @@ numa_realloc(struct virtio_net *dev, int index) int oldnode, newnode; struct virtio_net *old_dev; struct vhost_virtqueue *old_vq, *vq; + struct zcopy_mbuf *new_zmbuf; + struct vring_used_elem *new_shadow_used_ring; + struct batch_copy_elem *new_batch_copy_elems; int ret; old_dev = dev; @@ -285,6 +323,33 @@ numa_realloc(struct virtio_net *dev, int index) return dev; memcpy(vq, old_vq, sizeof(*vq)); + TAILQ_INIT(&vq->zmbuf_list); + + new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size * + sizeof(struct zcopy_mbuf), 0, newnode); + if (new_zmbuf) { + rte_free(vq->zmbufs); + vq->zmbufs = new_zmbuf; + } + + new_shadow_used_ring = rte_malloc_socket(NULL, + vq->size * sizeof(struct vring_used_elem), + RTE_CACHE_LINE_SIZE, + newnode); + if (new_shadow_used_ring) { + rte_free(vq->shadow_used_ring); + vq->shadow_used_ring = new_shadow_used_ring; + } + + new_batch_copy_elems = rte_malloc_socket(NULL, + vq->size * sizeof(struct batch_copy_elem), + RTE_CACHE_LINE_SIZE, + newnode); + if (new_batch_copy_elems) { + rte_free(vq->batch_copy_elems); + vq->batch_copy_elems = new_batch_copy_elems; + } + rte_free(old_vq); } @@ -424,13 +489,13 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index) vq->log_guest_addr = addr->log_guest_addr; - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n", dev->vid, vq->desc); - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n", dev->vid, vq->avail); - LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n", dev->vid, vq->used); - LOG_DEBUG(VHOST_CONFIG, 
"(%d) log_guest_addr: %" PRIx64 "\n", + VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n", dev->vid, vq->log_guest_addr); return dev; @@ -463,7 +528,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg) if (vq->enabled && (dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { - dev = translate_ring_addresses(dev, msg->payload.state.index); + dev = translate_ring_addresses(dev, msg->payload.addr.index); if (!dev) return -1; @@ -488,7 +553,7 @@ vhost_user_set_vring_base(struct virtio_net *dev, return 0; } -static void +static int add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, uint64_t host_phys_addr, uint64_t size) { @@ -498,6 +563,10 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, dev->max_guest_pages *= 2; dev->guest_pages = realloc(dev->guest_pages, dev->max_guest_pages * sizeof(*page)); + if (!dev->guest_pages) { + RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n"); + return -1; + } } if (dev->nr_guest_pages > 0) { @@ -506,7 +575,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, if (host_phys_addr == last_page->host_phys_addr + last_page->size) { last_page->size += size; - return; + return 0; } } @@ -514,9 +583,11 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, page->guest_phys_addr = guest_phys_addr; page->host_phys_addr = host_phys_addr; page->size = size; + + return 0; } -static void +static int add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, uint64_t page_size) { @@ -526,25 +597,31 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg, uint64_t host_phys_addr; uint64_t size; - host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr); + host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr); size = page_size - (guest_phys_addr & (page_size - 1)); size = RTE_MIN(size, reg_size); - add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size); + if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0) + return -1; + host_user_addr += size; guest_phys_addr += size; reg_size -= size; while (reg_size > 0) { size = RTE_MIN(reg_size, page_size); - host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t) + host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t) host_user_addr); - add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size); + if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, + size) < 0) + return -1; host_user_addr += size; guest_phys_addr += size; reg_size -= size; } + + return 0; } #ifdef RTE_LIBRTE_VHOST_DEBUG @@ -573,6 +650,30 @@ dump_guest_pages(struct virtio_net *dev) #define dump_guest_pages(dev) #endif +static bool +vhost_memory_changed(struct VhostUserMemory *new, + struct rte_vhost_memory *old) +{ + uint32_t i; + + if (new->nregions != old->nregions) + return true; + + for (i = 0; i < new->nregions; ++i) { + VhostUserMemoryRegion *new_r = &new->regions[i]; + struct rte_vhost_mem_region *old_r = &old->regions[i]; + + if (new_r->guest_phys_addr != old_r->guest_phys_addr) + return true; + if (new_r->memory_size != old_r->size) + return true; + if (new_r->userspace_addr != old_r->guest_user_addr) + return true; + } + + return false; +} + static int vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg) { @@ -585,6 +686,22 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg) uint32_t i; int fd; + if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) { + RTE_LOG(ERR, VHOST_CONFIG, + "too 
many memory regions (%u)\n", memory.nregions); + return -1; + } + + if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) { + RTE_LOG(INFO, VHOST_CONFIG, + "(%d) memory regions not changed\n", dev->vid); + + for (i = 0; i < memory.nregions; i++) + close(pmsg->fds[i]); + + return 0; + } + if (dev->mem) { free_mem_region(dev); rte_free(dev->mem); @@ -625,7 +742,17 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg) reg->fd = fd; mmap_offset = memory.regions[i].mmap_offset; - mmap_size = reg->size + mmap_offset; + + /* Check for memory_size + mmap_offset overflow */ + if (mmap_offset >= -reg->size) { + RTE_LOG(ERR, VHOST_CONFIG, + "mmap_offset (%#"PRIx64") and memory_size " + "(%#"PRIx64") overflow\n", + mmap_offset, reg->size); + goto err_mmap; + } + + mmap_size = reg->size + mmap_offset; /* mmap() without flag of MAP_ANONYMOUS, should be called * with length argument aligned with hugepagesz at older @@ -658,7 +785,12 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg) mmap_offset; if (dev->dequeue_zero_copy) - add_guest_pages(dev, reg, alignment); + if (add_guest_pages(dev, reg, alignment) < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "adding guest pages to region %u failed.\n", + i); + goto err_mmap; + } RTE_LOG(INFO, VHOST_CONFIG, "guest memory region %u, size: 0x%" PRIx64 "\n" @@ -755,15 +887,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg) RTE_LOG(INFO, VHOST_CONFIG, "vring kick idx:%d file:%d\n", file.index, file.fd); - /* - * Interpret ring addresses only when ring is started and enabled. - * This is now if protocol features aren't supported. - */ - if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { - *pdev = dev = translate_ring_addresses(dev, file.index); - if (!dev) - return; - } + /* Interpret ring addresses only when ring is started. */ + dev = translate_ring_addresses(dev, file.index); + if (!dev) + return; + + *pdev = dev; vq = dev->virtqueue[file.index]; @@ -829,6 +958,11 @@ vhost_user_get_vring_base(struct virtio_net *dev, vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; + if (vq->callfd >= 0) + close(vq->callfd); + + vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; + if (dev->dequeue_zero_copy) free_zmbufs(vq); rte_free(vq->shadow_used_ring); @@ -845,29 +979,15 @@ vhost_user_get_vring_base(struct virtio_net *dev, * enable the virtio queue pair. */ static int -vhost_user_set_vring_enable(struct virtio_net **pdev, +vhost_user_set_vring_enable(struct virtio_net *dev, VhostUserMsg *msg) { - struct virtio_net *dev = *pdev; int enable = (int)msg->payload.state.num; RTE_LOG(INFO, VHOST_CONFIG, "set queue enable: %d to qp idx: %d\n", enable, msg->payload.state.index); - /* - * Interpret ring addresses only when ring is started and enabled. - * This is now if protocol features are supported. 
- */ - if (enable && (dev->features & - (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) { - dev = translate_ring_addresses(dev, msg->payload.state.index); - if (!dev) - return -1; - - *pdev = dev; - } - if (dev->notify_ops->vring_state_changed) dev->notify_ops->vring_state_changed(dev->vid, msg->payload.state.index, enable); @@ -877,6 +997,27 @@ vhost_user_set_vring_enable(struct virtio_net **pdev, return 0; } +static void +vhost_user_get_protocol_features(struct virtio_net *dev, + struct VhostUserMsg *msg) +{ + uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES; + + rte_vhost_driver_get_features(dev->ifname, &features); + + /* + * REPLY_ACK protocol feature is only mandatory for now + * for IOMMU feature. If IOMMU is explicitly disabled by the + * application, disable also REPLY_ACK feature for older buggy + * Qemu versions (from v2.7.0 to v2.9.0). + */ + if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) + protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK); + + msg->payload.u64 = protocol_features; + msg->size = sizeof(msg->payload.u64); +} + static void vhost_user_set_protocol_features(struct virtio_net *dev, uint64_t protocol_features) @@ -908,6 +1049,15 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg) size = msg->payload.log.mmap_size; off = msg->payload.log.mmap_offset; + + /* Don't allow mmap_offset to point outside the mmap region */ + if (off > size) { + RTE_LOG(ERR, VHOST_CONFIG, + "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n", + off, size); + return -1; + } + RTE_LOG(INFO, VHOST_CONFIG, "log mmap size: %"PRId64", offset: %"PRId64"\n", size, off); @@ -916,7 +1066,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg) * mmap from 0 to workaround a hugepage mmap bug: mmap will * fail when offset is not page size aligned. */ - addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (addr == MAP_FAILED) { RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n"); @@ -1186,12 +1336,47 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg) return alloc_vring_queue(dev, vring_idx); } +static void +vhost_user_lock_all_queue_pairs(struct virtio_net *dev) +{ + unsigned int i = 0; + unsigned int vq_num = 0; + + while (vq_num < dev->nr_vring) { + struct vhost_virtqueue *vq = dev->virtqueue[i]; + + if (vq) { + rte_spinlock_lock(&vq->access_lock); + vq_num++; + } + i++; + } +} + +static void +vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) +{ + unsigned int i = 0; + unsigned int vq_num = 0; + + while (vq_num < dev->nr_vring) { + struct vhost_virtqueue *vq = dev->virtqueue[i]; + + if (vq) { + rte_spinlock_unlock(&vq->access_lock); + vq_num++; + } + i++; + } +} + int vhost_user_msg_handler(int vid, int fd) { struct virtio_net *dev; struct VhostUserMsg msg; int ret; + int unlock_required = 0; dev = get_device(vid); if (dev == NULL) @@ -1237,6 +1422,38 @@ vhost_user_msg_handler(int vid, int fd) return -1; } + /* + * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE + * and VHOST_USER_RESET_OWNER, since it is sent when virtio stops + * and device is destroyed. destroy_device waits for queues to be + * inactive, so it is safe. Otherwise taking the access_lock + * would cause a dead lock. 
+ */ + switch (msg.request.master) { + case VHOST_USER_SET_FEATURES: + case VHOST_USER_SET_PROTOCOL_FEATURES: + case VHOST_USER_SET_OWNER: + case VHOST_USER_SET_MEM_TABLE: + case VHOST_USER_SET_LOG_BASE: + case VHOST_USER_SET_LOG_FD: + case VHOST_USER_SET_VRING_NUM: + case VHOST_USER_SET_VRING_ADDR: + case VHOST_USER_SET_VRING_BASE: + case VHOST_USER_SET_VRING_KICK: + case VHOST_USER_SET_VRING_CALL: + case VHOST_USER_SET_VRING_ERR: + case VHOST_USER_SET_VRING_ENABLE: + case VHOST_USER_SEND_RARP: + case VHOST_USER_NET_SET_MTU: + case VHOST_USER_SET_SLAVE_REQ_FD: + vhost_user_lock_all_queue_pairs(dev); + unlock_required = 1; + break; + default: + break; + + } + switch (msg.request.master) { case VHOST_USER_GET_FEATURES: msg.payload.u64 = vhost_user_get_features(dev); @@ -1244,12 +1461,13 @@ vhost_user_msg_handler(int vid, int fd) send_vhost_reply(fd, &msg); break; case VHOST_USER_SET_FEATURES: - vhost_user_set_features(dev, msg.payload.u64); + ret = vhost_user_set_features(dev, msg.payload.u64); + if (ret) + return -1; break; case VHOST_USER_GET_PROTOCOL_FEATURES: - msg.payload.u64 = VHOST_USER_PROTOCOL_FEATURES; - msg.size = sizeof(msg.payload.u64); + vhost_user_get_protocol_features(dev, &msg); send_vhost_reply(fd, &msg); break; case VHOST_USER_SET_PROTOCOL_FEATURES: @@ -1315,7 +1533,7 @@ vhost_user_msg_handler(int vid, int fd) break; case VHOST_USER_SET_VRING_ENABLE: - vhost_user_set_vring_enable(&dev, &msg); + vhost_user_set_vring_enable(dev, &msg); break; case VHOST_USER_SEND_RARP: vhost_user_send_rarp(dev, &msg); @@ -1339,6 +1557,9 @@ vhost_user_msg_handler(int vid, int fd) } + if (unlock_required) + vhost_user_unlock_all_queue_pairs(dev); + if (msg.flags & VHOST_USER_NEED_REPLY) { msg.payload.u64 = !!ret; msg.size = sizeof(msg.payload.u64);