diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 971ccdb01c..a60bb945ad 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -87,6 +87,8 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
 	[VHOST_USER_POSTCOPY_END] = "VHOST_USER_POSTCOPY_END",
 	[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
 	[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
+	[VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
+	[VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
 };
 
 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
@@ -97,8 +99,15 @@ close_msg_fds(struct VhostUserMsg *msg)
 {
 	int i;
 
-	for (i = 0; i < msg->fd_num; i++)
-		close(msg->fds[i]);
+	for (i = 0; i < msg->fd_num; i++) {
+		int fd = msg->fds[i];
+
+		if (fd == -1)
+			continue;
+
+		msg->fds[i] = -1;
+		close(fd);
+	}
 }
 
 /*
@@ -132,47 +141,15 @@ get_blk_size(int fd)
 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
-/*
- * Reclaim all the outstanding zmbufs for a virtqueue.
- */
-static void
-drain_zmbuf_list(struct vhost_virtqueue *vq)
-{
-	struct zcopy_mbuf *zmbuf, *next;
-
-	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
-	     zmbuf != NULL; zmbuf = next) {
-		next = TAILQ_NEXT(zmbuf, next);
-
-		while (!mbuf_is_consumed(zmbuf->mbuf))
-			usleep(1000);
-
-		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
-		restore_mbuf(zmbuf->mbuf);
-		rte_pktmbuf_free(zmbuf->mbuf);
-		put_zmbuf(zmbuf);
-		vq->nr_zmbuf -= 1;
-	}
-}
-
 static void
 free_mem_region(struct virtio_net *dev)
 {
 	uint32_t i;
 	struct rte_vhost_mem_region *reg;
-	struct vhost_virtqueue *vq;
 
 	if (!dev || !dev->mem)
 		return;
 
-	if (dev->dequeue_zero_copy) {
-		for (i = 0; i < dev->nr_vring; i++) {
-			vq = dev->virtqueue[i];
-			if (vq)
-				drain_zmbuf_list(vq);
-		}
-	}
-
 	for (i = 0; i < dev->mem->nregions; i++) {
 		reg = &dev->mem->regions[i];
 		if (reg->host_user_addr) {
@@ -191,7 +168,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
 		dev->mem = NULL;
 	}
 
-	free(dev->guest_pages);
+	rte_free(dev->guest_pages);
 	dev->guest_pages = NULL;
 
 	if (dev->log_addr) {
@@ -206,7 +183,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
 			dev->inflight_info->addr = NULL;
 		}
 
-		if (dev->inflight_info->fd > 0) {
+		if (dev->inflight_info->fd >= 0) {
 			close(dev->inflight_info->fd);
 			dev->inflight_info->fd = -1;
 		}
@@ -228,6 +205,25 @@ vhost_backend_cleanup(struct virtio_net *dev)
 	dev->postcopy_listening = 0;
 }
 
+static void
+vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
+			  int enable)
+{
+	struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
+	struct vhost_virtqueue *vq = dev->virtqueue[index];
+
+	/* Configure guest notifications on enable */
+	if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
+		vhost_enable_guest_notification(dev, vq, vq->notif_enable);
+
+	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
+		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
+
+	if (dev->notify_ops->vring_state_changed)
+		dev->notify_ops->vring_state_changed(dev->vid,
+				index, enable);
+}
+
 /*
  * This function just returns success at the moment unless
  * the device hasn't been initialised.
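The reworked close_msg_fds() above invalidates each descriptor slot before closing it, so a repeated cleanup pass over the same message can never close a descriptor twice. A minimal standalone sketch of the idiom; struct fd_carrier and MAX_FDS are hypothetical stand-ins for the real VhostUserMsg layout:

#include <unistd.h>

#define MAX_FDS 8	/* hypothetical bound, stands in for the real array size */

struct fd_carrier {
	int fds[MAX_FDS];
	int fd_num;
};

static void
close_carrier_fds(struct fd_carrier *c)
{
	int i;

	for (i = 0; i < c->fd_num; i++) {
		int fd = c->fds[i];

		if (fd == -1)
			continue;

		/* Invalidate the slot before closing so a second pass
		 * (e.g. an error path that cleans up again) skips it. */
		c->fds[i] = -1;
		close(fd);
	}
}

Clearing the slot first also keeps the structure consistent if close() itself fails: the descriptor is abandoned rather than retried.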
@@ -315,7 +311,6 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, uint64_t features = msg->payload.u64; uint64_t vhost_features = 0; struct rte_vdpa_device *vdpa_dev; - int did = -1; if (validate_msg_fds(msg, 0) != 0) return RTE_VHOST_MSG_RESULT_ERR; @@ -325,6 +320,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, VHOST_LOG_CONFIG(ERR, "(%d) received invalid negotiated features.\n", dev->vid); + dev->flags |= VIRTIO_DEV_FEATURES_FAILED; + dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK; + return RTE_VHOST_MSG_RESULT_ERR; } @@ -350,7 +348,9 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, dev->features = features; if (dev->features & - ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) { + ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | + (1ULL << VIRTIO_F_VERSION_1) | + (1ULL << VIRTIO_F_RING_PACKED))) { dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else { dev->vhost_hlen = sizeof(struct virtio_net_hdr); @@ -384,11 +384,11 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg, } } - did = dev->vdpa_dev_id; - vdpa_dev = rte_vdpa_get_device(did); - if (vdpa_dev && vdpa_dev->ops->set_features) + vdpa_dev = dev->vdpa_dev; + if (vdpa_dev) vdpa_dev->ops->set_features(dev->vid); + dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED; return RTE_VHOST_MSG_RESULT_OK; } @@ -431,23 +431,6 @@ vhost_user_set_vring_num(struct virtio_net **pdev, return RTE_VHOST_MSG_RESULT_ERR; } - if (dev->dequeue_zero_copy) { - vq->nr_zmbuf = 0; - vq->last_zmbuf_idx = 0; - vq->zmbuf_size = vq->size; - if (vq->zmbufs) - rte_free(vq->zmbufs); - vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size * - sizeof(struct zcopy_mbuf), 0); - if (vq->zmbufs == NULL) { - VHOST_LOG_CONFIG(WARNING, - "failed to allocate mem for zero copy; " - "zero copy is force disabled\n"); - dev->dequeue_zero_copy = 0; - } - TAILQ_INIT(&vq->zmbuf_list); - } - if (vq_is_packed(dev)) { if (vq->shadow_used_packed) rte_free(vq->shadow_used_packed); @@ -464,12 +447,14 @@ vhost_user_set_vring_num(struct virtio_net **pdev, } else { if (vq->shadow_used_split) rte_free(vq->shadow_used_split); + vq->shadow_used_split = rte_malloc(NULL, vq->size * sizeof(struct vring_used_elem), RTE_CACHE_LINE_SIZE); + if (!vq->shadow_used_split) { VHOST_LOG_CONFIG(ERR, - "failed to allocate memory for shadow used ring.\n"); + "failed to allocate memory for vq internal data.\n"); return RTE_VHOST_MSG_RESULT_ERR; } } @@ -499,7 +484,6 @@ numa_realloc(struct virtio_net *dev, int index) int oldnode, newnode; struct virtio_net *old_dev; struct vhost_virtqueue *old_vq, *vq; - struct zcopy_mbuf *new_zmbuf; struct vring_used_elem *new_shadow_used_split; struct vring_used_elem_packed *new_shadow_used_packed; struct batch_copy_elem *new_batch_copy_elems; @@ -530,16 +514,6 @@ numa_realloc(struct virtio_net *dev, int index) return dev; memcpy(vq, old_vq, sizeof(*vq)); - TAILQ_INIT(&vq->zmbuf_list); - - if (dev->dequeue_zero_copy) { - new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size * - sizeof(struct zcopy_mbuf), 0, newnode); - if (new_zmbuf) { - rte_free(vq->zmbufs); - vq->zmbufs = new_zmbuf; - } - } if (vq_is_packed(dev)) { new_shadow_used_packed = rte_malloc_socket(NULL, @@ -903,11 +877,12 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr, if (dev->nr_guest_pages == dev->max_guest_pages) { dev->max_guest_pages *= 2; old_pages = dev->guest_pages; - dev->guest_pages = realloc(dev->guest_pages, - dev->max_guest_pages * sizeof(*page)); - if 
(!dev->guest_pages) {
+		dev->guest_pages = rte_realloc(dev->guest_pages,
+					dev->max_guest_pages * sizeof(*page),
+					RTE_CACHE_LINE_SIZE);
+		if (dev->guest_pages == NULL) {
 			VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
-			free(old_pages);
+			rte_free(old_pages);
 			return -1;
 		}
 	}
@@ -964,6 +939,12 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		reg_size -= size;
 	}
 
+	/* sort guest page array if over binary search threshold */
+	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+		qsort((void *)dev->guest_pages, dev->nr_guest_pages,
+			sizeof(struct guest_page), guest_page_addrcmp);
+	}
+
 	return 0;
 }
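The add_guest_pages() hunk above sorts the guest page array with qsort() once it crosses VHOST_BINARY_SEARCH_THRESH, so guest-physical address translation can switch from a linear scan to binary search. A minimal sketch of that sort-plus-lookup scheme; struct gp, gp_addrcmp() and gp_to_host() are simplified, hypothetical stand-ins for the real guest_page structure, guest_page_addrcmp() and the lookup helper:

#include <stdint.h>
#include <stdlib.h>

struct gp {
	uint64_t guest_phys_addr;
	uint64_t host_user_addr;
	uint64_t size;
};

/* qsort() comparator: order regions by guest physical address. */
static int
gp_addrcmp(const void *p1, const void *p2)
{
	const struct gp *a = p1;
	const struct gp *b = p2;

	if (a->guest_phys_addr < b->guest_phys_addr)
		return -1;
	if (a->guest_phys_addr > b->guest_phys_addr)
		return 1;
	return 0;
}

/* Binary search over the sorted, non-overlapping page array:
 * translate a guest physical address to a host virtual address,
 * returning 0 when no page covers the address. */
static uint64_t
gp_to_host(const struct gp *pages, uint32_t nr_pages, uint64_t gpa)
{
	uint32_t lo = 0, hi = nr_pages;

	while (lo < hi) {
		uint32_t mid = lo + (hi - lo) / 2;
		const struct gp *p = &pages[mid];

		if (gpa < p->guest_phys_addr)
			hi = mid;
		else if (gpa >= p->guest_phys_addr + p->size)
			lo = mid + 1;
		else
			return p->host_user_addr + gpa - p->guest_phys_addr;
	}

	return 0;
}

The array would be sorted once per memory-table update, e.g. qsort(pages, nr_pages, sizeof(*pages), gp_addrcmp), which is what the hunk above does once the threshold is reached.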
@@ -1017,6 +998,195 @@ vhost_memory_changed(struct VhostUserMemory *new,
 	return false;
 }
 
+#ifdef RTE_LIBRTE_VHOST_POSTCOPY
+static int
+vhost_user_postcopy_region_register(struct virtio_net *dev,
+		struct rte_vhost_mem_region *reg)
+{
+	struct uffdio_register reg_struct;
+
+	/*
+	 * Let's register all the mmap'ed area to ensure
+	 * alignment on page boundary.
+	 */
+	reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
+	reg_struct.range.len = reg->mmap_size;
+	reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
+
+	if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
+				&reg_struct)) {
+		VHOST_LOG_CONFIG(ERR, "Failed to register ufd for region "
+				"%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
+				(uint64_t)reg_struct.range.start,
+				(uint64_t)reg_struct.range.start +
+				(uint64_t)reg_struct.range.len - 1,
+				dev->postcopy_ufd,
+				strerror(errno));
+		return -1;
+	}
+
+	VHOST_LOG_CONFIG(INFO, "\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
+			(uint64_t)reg_struct.range.start,
+			(uint64_t)reg_struct.range.start +
+			(uint64_t)reg_struct.range.len - 1);
+
+	return 0;
+}
+#else
+static int
+vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused,
+		struct rte_vhost_mem_region *reg __rte_unused)
+{
+	return -1;
+}
+#endif
+
+static int
+vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
+		struct VhostUserMsg *msg)
+{
+	struct VhostUserMemory *memory;
+	struct rte_vhost_mem_region *reg;
+	VhostUserMsg ack_msg;
+	uint32_t i;
+
+	if (!dev->postcopy_listening)
+		return 0;
+
+	/*
+	 * We don't have a better way right now than sharing
+	 * DPDK's virtual address with Qemu, so that Qemu can
+	 * retrieve the region offset when handling userfaults.
+	 */
+	memory = &msg->payload.memory;
+	for (i = 0; i < memory->nregions; i++) {
+		reg = &dev->mem->regions[i];
+		memory->regions[i].userspace_addr = reg->host_user_addr;
+	}
+
+	/* Send the addresses back to qemu */
+	msg->fd_num = 0;
+	send_vhost_reply(main_fd, msg);
+
+	/* Wait for qemu to acknowledge it's got the addresses;
+	 * we've got to wait before we're allowed to generate faults.
+	 */
+	if (read_vhost_message(main_fd, &ack_msg) <= 0) {
+		VHOST_LOG_CONFIG(ERR,
+				"Failed to read qemu ack on postcopy set-mem-table\n");
+		return -1;
+	}
+
+	if (validate_msg_fds(&ack_msg, 0) != 0)
+		return -1;
+
+	if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
+		VHOST_LOG_CONFIG(ERR,
+				"Bad qemu ack on postcopy set-mem-table (%d)\n",
+				ack_msg.request.master);
+		return -1;
+	}
+
+	/* Now register the regions with userfaultfd and we can use the memory */
+	for (i = 0; i < memory->nregions; i++) {
+		reg = &dev->mem->regions[i];
+		if (vhost_user_postcopy_region_register(dev, reg) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_user_mmap_region(struct virtio_net *dev,
+		struct rte_vhost_mem_region *region,
+		uint64_t mmap_offset)
+{
+	void *mmap_addr;
+	uint64_t mmap_size;
+	uint64_t alignment;
+	int populate;
+
+	/* Check for memory_size + mmap_offset overflow */
+	if (mmap_offset >= -region->size) {
+		VHOST_LOG_CONFIG(ERR,
+				"mmap_offset (%#"PRIx64") and memory_size "
+				"(%#"PRIx64") overflow\n",
+				mmap_offset, region->size);
+		return -1;
+	}
+
+	mmap_size = region->size + mmap_offset;
+
+	/* mmap() without the MAP_ANONYMOUS flag must be called with a length
+	 * argument aligned with the hugepage size on older long-term Linux
+	 * versions, like 2.6.32 and 3.2.72, or mmap() will fail with EINVAL.
+	 *
+	 * To avoid failure, make sure the caller keeps the length aligned.
+	 */
+	alignment = get_blk_size(region->fd);
+	if (alignment == (uint64_t)-1) {
+		VHOST_LOG_CONFIG(ERR,
+				"couldn't get hugepage size through fstat\n");
+		return -1;
+	}
+	mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
+	if (mmap_size == 0) {
+		/*
+		 * This can happen if the initial mmap_size + alignment
+		 * overflows uint64_t, which in turn means either mmap_size
+		 * or the alignment value is wrong.
+		 *
+		 * The kernel's mmap() implementation would return an error,
+		 * but better to catch it here and provide useful info in
+		 * the logs.
+		 */
+		VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
+				"or alignment (0x%" PRIx64 ") is invalid\n",
+				region->size + mmap_offset, alignment);
+		return -1;
+	}
+
+	populate = dev->async_copy ? 
MAP_POPULATE : 0; + mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, + MAP_SHARED | populate, region->fd, 0); + + if (mmap_addr == MAP_FAILED) { + VHOST_LOG_CONFIG(ERR, "mmap failed (%s).\n", strerror(errno)); + return -1; + } + + region->mmap_addr = mmap_addr; + region->mmap_size = mmap_size; + region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset; + + if (dev->async_copy) + if (add_guest_pages(dev, region, alignment) < 0) { + VHOST_LOG_CONFIG(ERR, + "adding guest pages to region failed.\n"); + return -1; + } + + VHOST_LOG_CONFIG(INFO, + "guest memory region size: 0x%" PRIx64 "\n" + "\t guest physical addr: 0x%" PRIx64 "\n" + "\t guest virtual addr: 0x%" PRIx64 "\n" + "\t host virtual addr: 0x%" PRIx64 "\n" + "\t mmap addr : 0x%" PRIx64 "\n" + "\t mmap size : 0x%" PRIx64 "\n" + "\t mmap align: 0x%" PRIx64 "\n" + "\t mmap off : 0x%" PRIx64 "\n", + region->size, + region->guest_phys_addr, + region->guest_user_addr, + region->host_user_addr, + (uint64_t)(uintptr_t)mmap_addr, + mmap_size, + alignment, + mmap_offset); + + return 0; +} + static int vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, int main_fd) @@ -1024,13 +1194,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, struct virtio_net *dev = *pdev; struct VhostUserMemory *memory = &msg->payload.memory; struct rte_vhost_mem_region *reg; - void *mmap_addr; - uint64_t mmap_size; + uint64_t mmap_offset; - uint64_t alignment; uint32_t i; - int populate; - int fd; if (validate_msg_fds(msg, memory->nregions) != 0) return RTE_VHOST_MSG_RESULT_ERR; @@ -1038,7 +1204,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) { VHOST_LOG_CONFIG(ERR, "too many memory regions (%u)\n", memory->nregions); - return RTE_VHOST_MSG_RESULT_ERR; + goto close_msg_fds; } if (dev->mem && !vhost_memory_changed(memory, dev->mem)) { @@ -1051,6 +1217,13 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, } if (dev->mem) { + if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) { + struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev; + + if (vdpa_dev && vdpa_dev->ops->dev_close) + vdpa_dev->ops->dev_close(dev->vid); + dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED; + } free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; @@ -1062,16 +1235,18 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, vhost_user_iotlb_flush_all(dev->virtqueue[i]); dev->nr_guest_pages = 0; - if (!dev->guest_pages) { + if (dev->guest_pages == NULL) { dev->max_guest_pages = 8; - dev->guest_pages = malloc(dev->max_guest_pages * - sizeof(struct guest_page)); + dev->guest_pages = rte_zmalloc(NULL, + dev->max_guest_pages * + sizeof(struct guest_page), + RTE_CACHE_LINE_SIZE); if (dev->guest_pages == NULL) { VHOST_LOG_CONFIG(ERR, "(%d) failed to allocate memory " "for dev->guest_pages\n", dev->vid); - return RTE_VHOST_MSG_RESULT_ERR; + goto close_msg_fds; } } @@ -1081,177 +1256,42 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, VHOST_LOG_CONFIG(ERR, "(%d) failed to allocate memory for dev->mem\n", dev->vid); - return RTE_VHOST_MSG_RESULT_ERR; + goto free_guest_pages; } - dev->mem->nregions = memory->nregions; for (i = 0; i < memory->nregions; i++) { - fd = msg->fds[i]; reg = &dev->mem->regions[i]; reg->guest_phys_addr = memory->regions[i].guest_phys_addr; reg->guest_user_addr = memory->regions[i].userspace_addr; reg->size = memory->regions[i].memory_size; - reg->fd = 
fd; - - mmap_offset = memory->regions[i].mmap_offset; + reg->fd = msg->fds[i]; - /* Check for memory_size + mmap_offset overflow */ - if (mmap_offset >= -reg->size) { - VHOST_LOG_CONFIG(ERR, - "mmap_offset (%#"PRIx64") and memory_size " - "(%#"PRIx64") overflow\n", - mmap_offset, reg->size); - goto err_mmap; - } - - mmap_size = reg->size + mmap_offset; - - /* mmap() without flag of MAP_ANONYMOUS, should be called - * with length argument aligned with hugepagesz at older - * longterm version Linux, like 2.6.32 and 3.2.72, or - * mmap() will fail with EINVAL. - * - * to avoid failure, make sure in caller to keep length - * aligned. + /* + * Assign invalid file descriptor value to avoid double + * closing on error path. */ - alignment = get_blk_size(fd); - if (alignment == (uint64_t)-1) { - VHOST_LOG_CONFIG(ERR, - "couldn't get hugepage size through fstat\n"); - goto err_mmap; - } - mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment); - if (mmap_size == 0) { - /* - * It could happen if initial mmap_size + alignment - * overflows the sizeof uint64, which could happen if - * either mmap_size or alignment value is wrong. - * - * mmap() kernel implementation would return an error, - * but better catch it before and provide useful info - * in the logs. - */ - VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") " - "or alignment (0x%" PRIx64 ") is invalid\n", - reg->size + mmap_offset, alignment); - goto err_mmap; - } + msg->fds[i] = -1; - populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0; - mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, - MAP_SHARED | populate, fd, 0); + mmap_offset = memory->regions[i].mmap_offset; - if (mmap_addr == MAP_FAILED) { - VHOST_LOG_CONFIG(ERR, - "mmap region %u failed.\n", i); - goto err_mmap; + if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) { + VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i); + goto free_mem_table; } - reg->mmap_addr = mmap_addr; - reg->mmap_size = mmap_size; - reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + - mmap_offset; - - if (dev->dequeue_zero_copy) - if (add_guest_pages(dev, reg, alignment) < 0) { - VHOST_LOG_CONFIG(ERR, - "adding guest pages to region %u failed.\n", - i); - goto err_mmap; - } - - VHOST_LOG_CONFIG(INFO, - "guest memory region %u, size: 0x%" PRIx64 "\n" - "\t guest physical addr: 0x%" PRIx64 "\n" - "\t guest virtual addr: 0x%" PRIx64 "\n" - "\t host virtual addr: 0x%" PRIx64 "\n" - "\t mmap addr : 0x%" PRIx64 "\n" - "\t mmap size : 0x%" PRIx64 "\n" - "\t mmap align: 0x%" PRIx64 "\n" - "\t mmap off : 0x%" PRIx64 "\n", - i, reg->size, - reg->guest_phys_addr, - reg->guest_user_addr, - reg->host_user_addr, - (uint64_t)(uintptr_t)mmap_addr, - mmap_size, - alignment, - mmap_offset); - - if (dev->postcopy_listening) { - /* - * We haven't a better way right now than sharing - * DPDK's virtual address with Qemu, so that Qemu can - * retrieve the region offset when handling userfaults. - */ - memory->regions[i].userspace_addr = - reg->host_user_addr; - } + dev->mem->nregions++; } - if (dev->postcopy_listening) { - /* Send the addresses back to qemu */ - msg->fd_num = 0; - send_vhost_reply(main_fd, msg); - - /* Wait for qemu to acknolwedge it's got the addresses - * we've got to wait before we're allowed to generate faults. 
- */ - VhostUserMsg ack_msg; - if (read_vhost_message(main_fd, &ack_msg) <= 0) { - VHOST_LOG_CONFIG(ERR, - "Failed to read qemu ack on postcopy set-mem-table\n"); - goto err_mmap; - } - - if (validate_msg_fds(&ack_msg, 0) != 0) - goto err_mmap; - - if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) { - VHOST_LOG_CONFIG(ERR, - "Bad qemu ack on postcopy set-mem-table (%d)\n", - ack_msg.request.master); - goto err_mmap; - } - /* Now userfault register and we can use the memory */ - for (i = 0; i < memory->nregions; i++) { -#ifdef RTE_LIBRTE_VHOST_POSTCOPY - reg = &dev->mem->regions[i]; - struct uffdio_register reg_struct; - - /* - * Let's register all the mmap'ed area to ensure - * alignment on page boundary. - */ - reg_struct.range.start = - (uint64_t)(uintptr_t)reg->mmap_addr; - reg_struct.range.len = reg->mmap_size; - reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING; - - if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, - ®_struct)) { - VHOST_LOG_CONFIG(ERR, - "Failed to register ufd for region %d: (ufd = %d) %s\n", - i, dev->postcopy_ufd, - strerror(errno)); - goto err_mmap; - } - VHOST_LOG_CONFIG(INFO, - "\t userfaultfd registered for range : " - "%" PRIx64 " - %" PRIx64 "\n", - (uint64_t)reg_struct.range.start, - (uint64_t)reg_struct.range.start + - (uint64_t)reg_struct.range.len - 1); -#else - goto err_mmap; -#endif - } - } + if (vhost_user_postcopy_register(dev, main_fd, msg) < 0) + goto free_mem_table; for (i = 0; i < dev->nr_vring; i++) { struct vhost_virtqueue *vq = dev->virtqueue[i]; + if (!vq) + continue; + if (vq->desc || vq->avail || vq->used) { /* * If the memory table got updated, the ring addresses @@ -1263,7 +1303,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, dev = translate_ring_addresses(dev, i); if (!dev) { dev = *pdev; - goto err_mmap; + goto free_mem_table; } *pdev = dev; @@ -1274,10 +1314,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg, return RTE_VHOST_MSG_RESULT_OK; -err_mmap: +free_mem_table: free_mem_region(dev); rte_free(dev->mem); dev->mem = NULL; +free_guest_pages: + rte_free(dev->guest_pages); + dev->guest_pages = NULL; +close_msg_fds: + close_msg_fds(msg); return RTE_VHOST_MSG_RESULT_ERR; } @@ -1297,27 +1342,48 @@ vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq) return rings_ok && vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD && - vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD; + vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD && + vq->enabled; } +#define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u + static int virtio_is_ready(struct virtio_net *dev) { struct vhost_virtqueue *vq; - uint32_t i; + uint32_t i, nr_vring = dev->nr_vring; + + if (dev->flags & VIRTIO_DEV_READY) + return 1; - if (dev->nr_vring == 0) + if (!dev->nr_vring) return 0; - for (i = 0; i < dev->nr_vring; i++) { + if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) { + nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY; + + if (dev->nr_vring < nr_vring) + return 0; + } + + for (i = 0; i < nr_vring; i++) { vq = dev->virtqueue[i]; if (!vq_is_ready(dev, vq)) return 0; } - VHOST_LOG_CONFIG(INFO, - "virtio is now ready for processing.\n"); + /* If supported, ensure the frontend is really done with config */ + if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS)) + if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK)) + return 0; + + dev->flags |= VIRTIO_DEV_READY; + + if (!(dev->flags & VIRTIO_DEV_RUNNING)) + VHOST_LOG_CONFIG(INFO, + "virtio is now ready for processing.\n"); return 1; } @@ -1408,6 +1474,7 @@ 
vhost_user_get_inflight_fd(struct virtio_net **pdev, "failed to alloc dev inflight area\n"); return RTE_VHOST_MSG_RESULT_ERR; } + dev->inflight_info->fd = -1; } num_queues = msg->payload.inflight.num_queues; @@ -1433,6 +1500,16 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev, } memset(addr, 0, mmap_size); + if (dev->inflight_info->addr) { + munmap(dev->inflight_info->addr, dev->inflight_info->size); + dev->inflight_info->addr = NULL; + } + + if (dev->inflight_info->fd >= 0) { + close(dev->inflight_info->fd); + dev->inflight_info->fd = -1; + } + dev->inflight_info->addr = addr; dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size; dev->inflight_info->fd = msg->fds[0] = fd; @@ -1515,10 +1592,13 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, "failed to alloc dev inflight area\n"); return RTE_VHOST_MSG_RESULT_ERR; } + dev->inflight_info->fd = -1; } - if (dev->inflight_info->addr) + if (dev->inflight_info->addr) { munmap(dev->inflight_info->addr, dev->inflight_info->size); + dev->inflight_info->addr = NULL; + } addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mmap_offset); @@ -1527,8 +1607,10 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, return RTE_VHOST_MSG_RESULT_ERR; } - if (dev->inflight_info->fd) + if (dev->inflight_info->fd >= 0) { close(dev->inflight_info->fd); + dev->inflight_info->fd = -1; + } dev->inflight_info->fd = fd; dev->inflight_info->addr = addr; @@ -1536,6 +1618,9 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg, for (i = 0; i < num_queues; i++) { vq = dev->virtqueue[i]; + if (!vq) + continue; + if (vq_is_packed(dev)) { vq->inflight_packed = addr; vq->inflight_packed->desc_num = queue_size; @@ -1571,6 +1656,12 @@ vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg, "vring call idx:%d file:%d\n", file.index, file.fd); vq = dev->virtqueue[file.index]; + + if (vq->ready) { + vq->ready = 0; + vhost_user_notify_queue_state(dev, file.index, 0); + } + if (vq->callfd >= 0) close(vq->callfd); @@ -1642,7 +1733,7 @@ vhost_check_queue_inflights_split(struct virtio_net *dev, if (inflight_split->used_idx != used->idx) { inflight_split->desc[last_io].inflight = 0; - rte_smp_mb(); + rte_atomic_thread_fence(__ATOMIC_SEQ_CST); inflight_split->used_idx = used->idx; } @@ -1810,8 +1901,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg, /* Interpret ring addresses only when ring is started. */ dev = translate_ring_addresses(dev, file.index); - if (!dev) + if (!dev) { + if (file.fd != VIRTIO_INVALID_EVENTFD) + close(file.fd); + return RTE_VHOST_MSG_RESULT_ERR; + } *pdev = dev; @@ -1829,6 +1924,11 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg, dev->vid, file.index, 1); } + if (vq->ready) { + vq->ready = 0; + vhost_user_notify_queue_state(dev, file.index, 0); + } + if (vq->kickfd >= 0) close(vq->kickfd); vq->kickfd = file.fd; @@ -1850,14 +1950,6 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg, return RTE_VHOST_MSG_RESULT_OK; } -static void -free_zmbufs(struct vhost_virtqueue *vq) -{ - drain_zmbuf_list(vq); - - rte_free(vq->zmbufs); -} - /* * when virtio is stopped, qemu will send us the GET_VRING_BASE message. 
 */
@@ -1912,14 +2004,19 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vq->signalled_used_valid = false;
 
-	if (dev->dequeue_zero_copy)
-		free_zmbufs(vq);
 	if (vq_is_packed(dev)) {
 		rte_free(vq->shadow_used_packed);
 		vq->shadow_used_packed = NULL;
 	} else {
 		rte_free(vq->shadow_used_split);
 		vq->shadow_used_split = NULL;
+
+		if (vq->async_pkts_info)
+			rte_free(vq->async_pkts_info);
+		if (vq->async_descs_split)
+			rte_free(vq->async_descs_split);
+		vq->async_pkts_info = NULL;
+		vq->async_descs_split = NULL;
 	}
 
 	rte_free(vq->batch_copy_elems);
@@ -1945,8 +2042,6 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 	struct virtio_net *dev = *pdev;
 	int enable = (int)msg->payload.state.num;
 	int index = (int)msg->payload.state.index;
-	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 
 	if (validate_msg_fds(msg, 0) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -1955,18 +2050,13 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 		"set queue enable: %d to qp idx: %d\n",
 		enable, index);
 
-	did = dev->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
-		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
-
-	if (dev->notify_ops->vring_state_changed)
-		dev->notify_ops->vring_state_changed(dev->vid,
-				index, enable);
-
-	/* On disable, rings have to be stopped being processed. */
-	if (!enable && dev->dequeue_zero_copy)
-		drain_zmbuf_list(dev->virtqueue[index]);
+	if (enable && dev->virtqueue[index]->async_registered) {
+		if (dev->virtqueue[index]->async_pkts_inflight_n) {
+			VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
+				"async inflight packets must be completed first\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+	}
 
 	dev->virtqueue[index]->enabled = enable;
 
@@ -1987,15 +2077,6 @@ vhost_user_get_protocol_features(struct virtio_net **pdev,
 	rte_vhost_driver_get_features(dev->ifname, &features);
 	rte_vhost_driver_get_protocol_features(dev->ifname,
 			&protocol_features);
 
-	/*
-	 * REPLY_ACK protocol feature is only mandatory for now
-	 * for IOMMU feature. If IOMMU is explicitly disabled by the
-	 * application, disable also REPLY_ACK feature for older buggy
-	 * Qemu versions (from v2.7.0 to v2.9.0).
-	 */
-	if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-		protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);
-
 	msg->payload.u64 = protocol_features;
 	msg->size = sizeof(msg->payload.u64);
 	msg->fd_num = 0;
@@ -2053,18 +2134,18 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		VHOST_LOG_CONFIG(ERR,
 			"invalid log base msg size: %"PRId32" != %d\n",
 			msg->size, (int)sizeof(VhostUserLog));
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	size = msg->payload.log.mmap_size;
 	off  = msg->payload.log.mmap_offset;
 
-	/* Don't allow mmap_offset to point outside the mmap region */
-	if (off > size) {
+	/* Check for mmap size and offset overflow. */
+	if (off >= -size) {
 		VHOST_LOG_CONFIG(ERR,
-			"log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
+			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
			off, size);
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	VHOST_LOG_CONFIG(INFO,
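Here, as in vhost_user_mmap_region() above, the patch replaces a plain bounds check with an unsigned-wraparound test: for a uint64_t size, off + size overflows exactly when off >= -size, because -size wraps to 2^64 - size. A small self-contained illustration; check_sum_overflows() is a hypothetical helper name:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* True iff off + size would wrap past UINT64_MAX.
 * In two's complement, -size is 2^64 - size, so
 * off >= 2^64 - size  <=>  off + size >= 2^64. */
static bool
check_sum_overflows(uint64_t off, uint64_t size)
{
	return off >= -size;
}

int
main(void)
{
	assert(!check_sum_overflows(1, UINT64_MAX - 1)); /* sum is 2^64 - 1: fits */
	assert(check_sum_overflows(2, UINT64_MAX - 1));  /* sum is 2^64: wraps */
	assert(check_sum_overflows(0, 0));               /* -0 is 0, so a zero size is always flagged */
	return 0;
}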
@@ -2101,6 +2182,10 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	msg->fd_num = 0;
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
+
+close_msg_fds:
+	close_msg_fds(msg);
+	return RTE_VHOST_MSG_RESULT_ERR;
 }
 
 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
@@ -2131,7 +2216,6 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	struct virtio_net *dev = *pdev;
 	uint8_t *mac = (uint8_t *)&msg->payload.u64;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 
 	if (validate_msg_fds(msg, 0) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -2145,13 +2229,11 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	 * Set the flag to inject a RARP broadcast packet at
 	 * rte_vhost_dequeue_burst().
 	 *
-	 * rte_smp_wmb() is for making sure the mac is copied
-	 * before the flag is set.
+	 * __ATOMIC_RELEASE ordering is for making sure the mac is
+	 * copied before the flag is set.
 	 */
-	rte_smp_wmb();
-	rte_atomic16_set(&dev->broadcast_rarp, 1);
-	did = dev->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
+	__atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
+	vdpa_dev = dev->vdpa_dev;
 	if (vdpa_dev && vdpa_dev->ops->migration_done)
 		vdpa_dev->ops->migration_done(dev->vid);
@@ -2302,6 +2384,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
 
+			if (!vq)
+				continue;
+
 			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
 					len, imsg->perm);
 
@@ -2313,6 +2398,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
 
+			if (!vq)
+				continue;
+
 			vhost_user_iotlb_cache_remove(vq, imsg->iova,
 					imsg->size);
 
@@ -2411,6 +2499,70 @@ vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	return RTE_VHOST_MSG_RESULT_REPLY;
 }
 
+static int
+vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
+		int main_fd __rte_unused)
+{
+	struct virtio_net *dev = *pdev;
+
+	if (validate_msg_fds(msg, 0) != 0)
+		return RTE_VHOST_MSG_RESULT_ERR;
+
+	msg->payload.u64 = dev->status;
+	msg->size = sizeof(msg->payload.u64);
+	msg->fd_num = 0;
+
+	return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
+		int main_fd __rte_unused)
+{
+	struct virtio_net *dev = *pdev;
+
+	if (validate_msg_fds(msg, 0) != 0)
+		return RTE_VHOST_MSG_RESULT_ERR;
+
+	/* As per the Virtio specification, the device status is 8 bits long */
+	if (msg->payload.u64 > UINT8_MAX) {
+		VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
+				msg->payload.u64);
+		return RTE_VHOST_MSG_RESULT_ERR;
+	}
+
+	dev->status = msg->payload.u64;
+
+	if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
+	    (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
+		VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
+		/*
+		 * Clear the bit to let the driver know about the feature
+		 * negotiation failure
+		 */
+		dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
+	}
+
+	VHOST_LOG_CONFIG(INFO, "New device status (0x%08x):\n"
+			"\t-RESET: %u\n"
+			"\t-ACKNOWLEDGE: %u\n"
+			"\t-DRIVER: %u\n"
+			"\t-FEATURES_OK: 
%u\n" + "\t-DRIVER_OK: %u\n" + "\t-DEVICE_NEED_RESET: %u\n" + "\t-FAILED: %u\n", + dev->status, + (dev->status == VIRTIO_DEVICE_STATUS_RESET), + !!(dev->status & VIRTIO_DEVICE_STATUS_ACK), + !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER), + !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK), + !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK), + !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET), + !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED)); + + return RTE_VHOST_MSG_RESULT_OK; +} + typedef int (*vhost_message_handler_t)(struct virtio_net **pdev, struct VhostUserMsg *msg, int main_fd); @@ -2443,6 +2595,8 @@ static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = { [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end, [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd, [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd, + [VHOST_USER_SET_STATUS] = vhost_user_set_status, + [VHOST_USER_GET_STATUS] = vhost_user_get_status, }; /* return bytes# of read on success or negative val on failure. */ @@ -2526,7 +2680,7 @@ static int vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, struct VhostUserMsg *msg) { - uint16_t vring_idx; + uint32_t vring_idx; switch (msg->request.master) { case VHOST_USER_SET_VRING_KICK: @@ -2598,11 +2752,11 @@ vhost_user_msg_handler(int vid, int fd) struct virtio_net *dev; struct VhostUserMsg msg; struct rte_vdpa_device *vdpa_dev; - int did = -1; int ret; int unlock_required = 0; bool handled; int request; + uint32_t i; dev = get_device(vid); if (dev == NULL) @@ -2675,8 +2829,10 @@ vhost_user_msg_handler(int vid, int fd) case VHOST_USER_SEND_RARP: case VHOST_USER_NET_SET_MTU: case VHOST_USER_SET_SLAVE_REQ_FD: - vhost_user_lock_all_queue_pairs(dev); - unlock_required = 1; + if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { + vhost_user_lock_all_queue_pairs(dev); + unlock_required = 1; + } break; default: break; @@ -2776,30 +2932,44 @@ skip_to_post_handle: return -1; } - if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) { - dev->flags |= VIRTIO_DEV_READY; - - if (!(dev->flags & VIRTIO_DEV_RUNNING)) { - if (dev->dequeue_zero_copy) { - VHOST_LOG_CONFIG(INFO, - "dequeue zero copy is enabled\n"); - } + for (i = 0; i < dev->nr_vring; i++) { + struct vhost_virtqueue *vq = dev->virtqueue[i]; + bool cur_ready = vq_is_ready(dev, vq); - if (dev->notify_ops->new_device(dev->vid) == 0) - dev->flags |= VIRTIO_DEV_RUNNING; + if (cur_ready != (vq && vq->ready)) { + vq->ready = cur_ready; + vhost_user_notify_queue_state(dev, i, cur_ready); } } - did = dev->vdpa_dev_id; - vdpa_dev = rte_vdpa_get_device(did); - if (vdpa_dev && virtio_is_ready(dev) && - !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) && - msg.request.master == VHOST_USER_SET_VRING_CALL) { - if (vdpa_dev->ops->dev_conf) - vdpa_dev->ops->dev_conf(dev->vid); - dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; + + if (!virtio_is_ready(dev)) + goto out; + + /* + * Virtio is now ready. If not done already, it is time + * to notify the application it can process the rings and + * configure the vDPA device if present. 
+ */ + + if (!(dev->flags & VIRTIO_DEV_RUNNING)) { + if (dev->notify_ops->new_device(dev->vid) == 0) + dev->flags |= VIRTIO_DEV_RUNNING; } + vdpa_dev = dev->vdpa_dev; + if (!vdpa_dev) + goto out; + + if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) { + if (vdpa_dev->ops->dev_conf(dev->vid)) + VHOST_LOG_CONFIG(ERR, + "Failed to configure vDPA device\n"); + else + dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED; + } + +out: return 0; } @@ -2936,21 +3106,21 @@ static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev, return process_slave_message_reply(dev, &msg); } -int rte_vhost_host_notifier_ctrl(int vid, bool enable) +int rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable) { struct virtio_net *dev; struct rte_vdpa_device *vdpa_dev; - int vfio_device_fd, did, ret = 0; + int vfio_device_fd, ret = 0; uint64_t offset, size; - unsigned int i; + unsigned int i, q_start, q_last; dev = get_device(vid); if (!dev) return -ENODEV; - did = dev->vdpa_dev_id; - if (did < 0) - return -EINVAL; + vdpa_dev = dev->vdpa_dev; + if (vdpa_dev == NULL) + return -ENODEV; if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) || !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) || @@ -2962,9 +3132,15 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable) (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER))) return -ENOTSUP; - vdpa_dev = rte_vdpa_get_device(did); - if (!vdpa_dev) - return -ENODEV; + if (qid == RTE_VHOST_QUEUE_ALL) { + q_start = 0; + q_last = dev->nr_vring - 1; + } else { + if (qid >= dev->nr_vring) + return -EINVAL; + q_start = qid; + q_last = qid; + } RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP); RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP); @@ -2974,7 +3150,7 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable) return -ENOTSUP; if (enable) { - for (i = 0; i < dev->nr_vring; i++) { + for (i = q_start; i <= q_last; i++) { if (vdpa_dev->ops->get_notify_area(vid, i, &offset, &size) < 0) { ret = -ENOTSUP; @@ -2989,7 +3165,7 @@ int rte_vhost_host_notifier_ctrl(int vid, bool enable) } } else { disable: - for (i = 0; i < dev->nr_vring; i++) { + for (i = q_start; i <= q_last; i++) { vhost_user_slave_set_vring_host_notifier(dev, i, -1, 0, 0); }
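With this change, rte_vhost_host_notifier_ctrl() takes a queue id and can target either a single vring or, via RTE_VHOST_QUEUE_ALL, every vring at once. A minimal caller sketch, assuming the declarations come from rte_vdpa.h; setup_host_notifiers() is a hypothetical helper, not part of the API:

#include <stdbool.h>
#include <stdint.h>

#include <rte_vdpa.h>

static int
setup_host_notifiers(int vid, uint16_t nr_vring)
{
	uint16_t qid;
	int ret;

	/* Try the wildcard first: map notify areas for all vrings. */
	ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true);
	if (ret == 0)
		return 0;

	/* Fall back to per-queue setup, rolling back on failure. */
	for (qid = 0; qid < nr_vring; qid++) {
		ret = rte_vhost_host_notifier_ctrl(vid, qid, true);
		if (ret) {
			while (qid--)
				rte_vhost_host_notifier_ctrl(vid, qid, false);
			return ret;
		}
	}

	return 0;
}

The per-queue form matches how the function now iterates from q_start to q_last internally, so a driver can also enable or disable a single vring in response to a vring state change.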