X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost_user.c;h=6e94a9bb64b621976bd25a0fe7dbc6ae9ad2c1d2;hb=a33c3584f36083063b0faf1d41251cd75bee921b;hp=1656ec73647da2ea3957ead06e5fe66205a21f16;hpb=6b3c81db8bb7427dc69402a25018781a689bd37a;p=dpdk.git

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 1656ec7364..6e94a9bb64 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -99,8 +99,15 @@ close_msg_fds(struct VhostUserMsg *msg)
 {
 	int i;
 
-	for (i = 0; i < msg->fd_num; i++)
-		close(msg->fds[i]);
+	for (i = 0; i < msg->fd_num; i++) {
+		int fd = msg->fds[i];
+
+		if (fd == -1)
+			continue;
+
+		msg->fds[i] = -1;
+		close(fd);
+	}
 }
 
 /*
@@ -1004,7 +1011,6 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	uint64_t alignment;
 	uint32_t i;
 	int populate;
-	int fd;
 
 	if (validate_msg_fds(msg, memory->nregions) != 0)
 		return RTE_VHOST_MSG_RESULT_ERR;
@@ -1012,7 +1018,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
 		VHOST_LOG_CONFIG(ERR,
 			"too many memory regions (%u)\n", memory->nregions);
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
@@ -1054,7 +1060,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 				"(%d) failed to allocate memory "
 				"for dev->guest_pages\n",
 				dev->vid);
-			return RTE_VHOST_MSG_RESULT_ERR;
+			goto close_msg_fds;
 		}
 	}
 
@@ -1064,18 +1070,23 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		VHOST_LOG_CONFIG(ERR,
 			"(%d) failed to allocate memory for dev->mem\n",
 			dev->vid);
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto free_guest_pages;
 	}
 	dev->mem->nregions = memory->nregions;
 
 	for (i = 0; i < memory->nregions; i++) {
-		fd  = msg->fds[i];
 		reg = &dev->mem->regions[i];
 
 		reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
 		reg->guest_user_addr = memory->regions[i].userspace_addr;
 		reg->size            = memory->regions[i].memory_size;
-		reg->fd              = fd;
+		reg->fd              = msg->fds[i];
+
+		/*
+		 * Assign invalid file descriptor value to avoid double
+		 * closing on error path.
+		 */
+		msg->fds[i] = -1;
 
 		mmap_offset = memory->regions[i].mmap_offset;
 
@@ -1085,7 +1096,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 				"mmap_offset (%#"PRIx64") and memory_size "
 				"(%#"PRIx64") overflow\n",
 				mmap_offset, reg->size);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		mmap_size = reg->size + mmap_offset;
@@ -1098,11 +1109,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		 * to avoid failure, make sure in caller to keep length
 		 * aligned.
 		 */
-		alignment = get_blk_size(fd);
+		alignment = get_blk_size(reg->fd);
 		if (alignment == (uint64_t)-1) {
 			VHOST_LOG_CONFIG(ERR,
 				"couldn't get hugepage size through fstat\n");
-			goto err_mmap;
+			goto free_mem_table;
 		}
 		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
 		if (mmap_size == 0) {
@@ -1118,17 +1129,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
 					"or alignment (0x%" PRIx64 ") is invalid\n",
 					reg->size + mmap_offset, alignment);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		populate = dev->async_copy ? MAP_POPULATE : 0;
 		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
-				MAP_SHARED | populate, fd, 0);
+				MAP_SHARED | populate, reg->fd, 0);
 
 		if (mmap_addr == MAP_FAILED) {
 			VHOST_LOG_CONFIG(ERR,
 				"mmap region %u failed.\n", i);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		reg->mmap_addr = mmap_addr;
@@ -1141,7 +1152,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			VHOST_LOG_CONFIG(ERR,
 				"adding guest pages to region %u failed.\n",
 				i);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		VHOST_LOG_CONFIG(INFO,
@@ -1184,17 +1195,17 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		if (read_vhost_message(main_fd, &ack_msg) <= 0) {
 			VHOST_LOG_CONFIG(ERR,
 				"Failed to read qemu ack on postcopy set-mem-table\n");
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		if (validate_msg_fds(&ack_msg, 0) != 0)
-			goto err_mmap;
+			goto free_mem_table;
 
 		if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
 			VHOST_LOG_CONFIG(ERR,
 				"Bad qemu ack on postcopy set-mem-table (%d)\n",
 				ack_msg.request.master);
-			goto err_mmap;
+			goto free_mem_table;
 		}
 
 		/* Now userfault register and we can use the memory */
@@ -1218,7 +1229,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 					"Failed to register ufd for region %d: (ufd = %d) %s\n",
 					i, dev->postcopy_ufd,
 					strerror(errno));
-				goto err_mmap;
+				goto free_mem_table;
 			}
 			VHOST_LOG_CONFIG(INFO,
 				"\t userfaultfd registered for range : "
@@ -1227,7 +1238,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 				(uint64_t)reg_struct.range.start +
 				(uint64_t)reg_struct.range.len - 1);
 #else
-			goto err_mmap;
+			goto free_mem_table;
 #endif
 		}
 	}
@@ -1235,6 +1246,9 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	for (i = 0; i < dev->nr_vring; i++) {
 		struct vhost_virtqueue *vq = dev->virtqueue[i];
 
+		if (!vq)
+			continue;
+
 		if (vq->desc || vq->avail || vq->used) {
 			/*
 			 * If the memory table got updated, the ring addresses
@@ -1246,7 +1260,7 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			dev = translate_ring_addresses(dev, i);
 			if (!dev) {
 				dev = *pdev;
-				goto err_mmap;
+				goto free_mem_table;
 			}
 
 			*pdev = dev;
@@ -1257,10 +1271,15 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
 	return RTE_VHOST_MSG_RESULT_OK;
 
-err_mmap:
+free_mem_table:
 	free_mem_region(dev);
 	rte_free(dev->mem);
 	dev->mem = NULL;
+free_guest_pages:
+	rte_free(dev->guest_pages);
+	dev->guest_pages = NULL;
+close_msg_fds:
+	close_msg_fds(msg);
 
 	return RTE_VHOST_MSG_RESULT_ERR;
 }
@@ -1556,6 +1575,9 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
 	for (i = 0; i < num_queues; i++) {
 		vq = dev->virtqueue[i];
 
+		if (!vq)
+			continue;
+
 		if (vq_is_packed(dev)) {
 			vq->inflight_packed = addr;
 			vq->inflight_packed->desc_num = queue_size;
@@ -1668,7 +1690,7 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,
 
 	if (inflight_split->used_idx != used->idx) {
 		inflight_split->desc[last_io].inflight = 0;
-		rte_smp_mb();
+		rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 		inflight_split->used_idx = used->idx;
 	}
 
@@ -1836,8 +1858,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
 	/* Interpret ring addresses only when ring is started. */
 	dev = translate_ring_addresses(dev, file.index);
-	if (!dev)
+	if (!dev) {
+		if (file.fd != VIRTIO_INVALID_EVENTFD)
+			close(file.fd);
+
 		return RTE_VHOST_MSG_RESULT_ERR;
+	}
 
 	*pdev = dev;
 
@@ -1980,9 +2006,9 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 		"set queue enable: %d to qp idx: %d\n",
 		enable, index);
 
-	if (!enable && dev->virtqueue[index]->async_registered) {
+	if (enable && dev->virtqueue[index]->async_registered) {
 		if (dev->virtqueue[index]->async_pkts_inflight_n) {
-			VHOST_LOG_CONFIG(ERR, "failed to disable vring. "
+			VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
 			"async inflight packets must be completed first\n");
 			return RTE_VHOST_MSG_RESULT_ERR;
 		}
@@ -2064,7 +2090,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		VHOST_LOG_CONFIG(ERR,
 			"invalid log base msg size: %"PRId32" != %d\n",
 			msg->size, (int)sizeof(VhostUserLog));
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	size = msg->payload.log.mmap_size;
@@ -2075,7 +2101,7 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		VHOST_LOG_CONFIG(ERR,
 			"log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
 			off, size);
-		return RTE_VHOST_MSG_RESULT_ERR;
+		goto close_msg_fds;
 	}
 
 	VHOST_LOG_CONFIG(INFO,
@@ -2112,6 +2138,10 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	msg->fd_num = 0;
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
+
+close_msg_fds:
+	close_msg_fds(msg);
+	return RTE_VHOST_MSG_RESULT_ERR;
 }
 
 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
@@ -2310,6 +2340,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
 
+			if (!vq)
+				continue;
+
 			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
 					len, imsg->perm);
 
@@ -2321,6 +2354,9 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		for (i = 0; i < dev->nr_vring; i++) {
 			struct vhost_virtqueue *vq = dev->virtqueue[i];
 
+			if (!vq)
+				continue;
+
 			vhost_user_iotlb_cache_remove(vq, imsg->iova,
 					imsg->size);