X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost_user.c;h=c9e29ece8fa37159507d2d0e9451856b02ea0ddc;hb=b149a7064261c2424ce5eeaeceb884dce46967cb;hp=6d825351496b731be6af38913e133316fd9ee07c;hpb=d767436ee5d26d1d417ae17d1a2a47879bf632a6;p=dpdk.git

diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 6d82535149..c9e29ece8f 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -93,15 +93,47 @@ get_blk_size(int fd)
 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+/*
+ * Reclaim all the outstanding zmbufs for a virtqueue.
+ */
+static void
+drain_zmbuf_list(struct vhost_virtqueue *vq)
+{
+	struct zcopy_mbuf *zmbuf, *next;
+
+	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+	     zmbuf != NULL; zmbuf = next) {
+		next = TAILQ_NEXT(zmbuf, next);
+
+		while (!mbuf_is_consumed(zmbuf->mbuf))
+			usleep(1000);
+
+		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+		restore_mbuf(zmbuf->mbuf);
+		rte_pktmbuf_free(zmbuf->mbuf);
+		put_zmbuf(zmbuf);
+		vq->nr_zmbuf -= 1;
+	}
+}
+
 static void
 free_mem_region(struct virtio_net *dev)
 {
 	uint32_t i;
 	struct rte_vhost_mem_region *reg;
+	struct vhost_virtqueue *vq;
 
 	if (!dev || !dev->mem)
 		return;
 
+	if (dev->dequeue_zero_copy) {
+		for (i = 0; i < dev->nr_vring; i++) {
+			vq = dev->virtqueue[i];
+			if (vq)
+				drain_zmbuf_list(vq);
+		}
+	}
+
 	for (i = 0; i < dev->mem->nregions; i++) {
 		reg = &dev->mem->regions[i];
 		if (reg->host_user_addr) {
@@ -1199,8 +1231,12 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 	 * the ring starts already enabled. Otherwise, it is enabled via
 	 * the SET_VRING_ENABLE message.
 	 */
-	if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+	if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
 		vq->enabled = 1;
+		if (dev->notify_ops->vring_state_changed)
+			dev->notify_ops->vring_state_changed(
+					dev->vid, file.index, 1);
+	}
 
 	if (vq->kickfd >= 0)
 		close(vq->kickfd);
@@ -1212,19 +1248,7 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 static void
 free_zmbufs(struct vhost_virtqueue *vq)
 {
-	struct zcopy_mbuf *zmbuf, *next;
-
-	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
-	     zmbuf != NULL; zmbuf = next) {
-		next = TAILQ_NEXT(zmbuf, next);
-
-		while (!mbuf_is_consumed(zmbuf->mbuf))
-			usleep(1000);
-
-		restore_mbuf(zmbuf->mbuf);
-		rte_pktmbuf_free(zmbuf->mbuf);
-		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
-	}
+	drain_zmbuf_list(vq);
 
 	rte_free(vq->zmbufs);
 }
@@ -1278,6 +1302,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
 	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+	vq->signalled_used_valid = false;
+
 	if (dev->dequeue_zero_copy)
 		free_zmbufs(vq);
 	if (vq_is_packed(dev)) {
@@ -1325,6 +1351,10 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 		dev->notify_ops->vring_state_changed(dev->vid,
 				index, enable);
 
+	/* On disable, rings have to be stopped being processed. */
+	if (!enable && dev->dequeue_zero_copy)
+		drain_zmbuf_list(dev->virtqueue[index]);
+
 	dev->virtqueue[index]->enabled = enable;
 
 	return RTE_VHOST_MSG_RESULT_OK;
@@ -1886,7 +1916,7 @@ vhost_user_msg_handler(int vid, int fd)
 	int did = -1;
 	int ret;
 	int unlock_required = 0;
-	uint32_t skip_master = 0;
+	bool handled;
 	int request;
 
 	dev = get_device(vid);
@@ -1904,27 +1934,30 @@
 	}
 
 	ret = read_vhost_message(fd, &msg);
-	if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
+	if (ret <= 0) {
 		if (ret < 0)
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"vhost read message failed\n");
-		else if (ret == 0)
+		else
 			RTE_LOG(INFO, VHOST_CONFIG,
 				"vhost peer closed\n");
-		else
-			RTE_LOG(ERR, VHOST_CONFIG,
-				"vhost read incorrect message\n");
 
 		return -1;
 	}
 
 	ret = 0;
-	if (msg.request.master != VHOST_USER_IOTLB_MSG)
-		RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
-			vhost_message_str[msg.request.master]);
-	else
-		RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
-			vhost_message_str[msg.request.master]);
+	request = msg.request.master;
+	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
+			vhost_message_str[request]) {
+		if (request != VHOST_USER_IOTLB_MSG)
+			RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+				vhost_message_str[request]);
+		else
+			RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+				vhost_message_str[request]);
+	} else {
+		RTE_LOG(DEBUG, VHOST_CONFIG, "External request %d\n", request);
+	}
 
 	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
 	if (ret < 0) {
@@ -1940,7 +1973,7 @@
 	 * inactive, so it is safe. Otherwise taking the access_lock
 	 * would cause a dead lock.
 	 */
-	switch (msg.request.master) {
+	switch (request) {
 	case VHOST_USER_SET_FEATURES:
 	case VHOST_USER_SET_PROTOCOL_FEATURES:
 	case VHOST_USER_SET_OWNER:
@@ -1965,19 +1998,24 @@
 
 	}
 
+	handled = false;
 	if (dev->extern_ops.pre_msg_handle) {
 		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
-				(void *)&msg, &skip_master);
-		if (ret == RTE_VHOST_MSG_RESULT_ERR)
-			goto skip_to_reply;
-		else if (ret == RTE_VHOST_MSG_RESULT_REPLY)
+				(void *)&msg);
+		switch (ret) {
+		case RTE_VHOST_MSG_RESULT_REPLY:
 			send_vhost_reply(fd, &msg);
-
-		if (skip_master)
+			/* Fall-through */
+		case RTE_VHOST_MSG_RESULT_ERR:
+		case RTE_VHOST_MSG_RESULT_OK:
+			handled = true;
 			goto skip_to_post_handle;
+		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+		default:
+			break;
+		}
 	}
 
-	request = msg.request.master;
 	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
 		if (!vhost_message_handlers[request])
 			goto skip_to_post_handle;
@@ -1988,40 +2026,54 @@
 			RTE_LOG(ERR, VHOST_CONFIG,
 				"Processing %s failed.\n",
 				vhost_message_str[request]);
+			handled = true;
 			break;
 		case RTE_VHOST_MSG_RESULT_OK:
 			RTE_LOG(DEBUG, VHOST_CONFIG,
 				"Processing %s succeeded.\n",
 				vhost_message_str[request]);
+			handled = true;
 			break;
 		case RTE_VHOST_MSG_RESULT_REPLY:
 			RTE_LOG(DEBUG, VHOST_CONFIG,
 				"Processing %s succeeded and needs reply.\n",
 				vhost_message_str[request]);
 			send_vhost_reply(fd, &msg);
+			handled = true;
+			break;
+		default:
 			break;
 		}
-	} else {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"Requested invalid message type %d.\n", request);
-		ret = RTE_VHOST_MSG_RESULT_ERR;
 	}
 
 skip_to_post_handle:
 	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
 			dev->extern_ops.post_msg_handle) {
-		ret = (*dev->extern_ops.post_msg_handle)(
-				dev->vid, (void *)&msg);
-		if (ret == RTE_VHOST_MSG_RESULT_ERR)
-			goto skip_to_reply;
-		else if (ret == RTE_VHOST_MSG_RESULT_REPLY)
+		ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
+				(void *)&msg);
+		switch (ret) {
+		case RTE_VHOST_MSG_RESULT_REPLY:
 			send_vhost_reply(fd, &msg);
+			/* Fall-through */
+		case RTE_VHOST_MSG_RESULT_ERR:
+		case RTE_VHOST_MSG_RESULT_OK:
+			handled = true;
+		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+		default:
+			break;
+		}
 	}
 
-skip_to_reply:
 	if (unlock_required)
 		vhost_user_unlock_all_queue_pairs(dev);
 
+	/* If message was not handled at this stage, treat it as an error */
+	if (!handled) {
+		RTE_LOG(ERR, VHOST_CONFIG,
+			"vhost message (req: %d) was not handled.\n", request);
+		ret = RTE_VHOST_MSG_RESULT_ERR;
+	}
+
 	/*
 	 * If the request required a reply that was already sent,
 	 * this optional reply-ack won't be sent as the