X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fsocket.c;h=0169d364813de21bc6c74108bd26c68a6242f7f1;hb=6184a02223722b929ae99dff2ba967626448dd63;hp=85c64485c2bb0af42d7f00d1f782823ed2c5bd7a;hpb=499fd8e5b89c2ce65d57875d0a7d67f3a7f2fb6b;p=dpdk.git

diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 85c64485c2..0169d36481 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -37,11 +37,11 @@ struct vhost_user_socket {
 	struct sockaddr_un un;
 	bool is_server;
 	bool reconnect;
-	bool dequeue_zero_copy;
 	bool iommu_support;
 	bool use_builtin_virtio_net;
 	bool extbuf;
 	bool linearbuf;
+	bool async_copy;
 
 	/*
 	 * The "supported_features" indicates the feature bits the
@@ -55,12 +55,7 @@ struct vhost_user_socket {
 
 	uint64_t protocol_features;
 
-	/*
-	 * Device id to identify a specific backend device.
-	 * It's set to -1 for the default software implementation.
-	 * If valid, one socket can have 1 connection only.
-	 */
-	int vdpa_dev_id;
+	struct rte_vdpa_device *vdpa_dev;
 
 	struct vhost_device_ops const *notify_ops;
 };
@@ -127,7 +122,8 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,
 
 	ret = recvmsg(sockfd, &msgh, 0);
 	if (ret <= 0) {
-		VHOST_LOG_CONFIG(ERR, "recvmsg failed\n");
+		if (ret)
+			VHOST_LOG_CONFIG(ERR, "recvmsg failed\n");
 		return ret;
 	}
 
@@ -209,6 +205,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	size_t size;
 	struct vhost_user_connection *conn;
 	int ret;
+	struct virtio_net *dev;
 
 	if (vsocket == NULL)
 		return;
@@ -229,10 +226,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
 
-	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
-
-	if (vsocket->dequeue_zero_copy)
-		vhost_enable_dequeue_zero_copy(vid);
+	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
 
 	if (vsocket->extbuf)
 		vhost_enable_extbuf(vid);
@@ -240,6 +234,13 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	if (vsocket->linearbuf)
 		vhost_enable_linearbuf(vid);
 
+	if (vsocket->async_copy) {
+		dev = get_device(vid);
+
+		if (dev)
+			dev->async_copy = 1;
+	}
+
 	VHOST_LOG_CONFIG(INFO, "new device, handle is %d\n", vid);
 
 	if (vsocket->notify_ops->new_connection) {
@@ -577,17 +578,18 @@ find_vhost_user_socket(const char *path)
 }
 
 int
-rte_vhost_driver_attach_vdpa_device(const char *path, int did)
+rte_vhost_driver_attach_vdpa_device(const char *path,
+		struct rte_vdpa_device *dev)
 {
 	struct vhost_user_socket *vsocket;
 
-	if (rte_vdpa_get_device(did) == NULL || path == NULL)
+	if (dev == NULL || path == NULL)
 		return -1;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		vsocket->vdpa_dev_id = did;
+		vsocket->vdpa_dev = dev;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
@@ -601,25 +603,25 @@ rte_vhost_driver_detach_vdpa_device(const char *path)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		vsocket->vdpa_dev_id = -1;
+		vsocket->vdpa_dev = NULL;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
 
-int
-rte_vhost_driver_get_vdpa_device_id(const char *path)
+struct rte_vdpa_device *
+rte_vhost_driver_get_vdpa_device(const char *path)
 {
 	struct vhost_user_socket *vsocket;
-	int did = -1;
+	struct rte_vdpa_device *dev = NULL;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		did = vsocket->vdpa_dev_id;
+		dev = vsocket->vdpa_dev;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
-	return did;
+	return dev;
 }
 
 int
@@ -692,7 +694,6 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	struct vhost_user_socket *vsocket;
 	uint64_t vdpa_features;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
@@ -704,14 +705,13 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_features) {
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
 		*features = vsocket->features;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
+	if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
 		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa features "
 				"for socket file %s.\n", path);
@@ -747,7 +747,6 @@ rte_vhost_driver_get_protocol_features(const char *path,
 	struct vhost_user_socket *vsocket;
 	uint64_t vdpa_protocol_features;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
@@ -759,14 +758,13 @@ rte_vhost_driver_get_protocol_features(const char *path,
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
 		*protocol_features = vsocket->protocol_features;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_protocol_features(did,
+	if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
 			&vdpa_protocol_features) < 0) {
 		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa protocol features "
@@ -789,7 +787,6 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	struct vhost_user_socket *vsocket;
 	uint32_t vdpa_queue_num;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
@@ -801,14 +798,13 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
 		*queue_num = VHOST_MAX_QUEUE_PAIRS;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
+	if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
 		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa queue number "
 				"for socket file %s.\n", path);
@@ -877,16 +873,17 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 			"error: failed to init connection mutex\n");
 		goto out_free;
 	}
-	vsocket->vdpa_dev_id = -1;
-	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+	vsocket->vdpa_dev = NULL;
 	vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
 	vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
-
-	if (vsocket->dequeue_zero_copy &&
-	    (flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
-		VHOST_LOG_CONFIG(ERR,
-			"error: enabling dequeue zero copy and IOMMU features "
-			"simultaneously is not supported\n");
+	vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
+
+	if (vsocket->async_copy &&
+		(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
+		RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
+		VHOST_LOG_CONFIG(ERR, "error: enabling async copy and IOMMU "
+			"or post-copy feature simultaneously is not "
+			"supported\n");
 		goto out_mutex;
 	}
 
@@ -907,31 +904,11 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 	vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
 	vsocket->protocol_features = VHOST_USER_PROTOCOL_FEATURES;
 
-	/*
-	 * Dequeue zero copy can't assure descriptors returned in order.
-	 * Also, it requires that the guest memory is populated, which is
-	 * not compatible with postcopy.
-	 */
-	if (vsocket->dequeue_zero_copy) {
-		if (vsocket->extbuf) {
-			VHOST_LOG_CONFIG(ERR,
-			"error: zero copy is incompatible with external buffers\n");
-			ret = -1;
-			goto out_mutex;
-		}
-		if (vsocket->linearbuf) {
-			VHOST_LOG_CONFIG(ERR,
-			"error: zero copy is incompatible with linear buffers\n");
-			ret = -1;
-			goto out_mutex;
-		}
-		vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER);
-		vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER);
-
+	if (vsocket->async_copy) {
+		vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
+		vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
 		VHOST_LOG_CONFIG(INFO,
-			"Dequeue zero copy requested, disabling postcopy support\n");
-		vsocket->protocol_features &=
-			~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
+			"Logging feature is disabled in async copy mode\n");
 	}
 
 	/*
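
For context, a minimal application-side sketch (not part of the patch) of the public APIs this diff changes: rte_vhost_driver_register() gains the RTE_VHOST_USER_ASYNC_COPY flag, rte_vhost_driver_attach_vdpa_device() now takes a struct rte_vdpa_device pointer instead of a device id, and rte_vhost_driver_get_vdpa_device() replaces rte_vhost_driver_get_vdpa_device_id(). The helper name setup_vhost_socket() and the use of rte_vdpa_find_device_by_name() for the lookup are illustrative assumptions targeting a DPDK tree that already contains this change.

#include <stdio.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>

/* Illustrative helper (hypothetical name): register a vhost-user socket
 * with async copy enabled and optionally attach a vDPA backend. */
static int
setup_vhost_socket(const char *path, const char *vdpa_name)
{
	struct rte_vdpa_device *vdpa_dev;

	/* RTE_VHOST_USER_ASYNC_COPY replaces the removed dequeue zero copy
	 * flag; per this patch it cannot be combined with IOMMU or postcopy
	 * support and it clears VHOST_F_LOG_ALL on the socket. */
	if (rte_vhost_driver_register(path, RTE_VHOST_USER_ASYNC_COPY) < 0) {
		fprintf(stderr, "failed to register vhost socket %s\n", path);
		return -1;
	}

	if (vdpa_name != NULL) {
		/* Assumption: look the device up by name; the attach API now
		 * takes the rte_vdpa_device pointer directly, not an id. */
		vdpa_dev = rte_vdpa_find_device_by_name(vdpa_name);
		if (vdpa_dev == NULL ||
		    rte_vhost_driver_attach_vdpa_device(path, vdpa_dev) < 0) {
			fprintf(stderr, "failed to attach vDPA device %s\n",
				vdpa_name);
			return -1;
		}
	}

	/* The getter now returns the pointer, or NULL when no vDPA device
	 * is attached and the software datapath is used. */
	if (rte_vhost_driver_get_vdpa_device(path) == NULL)
		printf("socket %s uses the software vhost datapath\n", path);

	return rte_vhost_driver_start(path);
}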