X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fsocket.c;h=a34bc7f9a308634f75d8104c8db8c61a56a6f2ec;hb=70c7747689082abba6452803b6e23e6aed649ace;hp=a71e271ffd3de0857560062a83898f9014891195;hpb=ae0b1de941c5db657d202d78132ab78065468e77;p=dpdk.git diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c index a71e271ffd..a34bc7f9a3 100644 --- a/lib/librte_vhost/socket.c +++ b/lib/librte_vhost/socket.c @@ -4,7 +4,6 @@ #include #include -#include #include #include #include @@ -40,6 +39,9 @@ struct vhost_user_socket { bool reconnect; bool dequeue_zero_copy; bool iommu_support; + bool use_builtin_virtio_net; + bool extbuf; + bool linearbuf; /* * The "supported_features" indicates the feature bits the @@ -51,6 +53,15 @@ struct vhost_user_socket { uint64_t supported_features; uint64_t features; + uint64_t protocol_features; + + /* + * Device id to identify a specific backend device. + * It's set to -1 for the default software implementation. + * If valid, one socket can have 1 connection only. + */ + int vdpa_dev_id; + struct vhost_device_ops const *notify_ops; }; @@ -81,23 +92,30 @@ static struct vhost_user vhost_user = { .fdset = { .fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} }, .fd_mutex = PTHREAD_MUTEX_INITIALIZER, + .fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER, .num = 0 }, .vsocket_cnt = 0, .mutex = PTHREAD_MUTEX_INITIALIZER, }; -/* return bytes# of read on success or negative val on failure. */ +/* + * return bytes# of read on success or negative val on failure. Update fdnum + * with number of fds read. + */ int -read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) +read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds, + int *fd_num) { struct iovec iov; struct msghdr msgh; - size_t fdsize = fd_num * sizeof(int); - char control[CMSG_SPACE(fdsize)]; + char control[CMSG_SPACE(max_fds * sizeof(int))]; struct cmsghdr *cmsg; + int got_fds = 0; int ret; + *fd_num = 0; + memset(&msgh, 0, sizeof(msgh)); iov.iov_base = buf; iov.iov_len = buflen; @@ -122,11 +140,17 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) cmsg = CMSG_NXTHDR(&msgh, cmsg)) { if ((cmsg->cmsg_level == SOL_SOCKET) && (cmsg->cmsg_type == SCM_RIGHTS)) { - memcpy(fds, CMSG_DATA(cmsg), fdsize); + got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int); + *fd_num = got_fds; + memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int)); break; } } + /* Clear out unused file descriptors */ + while (got_fds < max_fds) + fds[got_fds++] = -1; + return ret; } @@ -152,6 +176,11 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) msgh.msg_control = control; msgh.msg_controllen = sizeof(control); cmsg = CMSG_FIRSTHDR(&msgh); + if (cmsg == NULL) { + RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n"); + errno = EINVAL; + return -1; + } cmsg->cmsg_len = CMSG_LEN(fdsize); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; @@ -162,7 +191,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num) } do { - ret = sendmsg(sockfd, &msgh, 0); + ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL); } while (ret < 0 && errno == EINTR); if (ret < 0) { @@ -181,6 +210,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket) struct vhost_user_connection *conn; int ret; + if (vsocket == NULL) + return; + conn = malloc(sizeof(*conn)); if (conn == NULL) { close(fd); @@ -195,9 +227,19 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket) size = strnlen(vsocket->path, PATH_MAX); 
vhost_set_ifname(vid, vsocket->path, size); + vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net); + + vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id); + if (vsocket->dequeue_zero_copy) vhost_enable_dequeue_zero_copy(vid); + if (vsocket->extbuf) + vhost_enable_extbuf(vid); + + if (vsocket->linearbuf) + vhost_enable_linearbuf(vid); + RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid); if (vsocket->notify_ops->new_connection) { @@ -206,7 +248,7 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket) RTE_LOG(ERR, VHOST_CONFIG, "failed to add vhost user connection with fd %d\n", fd); - goto err; + goto err_cleanup; } } @@ -223,14 +265,18 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket) if (vsocket->notify_ops->destroy_connection) vsocket->notify_ops->destroy_connection(conn->vid); - goto err; + goto err_cleanup; } pthread_mutex_lock(&vsocket->conn_mutex); TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next); pthread_mutex_unlock(&vsocket->conn_mutex); + + fdset_pipe_notify(&vhost_user.fdset); return; +err_cleanup: + vhost_destroy_device(vid); err: free(conn); close(fd); @@ -259,13 +305,19 @@ vhost_user_read_cb(int connfd, void *dat, int *remove) ret = vhost_user_msg_handler(conn->vid, connfd); if (ret < 0) { + struct virtio_net *dev = get_device(conn->vid); + close(connfd); *remove = 1; - vhost_destroy_device(conn->vid); + + if (dev) + vhost_destroy_device_notify(dev); if (vsocket->notify_ops->destroy_connection) vsocket->notify_ops->destroy_connection(conn->vid); + vhost_destroy_device(conn->vid); + pthread_mutex_lock(&vsocket->conn_mutex); TAILQ_REMOVE(&vsocket->conn_list, conn, next); pthread_mutex_unlock(&vsocket->conn_mutex); @@ -315,6 +367,16 @@ vhost_user_start_server(struct vhost_user_socket *vsocket) int fd = vsocket->socket_fd; const char *path = vsocket->path; + /* + * bind () may fail if the socket file with the same name already + * exists. But the library obviously should not delete the file + * provided by the user, since we can not be sure that it is not + * being used by other applications. Moreover, many applications form + * socket names based on user input, which is prone to errors. + * + * The user must ensure that the socket does not exist before + * registering the vhost driver in server mode. 
+ */ ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un)); if (ret < 0) { RTE_LOG(ERR, VHOST_CONFIG, @@ -433,7 +495,6 @@ static int vhost_user_reconnect_init(void) { int ret; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; ret = pthread_mutex_init(&reconn_list.mutex, NULL); if (ret < 0) { @@ -442,22 +503,14 @@ vhost_user_reconnect_init(void) } TAILQ_INIT(&reconn_list.head); - ret = pthread_create(&reconn_tid, NULL, + ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL, vhost_user_client_reconnect, NULL); - if (ret < 0) { + if (ret != 0) { RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread"); if (pthread_mutex_destroy(&reconn_list.mutex)) { RTE_LOG(ERR, VHOST_CONFIG, "failed to destroy reconnect mutex"); } - } else { - snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, - "vhost-reconn"); - - if (rte_thread_setname(reconn_tid, thread_name)) { - RTE_LOG(DEBUG, VHOST_CONFIG, - "failed to set reconnect thread name"); - } } return ret; @@ -510,6 +563,9 @@ find_vhost_user_socket(const char *path) { int i; + if (path == NULL) + return NULL; + for (i = 0; i < vhost_user.vsocket_cnt; i++) { struct vhost_user_socket *vsocket = vhost_user.vsockets[i]; @@ -520,6 +576,52 @@ find_vhost_user_socket(const char *path) return NULL; } +int +rte_vhost_driver_attach_vdpa_device(const char *path, int did) +{ + struct vhost_user_socket *vsocket; + + if (rte_vdpa_get_device(did) == NULL || path == NULL) + return -1; + + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); + if (vsocket) + vsocket->vdpa_dev_id = did; + pthread_mutex_unlock(&vhost_user.mutex); + + return vsocket ? 0 : -1; +} + +int +rte_vhost_driver_detach_vdpa_device(const char *path) +{ + struct vhost_user_socket *vsocket; + + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); + if (vsocket) + vsocket->vdpa_dev_id = -1; + pthread_mutex_unlock(&vhost_user.mutex); + + return vsocket ? 0 : -1; +} + +int +rte_vhost_driver_get_vdpa_device_id(const char *path) +{ + struct vhost_user_socket *vsocket; + int did = -1; + + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); + if (vsocket) + did = vsocket->vdpa_dev_id; + pthread_mutex_unlock(&vhost_user.mutex); + + return did; +} + int rte_vhost_driver_disable_features(const char *path, uint64_t features) { @@ -527,6 +629,12 @@ rte_vhost_driver_disable_features(const char *path, uint64_t features) pthread_mutex_lock(&vhost_user.mutex); vsocket = find_vhost_user_socket(path); + + /* Note that use_builtin_virtio_net is not affected by this function + * since callers may want to selectively disable features of the + * built-in vhost net device backend. + */ + if (vsocket) vsocket->features &= ~features; pthread_mutex_unlock(&vhost_user.mutex); @@ -567,6 +675,11 @@ rte_vhost_driver_set_features(const char *path, uint64_t features) if (vsocket) { vsocket->supported_features = features; vsocket->features = features; + + /* Anyone setting feature bits is implementing their own vhost + * device backend. 
+ */ + vsocket->use_builtin_virtio_net = false; } pthread_mutex_unlock(&vhost_user.mutex); @@ -577,19 +690,150 @@ int rte_vhost_driver_get_features(const char *path, uint64_t *features) { struct vhost_user_socket *vsocket; + uint64_t vdpa_features; + struct rte_vdpa_device *vdpa_dev; + int did = -1; + int ret = 0; pthread_mutex_lock(&vhost_user.mutex); vsocket = find_vhost_user_socket(path); - if (vsocket) + if (!vsocket) { + RTE_LOG(ERR, VHOST_CONFIG, + "socket file %s is not registered yet.\n", path); + ret = -1; + goto unlock_exit; + } + + did = vsocket->vdpa_dev_id; + vdpa_dev = rte_vdpa_get_device(did); + if (!vdpa_dev || !vdpa_dev->ops->get_features) { *features = vsocket->features; + goto unlock_exit; + } + + if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to get vdpa features " + "for socket file %s.\n", path); + ret = -1; + goto unlock_exit; + } + + *features = vsocket->features & vdpa_features; + +unlock_exit: pthread_mutex_unlock(&vhost_user.mutex); + return ret; +} + +int +rte_vhost_driver_set_protocol_features(const char *path, + uint64_t protocol_features) +{ + struct vhost_user_socket *vsocket; + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); + if (vsocket) + vsocket->protocol_features = protocol_features; + pthread_mutex_unlock(&vhost_user.mutex); + return vsocket ? 0 : -1; +} + +int +rte_vhost_driver_get_protocol_features(const char *path, + uint64_t *protocol_features) +{ + struct vhost_user_socket *vsocket; + uint64_t vdpa_protocol_features; + struct rte_vdpa_device *vdpa_dev; + int did = -1; + int ret = 0; + + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); if (!vsocket) { RTE_LOG(ERR, VHOST_CONFIG, "socket file %s is not registered yet.\n", path); - return -1; - } else { - return 0; + ret = -1; + goto unlock_exit; + } + + did = vsocket->vdpa_dev_id; + vdpa_dev = rte_vdpa_get_device(did); + if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) { + *protocol_features = vsocket->protocol_features; + goto unlock_exit; + } + + if (vdpa_dev->ops->get_protocol_features(did, + &vdpa_protocol_features) < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to get vdpa protocol features " + "for socket file %s.\n", path); + ret = -1; + goto unlock_exit; + } + + *protocol_features = vsocket->protocol_features + & vdpa_protocol_features; + +unlock_exit: + pthread_mutex_unlock(&vhost_user.mutex); + return ret; +} + +int +rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num) +{ + struct vhost_user_socket *vsocket; + uint32_t vdpa_queue_num; + struct rte_vdpa_device *vdpa_dev; + int did = -1; + int ret = 0; + + pthread_mutex_lock(&vhost_user.mutex); + vsocket = find_vhost_user_socket(path); + if (!vsocket) { + RTE_LOG(ERR, VHOST_CONFIG, + "socket file %s is not registered yet.\n", path); + ret = -1; + goto unlock_exit; + } + + did = vsocket->vdpa_dev_id; + vdpa_dev = rte_vdpa_get_device(did); + if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) { + *queue_num = VHOST_MAX_QUEUE_PAIRS; + goto unlock_exit; + } + + if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to get vdpa queue number " + "for socket file %s.\n", path); + ret = -1; + goto unlock_exit; + } + + *queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num); + +unlock_exit: + pthread_mutex_unlock(&vhost_user.mutex); + return ret; +} + +static void +vhost_user_socket_mem_free(struct vhost_user_socket *vsocket) +{ + if (vsocket && 
vsocket->path) { + free(vsocket->path); + vsocket->path = NULL; + } + + if (vsocket) { + free(vsocket); + vsocket = NULL; } } @@ -623,7 +867,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags) if (vsocket->path == NULL) { RTE_LOG(ERR, VHOST_CONFIG, "error: failed to copy socket path string\n"); - free(vsocket); + vhost_user_socket_mem_free(vsocket); goto out; } TAILQ_INIT(&vsocket->conn_list); @@ -634,6 +878,16 @@ rte_vhost_driver_register(const char *path, uint64_t flags) goto out_free; } vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY; + vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT; + vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT; + + if (vsocket->dequeue_zero_copy && + (flags & RTE_VHOST_USER_IOMMU_SUPPORT)) { + RTE_LOG(ERR, VHOST_CONFIG, + "error: enabling dequeue zero copy and IOMMU features " + "simultaneously is not supported\n"); + goto out_mutex; + } /* * Set the supported features correctly for the builtin vhost-user @@ -647,20 +901,78 @@ rte_vhost_driver_register(const char *path, uint64_t flags) * rte_vhost_driver_set_features(), which will overwrite following * two values. */ + vsocket->use_builtin_virtio_net = true; vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES; vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES; + vsocket->protocol_features = VHOST_USER_PROTOCOL_FEATURES; + + /* + * Dequeue zero copy can't assure descriptors returned in order. + * Also, it requires that the guest memory is populated, which is + * not compatible with postcopy. + */ + if (vsocket->dequeue_zero_copy) { + if (vsocket->extbuf) { + RTE_LOG(ERR, VHOST_CONFIG, + "error: zero copy is incompatible with external buffers\n"); + ret = -1; + goto out_mutex; + } + if (vsocket->linearbuf) { + RTE_LOG(ERR, VHOST_CONFIG, + "error: zero copy is incompatible with linear buffers\n"); + ret = -1; + goto out_mutex; + } + vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER); + vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER); + + RTE_LOG(INFO, VHOST_CONFIG, + "Dequeue zero copy requested, disabling postcopy support\n"); + vsocket->protocol_features &= + ~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT); + } + + /* + * We'll not be able to receive a buffer from guest in linear mode + * without external buffer if it will not fit in a single mbuf, which is + * likely if segmentation offloading enabled. 
+ */ + if (vsocket->linearbuf && !vsocket->extbuf) { + uint64_t seg_offload_features = + (1ULL << VIRTIO_NET_F_HOST_TSO4) | + (1ULL << VIRTIO_NET_F_HOST_TSO6) | + (1ULL << VIRTIO_NET_F_HOST_UFO); + + RTE_LOG(INFO, VHOST_CONFIG, + "Linear buffers requested without external buffers, " + "disabling host segmentation offloading support\n"); + vsocket->supported_features &= ~seg_offload_features; + vsocket->features &= ~seg_offload_features; + } if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) { vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM); vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM); } + if (!(flags & RTE_VHOST_USER_POSTCOPY_SUPPORT)) { + vsocket->protocol_features &= + ~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT); + } else { +#ifndef RTE_LIBRTE_VHOST_POSTCOPY + RTE_LOG(ERR, VHOST_CONFIG, + "Postcopy requested but not compiled\n"); + ret = -1; + goto out_mutex; +#endif + } + if ((flags & RTE_VHOST_USER_CLIENT) != 0) { vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT); if (vsocket->reconnect && reconn_tid == 0) { - if (vhost_user_reconnect_init() < 0) { + if (vhost_user_reconnect_init() != 0) goto out_mutex; - } } } else { vsocket->is_server = true; @@ -681,8 +993,7 @@ out_mutex: "error: failed to destroy connection mutex\n"); } out_free: - free(vsocket->path); - free(vsocket); + vhost_user_socket_mem_free(vsocket); out: pthread_mutex_unlock(&vhost_user.mutex); @@ -723,27 +1034,35 @@ rte_vhost_driver_unregister(const char *path) int count; struct vhost_user_connection *conn, *next; + if (path == NULL) + return -1; + +again: pthread_mutex_lock(&vhost_user.mutex); for (i = 0; i < vhost_user.vsocket_cnt; i++) { struct vhost_user_socket *vsocket = vhost_user.vsockets[i]; if (!strcmp(vsocket->path, path)) { - if (vsocket->is_server) { - fdset_del(&vhost_user.fdset, vsocket->socket_fd); - close(vsocket->socket_fd); - unlink(path); - } else if (vsocket->reconnect) { - vhost_user_remove_reconnect(vsocket); - } - pthread_mutex_lock(&vsocket->conn_mutex); for (conn = TAILQ_FIRST(&vsocket->conn_list); conn != NULL; conn = next) { next = TAILQ_NEXT(conn, next); - fdset_del(&vhost_user.fdset, conn->connfd); + /* + * If r/wcb is executing, release the + * conn_mutex lock, and try again since + * the r/wcb may use the conn_mutex lock. + */ + if (fdset_try_del(&vhost_user.fdset, + conn->connfd) == -1) { + pthread_mutex_unlock( + &vsocket->conn_mutex); + pthread_mutex_unlock(&vhost_user.mutex); + goto again; + } + RTE_LOG(INFO, VHOST_CONFIG, "free connfd = %d for device '%s'\n", conn->connfd, path); @@ -754,9 +1073,17 @@ rte_vhost_driver_unregister(const char *path) } pthread_mutex_unlock(&vsocket->conn_mutex); + if (vsocket->is_server) { + fdset_del(&vhost_user.fdset, + vsocket->socket_fd); + close(vsocket->socket_fd); + unlink(path); + } else if (vsocket->reconnect) { + vhost_user_remove_reconnect(vsocket); + } + pthread_mutex_destroy(&vsocket->conn_mutex); - free(vsocket->path); - free(vsocket); + vhost_user_socket_mem_free(vsocket); count = --vhost_user.vsocket_cnt; vhost_user.vsockets[i] = vhost_user.vsockets[count]; @@ -815,11 +1142,26 @@ rte_vhost_driver_start(const char *path) return -1; if (fdset_tid == 0) { - int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch, - &vhost_user.fdset); - if (ret < 0) + /** + * create a pipe which will be waited by poll and notified to + * rebuild the wait list of poll. 
+ */ + if (fdset_pipe_init(&vhost_user.fdset) < 0) { + RTE_LOG(ERR, VHOST_CONFIG, + "failed to create pipe for vhost fdset\n"); + return -1; + } + + int ret = rte_ctrl_thread_create(&fdset_tid, + "vhost-events", NULL, fdset_event_dispatch, + &vhost_user.fdset); + if (ret != 0) { RTE_LOG(ERR, VHOST_CONFIG, "failed to create fdset handling thread"); + + fdset_pipe_uninit(&vhost_user.fdset); + return -1; + } } if (vsocket->is_server)
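
For context, the sketch below shows how an application might drive the rte_vhost socket API that this diff touches (registration flags, the server-mode socket-file caveat, and the feature/protocol-feature getters that intersect with an attached vDPA device). It is an illustrative sample only, assuming the DPDK 19.11-era rte_vhost.h shown in the diff; the socket path, the example_ops table and the callback bodies are invented for the example and are not part of the patch.

/*
 * Hypothetical usage sketch (not part of the patch): registering a
 * vhost-user port in server mode against the rte_vhost socket API.
 * The path, ops table and callbacks below are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <rte_vhost.h>

static int
new_device(int vid)
{
	printf("vhost device %d is ready\n", vid);
	return 0;
}

static void
destroy_device(int vid)
{
	printf("vhost device %d was removed\n", vid);
}

static const struct vhost_device_ops example_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,
};

static int
register_vhost_port(const char *path)
{
	uint64_t features, protocol_features;

	/*
	 * Server mode (flags == 0): per the comment added above
	 * vhost_user_start_server(), the library never removes an existing
	 * socket file, so the application must make sure the path is free.
	 * Removing it here is the application's own policy choice.
	 */
	unlink(path);

	if (rte_vhost_driver_register(path, 0) != 0)
		return -1;

	if (rte_vhost_driver_callback_register(path, &example_ops) != 0 ||
	    rte_vhost_driver_start(path) != 0) {
		rte_vhost_driver_unregister(path);
		return -1;
	}

	/*
	 * Both getters report the socket's feature sets; when a vDPA device
	 * is attached to the socket, the result is intersected with the
	 * features the vDPA backend reports.
	 */
	if (rte_vhost_driver_get_features(path, &features) == 0 &&
	    rte_vhost_driver_get_protocol_features(path,
						   &protocol_features) == 0)
		printf("%s: features 0x%" PRIx64
		       ", protocol features 0x%" PRIx64 "\n",
		       path, features, protocol_features);

	return 0;
}

A client-mode registration would instead pass RTE_VHOST_USER_CLIENT (optionally with RTE_VHOST_USER_NO_RECONNECT), and the buffer-handling flags added by this diff, RTE_VHOST_USER_EXTBUF_SUPPORT and RTE_VHOST_USER_LINEARBUF_SUPPORT, are OR-ed into the same flags argument; as the hunks above show, linear buffers without external buffers cause the host segmentation-offload feature bits to be masked out.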