X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fsocket.c;h=73e1dca95e1d8685416a8ffdfe58d2bfdf7f1ca7;hb=b46a99c6006ab8edf098e4ac351d366336f6ac71;hp=6eec427e466401529f4be17b7c4fe40cd6d3907d;hpb=dec7b1884a555c941110526cc6cb9dfa08fd51b9;p=dpdk.git

diff --git a/lib/librte_vhost/socket.c b/lib/librte_vhost/socket.c
index 6eec427e46..73e1dca95e 100644
--- a/lib/librte_vhost/socket.c
+++ b/lib/librte_vhost/socket.c
@@ -40,6 +40,9 @@ struct vhost_user_socket {
 	bool dequeue_zero_copy;
 	bool iommu_support;
 	bool use_builtin_virtio_net;
+	bool extbuf;
+	bool linearbuf;
+	bool async_copy;
 
 	/*
 	 * The "supported_features" indicates the feature bits the
@@ -51,12 +54,9 @@ struct vhost_user_socket {
 	uint64_t supported_features;
 	uint64_t features;
 
-	/*
-	 * Device id to identify a specific backend device.
-	 * It's set to -1 for the default software implementation.
-	 * If valid, one socket can have 1 connection only.
-	 */
-	int vdpa_dev_id;
+	uint64_t protocol_features;
+
+	struct rte_vdpa_device *vdpa_dev;
 
 	struct vhost_device_ops const *notify_ops;
 };
@@ -88,24 +88,30 @@ static struct vhost_user vhost_user = {
 	.fdset = {
 		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
 		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
+		.fd_pooling_mutex = PTHREAD_MUTEX_INITIALIZER,
 		.num = 0
 	},
 	.vsocket_cnt = 0,
 	.mutex = PTHREAD_MUTEX_INITIALIZER,
 };
 
-/* return bytes# of read on success or negative val on failure. */
+/*
+ * return bytes# of read on success or negative val on failure. Update fdnum
+ * with number of fds read.
+ */
 int
-read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
+read_fd_message(int sockfd, char *buf, int buflen, int *fds, int max_fds,
+		int *fd_num)
 {
 	struct iovec iov;
 	struct msghdr msgh;
-	size_t fdsize = fd_num * sizeof(int);
-	char control[CMSG_SPACE(fdsize)];
+	char control[CMSG_SPACE(max_fds * sizeof(int))];
 	struct cmsghdr *cmsg;
 	int got_fds = 0;
 	int ret;
 
+	*fd_num = 0;
+
 	memset(&msgh, 0, sizeof(msgh));
 	iov.iov_base = buf;
 	iov.iov_len = buflen;
@@ -117,12 +123,13 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 
 	ret = recvmsg(sockfd, &msgh, 0);
 	if (ret <= 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed\n");
+		if (ret)
+			VHOST_LOG_CONFIG(ERR, "recvmsg failed\n");
 		return ret;
 	}
 
 	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
-		RTE_LOG(ERR, VHOST_CONFIG, "truncted msg\n");
+		VHOST_LOG_CONFIG(ERR, "truncated msg\n");
 		return -1;
 	}
 
@@ -131,13 +138,14 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 		if ((cmsg->cmsg_level == SOL_SOCKET) &&
 			(cmsg->cmsg_type == SCM_RIGHTS)) {
 			got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
+			*fd_num = got_fds;
 			memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
 			break;
 		}
 	}
 
 	/* Clear out unused file descriptors */
-	while (got_fds < fd_num)
+	while (got_fds < max_fds)
 		fds[got_fds++] = -1;
 
 	return ret;
@@ -166,7 +174,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 		msgh.msg_controllen = sizeof(control);
 		cmsg = CMSG_FIRSTHDR(&msgh);
 		if (cmsg == NULL) {
-			RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n");
+			VHOST_LOG_CONFIG(ERR, "cmsg == NULL\n");
 			errno = EINVAL;
 			return -1;
 		}
@@ -184,7 +192,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
 	} while (ret < 0 && errno == EINTR);
 
 	if (ret < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");
+		VHOST_LOG_CONFIG(ERR, "sendmsg error\n");
 		return ret;
 	}
 
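The read_fd_message() change above splits the old in/out fd_num parameter in two: max_fds tells the function how many descriptors the fds[] array can hold, and *fd_num reports how many were actually received. A minimal caller sketch under the new contract; the buffer size, array capacity, and function name here are illustrative, not taken from this patch:

/* Hedged sketch of a caller under the new read_fd_message() signature. */
#define EXAMPLE_MAX_FDS 8	/* illustrative capacity, not from this file */

static int
example_recv_fds(int sockfd)
{
	char buf[4096];
	int fds[EXAMPLE_MAX_FDS];
	int fd_num = 0;
	int ret;

	ret = read_fd_message(sockfd, buf, sizeof(buf),
			fds, EXAMPLE_MAX_FDS, &fd_num);
	if (ret <= 0)
		return ret;	/* 0 on peer close, negative on error */

	/* fds[0..fd_num-1] are valid; the remaining slots were set to -1. */
	return fd_num;
}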
@@ -198,6 +206,10 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	size_t size;
 	struct vhost_user_connection *conn;
 	int ret;
+	struct virtio_net *dev;
+
+	if (vsocket == NULL)
+		return;
 
 	conn = malloc(sizeof(*conn));
 	if (conn == NULL) {
@@ -215,20 +227,33 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
 	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
 
-	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
+	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
 
 	if (vsocket->dequeue_zero_copy)
 		vhost_enable_dequeue_zero_copy(vid);
 
-	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
+	if (vsocket->extbuf)
+		vhost_enable_extbuf(vid);
+
+	if (vsocket->linearbuf)
+		vhost_enable_linearbuf(vid);
+
+	if (vsocket->async_copy) {
+		dev = get_device(vid);
+
+		if (dev)
+			dev->async_copy = 1;
+	}
+
+	VHOST_LOG_CONFIG(INFO, "new device, handle is %d\n", vid);
 
 	if (vsocket->notify_ops->new_connection) {
 		ret = vsocket->notify_ops->new_connection(vid);
 		if (ret < 0) {
-			RTE_LOG(ERR, VHOST_CONFIG,
+			VHOST_LOG_CONFIG(ERR,
 				"failed to add vhost user connection with fd %d\n",
 				fd);
-			goto err;
+			goto err_cleanup;
 		}
 	}
 
@@ -238,14 +263,14 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
 			NULL, conn);
 	if (ret < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"failed to add fd %d into vhost server fdset\n",
 			fd);
 
 		if (vsocket->notify_ops->destroy_connection)
 			vsocket->notify_ops->destroy_connection(conn->vid);
 
-		goto err;
+		goto err_cleanup;
 	}
 
 	pthread_mutex_lock(&vsocket->conn_mutex);
@@ -255,6 +280,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 	fdset_pipe_notify(&vhost_user.fdset);
 	return;
 
+err_cleanup:
+	vhost_destroy_device(vid);
 err:
 	free(conn);
 	close(fd);
@@ -270,7 +297,7 @@ vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
 	if (fd < 0)
 		return;
 
-	RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
+	VHOST_LOG_CONFIG(INFO, "new vhost user connection is %d\n", fd);
 	vhost_user_add_connection(fd, vsocket);
 }
 
@@ -283,23 +310,29 @@ vhost_user_read_cb(int connfd, void *dat, int *remove)
 
 	ret = vhost_user_msg_handler(conn->vid, connfd);
 	if (ret < 0) {
+		struct virtio_net *dev = get_device(conn->vid);
+
 		close(connfd);
 		*remove = 1;
-		vhost_destroy_device(conn->vid);
+
+		if (dev)
+			vhost_destroy_device_notify(dev);
 
 		if (vsocket->notify_ops->destroy_connection)
 			vsocket->notify_ops->destroy_connection(conn->vid);
 
-		pthread_mutex_lock(&vsocket->conn_mutex);
-		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
-		pthread_mutex_unlock(&vsocket->conn_mutex);
-
-		free(conn);
+		vhost_destroy_device(conn->vid);
 
 		if (vsocket->reconnect) {
 			create_unix_socket(vsocket);
 			vhost_user_start_client(vsocket);
 		}
+
+		pthread_mutex_lock(&vsocket->conn_mutex);
+		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
+		pthread_mutex_unlock(&vsocket->conn_mutex);
+
+		free(conn);
 	}
 }
 
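The hunks above tighten the connection lifecycle: setup failures now unwind through err_cleanup (destroying the device), and teardown in vhost_user_read_cb() notifies and destroys the device before the connection is unlinked and freed. For context, a sketch of the application-side callbacks these paths invoke, using the public struct vhost_device_ops from rte_vhost.h; the callback bodies are placeholders:

#include <stdio.h>
#include <rte_vhost.h>

static int
app_new_connection(int vid)
{
	/*
	 * Invoked from vhost_user_add_connection() above; returning a
	 * negative value makes the library reject the connection.
	 */
	printf("vhost: new connection, vid %d\n", vid);
	return 0;
}

static void
app_destroy_connection(int vid)
{
	/* Invoked on both the setup error path and read-callback teardown. */
	printf("vhost: connection %d closed\n", vid);
}

static const struct vhost_device_ops app_notify_ops = {
	.new_connection = app_new_connection,
	.destroy_connection = app_destroy_connection,
};

The ops table would be installed with rte_vhost_driver_callback_register() before the driver is started.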
"server" : "client", fd); if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "vhost-user: can't set nonblocking mode for socket, fd: " "%d (%s)\n", fd, strerror(errno)); close(fd); @@ -351,12 +384,12 @@ vhost_user_start_server(struct vhost_user_socket *vsocket) */ ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un)); if (ret < 0) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "failed to bind to %s: %s; remove it and try again\n", path, strerror(errno)); goto err; } - RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path); + VHOST_LOG_CONFIG(INFO, "bind to %s\n", path); ret = listen(fd, MAX_VIRTIO_BACKLOG); if (ret < 0) @@ -365,7 +398,7 @@ vhost_user_start_server(struct vhost_user_socket *vsocket) ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection, NULL, vsocket); if (ret < 0) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "failed to add listen fd %d to vhost server fdset\n", fd); goto err; @@ -406,12 +439,12 @@ vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz) flags = fcntl(fd, F_GETFL, 0); if (flags < 0) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "can't get flags for connfd %d\n", fd); return -2; } if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "can't disable nonblocking on fd %d\n", fd); return -2; } @@ -440,7 +473,7 @@ vhost_user_client_reconnect(void *arg __rte_unused) sizeof(reconn->un)); if (ret == -2) { close(reconn->fd); - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "reconnection for fd %d failed\n", reconn->fd); goto remove_fd; @@ -448,7 +481,7 @@ vhost_user_client_reconnect(void *arg __rte_unused) if (ret == -1) continue; - RTE_LOG(INFO, VHOST_CONFIG, + VHOST_LOG_CONFIG(INFO, "%s: connected\n", reconn->vsocket->path); vhost_user_add_connection(reconn->fd, reconn->vsocket); remove_fd: @@ -467,31 +500,22 @@ static int vhost_user_reconnect_init(void) { int ret; - char thread_name[RTE_MAX_THREAD_NAME_LEN]; ret = pthread_mutex_init(&reconn_list.mutex, NULL); if (ret < 0) { - RTE_LOG(ERR, VHOST_CONFIG, "failed to initialize mutex"); + VHOST_LOG_CONFIG(ERR, "failed to initialize mutex"); return ret; } TAILQ_INIT(&reconn_list.head); - ret = pthread_create(&reconn_tid, NULL, + ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL, vhost_user_client_reconnect, NULL); if (ret != 0) { - RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread"); + VHOST_LOG_CONFIG(ERR, "failed to create reconnect thread"); if (pthread_mutex_destroy(&reconn_list.mutex)) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "failed to destroy reconnect mutex"); } - } else { - snprintf(thread_name, sizeof(thread_name), - "vhost-reconn"); - - if (rte_thread_setname(reconn_tid, thread_name)) { - RTE_LOG(DEBUG, VHOST_CONFIG, - "failed to set reconnect thread name"); - } } return ret; @@ -512,7 +536,7 @@ vhost_user_start_client(struct vhost_user_socket *vsocket) return 0; } - RTE_LOG(WARNING, VHOST_CONFIG, + VHOST_LOG_CONFIG(WARNING, "failed to connect to %s: %s\n", path, strerror(errno)); @@ -521,10 +545,10 @@ vhost_user_start_client(struct vhost_user_socket *vsocket) return -1; } - RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path); + VHOST_LOG_CONFIG(INFO, "%s: reconnecting...\n", path); reconn = malloc(sizeof(*reconn)); if (reconn == NULL) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "failed to allocate memory for reconnect\n"); close(fd); return -1; @@ 
@@ -544,6 +568,9 @@ find_vhost_user_socket(const char *path)
 {
 	int i;
 
+	if (path == NULL)
+		return NULL;
+
 	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
 		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
 
@@ -555,17 +582,18 @@ find_vhost_user_socket(const char *path)
 }
 
 int
-rte_vhost_driver_attach_vdpa_device(const char *path, int did)
+rte_vhost_driver_attach_vdpa_device(const char *path,
+		struct rte_vdpa_device *dev)
 {
 	struct vhost_user_socket *vsocket;
 
-	if (rte_vdpa_get_device(did) == NULL)
+	if (dev == NULL || path == NULL)
 		return -1;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		vsocket->vdpa_dev_id = did;
+		vsocket->vdpa_dev = dev;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
@@ -579,25 +607,25 @@ rte_vhost_driver_detach_vdpa_device(const char *path)
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		vsocket->vdpa_dev_id = -1;
+		vsocket->vdpa_dev = NULL;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
 	return vsocket ? 0 : -1;
 }
 
-int
-rte_vhost_driver_get_vdpa_device_id(const char *path)
+struct rte_vdpa_device *
+rte_vhost_driver_get_vdpa_device(const char *path)
 {
 	struct vhost_user_socket *vsocket;
-	int did = -1;
+	struct rte_vdpa_device *dev = NULL;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (vsocket)
-		did = vsocket->vdpa_dev_id;
+		dev = vsocket->vdpa_dev;
 	pthread_mutex_unlock(&vhost_user.mutex);
 
-	return did;
+	return dev;
 }
 
 int
@@ -670,27 +698,25 @@ rte_vhost_driver_get_features(const char *path, uint64_t *features)
 	struct vhost_user_socket *vsocket;
 	uint64_t vdpa_features;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"socket file %s is not registered yet.\n", path);
 		ret = -1;
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_features) {
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
 		*features = vsocket->features;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+	if (vdpa_dev->ops->get_features(vdpa_dev, &vdpa_features) < 0) {
+		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa features "
 				"for socket file %s.\n", path);
 		ret = -1;
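With the integer device id gone, the attach/detach/get API above traffics directly in struct rte_vdpa_device pointers, and the per-socket binding lives in vsocket->vdpa_dev. A hedged usage sketch built only from the functions in this diff; how the application obtains dev in the first place is outside this file:

#include <rte_vhost.h>
#include <rte_vdpa.h>

static int
example_bind_vdpa(const char *path, struct rte_vdpa_device *dev)
{
	/* NULL path or dev is now rejected up front. */
	if (rte_vhost_driver_attach_vdpa_device(path, dev) < 0)
		return -1;

	/* The bound device can later be looked up by socket path. */
	if (rte_vhost_driver_get_vdpa_device(path) != dev)
		return -1;

	return rte_vhost_driver_detach_vdpa_device(path);
}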
@@ -704,6 +730,20 @@ unlock_exit:
 	return ret;
 }
 
+int
+rte_vhost_driver_set_protocol_features(const char *path,
+		uint64_t protocol_features)
+{
+	struct vhost_user_socket *vsocket;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (vsocket)
+		vsocket->protocol_features = protocol_features;
+	pthread_mutex_unlock(&vhost_user.mutex);
+	return vsocket ? 0 : -1;
+}
+
 int
 rte_vhost_driver_get_protocol_features(const char *path,
 		uint64_t *protocol_features)
@@ -711,35 +751,33 @@ rte_vhost_driver_get_protocol_features(const char *path,
 	struct vhost_user_socket *vsocket;
 	uint64_t vdpa_protocol_features;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"socket file %s is not registered yet.\n", path);
 		ret = -1;
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
-		*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
+		*protocol_features = vsocket->protocol_features;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_protocol_features(did,
+	if (vdpa_dev->ops->get_protocol_features(vdpa_dev,
 				&vdpa_protocol_features) < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa protocol features "
 				"for socket file %s.\n", path);
 		ret = -1;
 		goto unlock_exit;
 	}
 
-	*protocol_features = VHOST_USER_PROTOCOL_FEATURES
+	*protocol_features = vsocket->protocol_features
 		& vdpa_protocol_features;
 
 unlock_exit:
@@ -753,27 +791,25 @@ rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
 	struct vhost_user_socket *vsocket;
 	uint32_t vdpa_queue_num;
 	struct rte_vdpa_device *vdpa_dev;
-	int did = -1;
 	int ret = 0;
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
 	if (!vsocket) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"socket file %s is not registered yet.\n", path);
 		ret = -1;
 		goto unlock_exit;
 	}
 
-	did = vsocket->vdpa_dev_id;
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
 		*queue_num = VHOST_MAX_QUEUE_PAIRS;
 		goto unlock_exit;
 	}
 
-	if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+	if (vdpa_dev->ops->get_queue_num(vdpa_dev, &vdpa_queue_num) < 0) {
+		VHOST_LOG_CONFIG(ERR,
 				"failed to get vdpa queue number "
 				"for socket file %s.\n", path);
 		ret = -1;
@@ -787,6 +823,20 @@ unlock_exit:
 	return ret;
 }
 
+static void
+vhost_user_socket_mem_free(struct vhost_user_socket *vsocket)
+{
+	if (vsocket && vsocket->path) {
+		free(vsocket->path);
+		vsocket->path = NULL;
+	}
+
+	if (vsocket) {
+		free(vsocket);
+		vsocket = NULL;
+	}
+}
+
 /*
  * Register a new vhost-user socket; here we could act as server
  * (the default case), or client (when RTE_VHOST_USER_CLIENT) flag
@@ -804,7 +854,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 
 	pthread_mutex_lock(&vhost_user.mutex);
 
 	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"error: the number of vhost sockets reaches maximum\n");
 		goto out;
 	}
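The new rte_vhost_driver_set_protocol_features() lets each socket carry its own protocol-feature set (stored in vsocket->protocol_features and, as the later hunks show, seeded from VHOST_USER_PROTOCOL_FEATURES at registration) instead of the former library-wide constant. A hedged read-modify-write sketch; the specific bit cleared is illustrative and assumes the VHOST_USER_PROTOCOL_F_* macros are visible to the application:

#include <rte_vhost.h>

static int
example_trim_protocol_features(const char *path)
{
	uint64_t pf;

	if (rte_vhost_driver_get_protocol_features(path, &pf) < 0)
		return -1;

	/* e.g. withdraw postcopy support for this socket only */
	pf &= ~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);

	return rte_vhost_driver_set_protocol_features(path, pf);
}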
@@ -815,19 +865,41 @@
 	memset(vsocket, 0, sizeof(struct vhost_user_socket));
 	vsocket->path = strdup(path);
 	if (vsocket->path == NULL) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"error: failed to copy socket path string\n");
-		free(vsocket);
+		vhost_user_socket_mem_free(vsocket);
 		goto out;
 	}
 	TAILQ_INIT(&vsocket->conn_list);
 	ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
 	if (ret) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"error: failed to init connection mutex\n");
 		goto out_free;
 	}
+	vsocket->vdpa_dev = NULL;
 	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
+	vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
+	vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
+
+	if (vsocket->dequeue_zero_copy &&
+	    (flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
+		VHOST_LOG_CONFIG(ERR,
+			"error: enabling dequeue zero copy and IOMMU features "
+			"simultaneously is not supported\n");
+		goto out_mutex;
+	}
+
+	vsocket->async_copy = flags & RTE_VHOST_USER_ASYNC_COPY;
+
+	if (vsocket->async_copy &&
+		(flags & (RTE_VHOST_USER_IOMMU_SUPPORT |
+		RTE_VHOST_USER_POSTCOPY_SUPPORT))) {
+		VHOST_LOG_CONFIG(ERR, "error: enabling async copy and IOMMU "
+			"or post-copy feature simultaneously is not "
+			"supported\n");
+		goto out_mutex;
+	}
 
 	/*
 	 * Set the supported features correctly for the builtin vhost-user
@@ -844,12 +916,83 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
 	vsocket->use_builtin_virtio_net = true;
 	vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
 	vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
+	vsocket->protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+
+	/*
+	 * Dequeue zero copy can't assure descriptors returned in order.
+	 * Also, it requires that the guest memory is populated, which is
+	 * not compatible with postcopy.
+	 */
+	if (vsocket->dequeue_zero_copy) {
+		if (vsocket->extbuf) {
+			VHOST_LOG_CONFIG(ERR,
+			"error: zero copy is incompatible with external buffers\n");
+			ret = -1;
+			goto out_mutex;
+		}
+		if (vsocket->linearbuf) {
+			VHOST_LOG_CONFIG(ERR,
+			"error: zero copy is incompatible with linear buffers\n");
+			ret = -1;
+			goto out_mutex;
+		}
+		if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
+			VHOST_LOG_CONFIG(ERR,
+			"error: zero copy is incompatible with vhost client mode\n");
+			ret = -1;
+			goto out_mutex;
+		}
+		vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+		vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+
+		VHOST_LOG_CONFIG(INFO,
+			"Dequeue zero copy requested, disabling postcopy support\n");
+		vsocket->protocol_features &=
+			~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
+	}
+
+	if (vsocket->async_copy) {
+		vsocket->supported_features &= ~(1ULL << VHOST_F_LOG_ALL);
+		vsocket->features &= ~(1ULL << VHOST_F_LOG_ALL);
+		VHOST_LOG_CONFIG(INFO,
+			"Logging feature is disabled in async copy mode\n");
+	}
+
+	/*
+	 * We won't be able to receive a buffer from the guest in linear mode
+	 * without external buffers if it does not fit in a single mbuf,
+	 * which is likely when segmentation offloading is enabled.
+	 */
+	if (vsocket->linearbuf && !vsocket->extbuf) {
+		uint64_t seg_offload_features =
+				(1ULL << VIRTIO_NET_F_HOST_TSO4) |
+				(1ULL << VIRTIO_NET_F_HOST_TSO6) |
+				(1ULL << VIRTIO_NET_F_HOST_UFO);
+
+		VHOST_LOG_CONFIG(INFO,
+			"Linear buffers requested without external buffers, "
+			"disabling host segmentation offloading support\n");
+		vsocket->supported_features &= ~seg_offload_features;
+		vsocket->features &= ~seg_offload_features;
+	}
 
 	if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
 		vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
 		vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
 	}
 
+	if (!(flags & RTE_VHOST_USER_POSTCOPY_SUPPORT)) {
+		vsocket->protocol_features &=
+			~(1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT);
+	} else {
+#ifndef RTE_LIBRTE_VHOST_POSTCOPY
+		VHOST_LOG_CONFIG(ERR,
+			"Postcopy requested but not compiled\n");
+		ret = -1;
+		goto out_mutex;
+#endif
+	}
+
 	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
 		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
 		if (vsocket->reconnect && reconn_tid == 0) {
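Taken together, the checks above make several flag combinations fail at rte_vhost_driver_register() time rather than misbehave later: dequeue zero copy rejects IOMMU, external buffers, linear buffers, and client mode, while async copy rejects IOMMU and postcopy. A hedged sketch of a valid combination; the socket path is illustrative:

#include <rte_vhost.h>

static int
example_register_linear_rx(void)
{
	/*
	 * Valid per the checks above: linear receive buffers backed by
	 * external buffers, so oversized frames still have somewhere to go.
	 */
	uint64_t flags = RTE_VHOST_USER_EXTBUF_SUPPORT |
			RTE_VHOST_USER_LINEARBUF_SUPPORT;

	/*
	 * By contrast, a combination such as RTE_VHOST_USER_DEQUEUE_ZERO_COPY
	 * | RTE_VHOST_USER_CLIENT would now be rejected at this call.
	 */
	return rte_vhost_driver_register("/tmp/example-vhost.sock", flags);
}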
@@ -871,12 +1014,11 @@ out_mutex:
 	if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"error: failed to destroy connection mutex\n");
 	}
 out_free:
-	free(vsocket->path);
-	free(vsocket);
+	vhost_user_socket_mem_free(vsocket);
 out:
 	pthread_mutex_unlock(&vhost_user.mutex);
 
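Both error labels now funnel through vhost_user_socket_mem_free(), which tolerates a partially constructed socket, so out_free can no longer free a path pointer that was never duplicated. The pattern in isolation, with an illustrative type standing in for vhost_user_socket:

#include <stdlib.h>

struct example_sock {
	char *path;	/* may still be NULL on early error paths */
};

static void
example_sock_mem_free(struct example_sock *s)
{
	if (s == NULL)
		return;

	free(s->path);	/* free(NULL) is defined to be a no-op */
	s->path = NULL;
	free(s);
}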
@@ -917,28 +1059,37 @@ rte_vhost_driver_unregister(const char *path)
 	int count;
 	struct vhost_user_connection *conn, *next;
 
+	if (path == NULL)
+		return -1;
+
+again:
 	pthread_mutex_lock(&vhost_user.mutex);
 
 	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
 		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
 
 		if (!strcmp(vsocket->path, path)) {
-			if (vsocket->is_server) {
-				fdset_del(&vhost_user.fdset, vsocket->socket_fd);
-				close(vsocket->socket_fd);
-				unlink(path);
-			} else if (vsocket->reconnect) {
-				vhost_user_remove_reconnect(vsocket);
-			}
-
 			pthread_mutex_lock(&vsocket->conn_mutex);
 			for (conn = TAILQ_FIRST(&vsocket->conn_list);
 			     conn != NULL;
 			     conn = next) {
 				next = TAILQ_NEXT(conn, next);
 
-				fdset_del(&vhost_user.fdset, conn->connfd);
-				RTE_LOG(INFO, VHOST_CONFIG,
+				/*
+				 * If r/wcb is executing, release vsocket's
+				 * conn_mutex and vhost_user's mutex locks, and
+				 * try again since the r/wcb may use the
+				 * conn_mutex and mutex locks.
+				 */
+				if (fdset_try_del(&vhost_user.fdset,
+						  conn->connfd) == -1) {
+					pthread_mutex_unlock(
+							&vsocket->conn_mutex);
+					pthread_mutex_unlock(&vhost_user.mutex);
+					goto again;
+				}
+
+				VHOST_LOG_CONFIG(INFO,
 					"free connfd = %d for device '%s'\n",
 					conn->connfd, path);
 				close(conn->connfd);
@@ -948,9 +1099,26 @@ rte_vhost_driver_unregister(const char *path)
 			}
 			pthread_mutex_unlock(&vsocket->conn_mutex);
 
+			if (vsocket->is_server) {
+				/*
+				 * If r/wcb is executing, release vhost_user's
+				 * mutex lock, and try again since the r/wcb
+				 * may use the mutex lock.
+				 */
+				if (fdset_try_del(&vhost_user.fdset,
+						vsocket->socket_fd) == -1) {
+					pthread_mutex_unlock(&vhost_user.mutex);
+					goto again;
+				}
+
+				close(vsocket->socket_fd);
+				unlink(path);
+			} else if (vsocket->reconnect) {
+				vhost_user_remove_reconnect(vsocket);
+			}
+
 			pthread_mutex_destroy(&vsocket->conn_mutex);
-			free(vsocket->path);
-			free(vsocket);
+			vhost_user_socket_mem_free(vsocket);
 
 			count = --vhost_user.vsocket_cnt;
 			vhost_user.vsockets[i] = vhost_user.vsockets[count];
@@ -1000,7 +1168,6 @@ rte_vhost_driver_start(const char *path)
 {
 	struct vhost_user_socket *vsocket;
 	static pthread_t fdset_tid;
-	char thread_name[RTE_MAX_THREAD_NAME_LEN];
 
 	pthread_mutex_lock(&vhost_user.mutex);
 	vsocket = find_vhost_user_socket(path);
@@ -1015,27 +1182,20 @@ rte_vhost_driver_start(const char *path)
 		 * rebuild the wait list of poll.
 		 */
 		if (fdset_pipe_init(&vhost_user.fdset) < 0) {
-			RTE_LOG(ERR, VHOST_CONFIG,
+			VHOST_LOG_CONFIG(ERR,
 				"failed to create pipe for vhost fdset\n");
 			return -1;
 		}
 
-		int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
-				&vhost_user.fdset);
+		int ret = rte_ctrl_thread_create(&fdset_tid,
+			"vhost-events", NULL, fdset_event_dispatch,
+			&vhost_user.fdset);
 		if (ret != 0) {
-			RTE_LOG(ERR, VHOST_CONFIG,
+			VHOST_LOG_CONFIG(ERR,
 				"failed to create fdset handling thread");
 
 			fdset_pipe_uninit(&vhost_user.fdset);
 			return -1;
-		} else {
-			snprintf(thread_name, sizeof(thread_name),
-				 "vhost-events");
-
-			if (rte_thread_setname(fdset_tid, thread_name)) {
-				RTE_LOG(DEBUG, VHOST_CONFIG,
-					"failed to set vhost-event thread name");
-			}
 		}
 	}
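For orientation, an end-to-end sketch of how an application drives the functions touched in this patch; rte_vhost_driver_callback_register() is not part of this diff but is the documented rte_vhost call for installing the notify_ops used above:

#include <rte_vhost.h>

static int
example_start_backend(const char *path, const struct vhost_device_ops *ops)
{
	if (rte_vhost_driver_register(path, RTE_VHOST_USER_CLIENT) < 0)
		return -1;

	if (rte_vhost_driver_callback_register(path, ops) < 0 ||
			rte_vhost_driver_start(path) < 0) {
		/* unregister now tolerates in-flight read/write callbacks */
		rte_vhost_driver_unregister(path);
		return -1;
	}

	return 0;
}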