/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include "vhost_user.h"
TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);
 * Every time rte_vhost_driver_register() is invoked, an associated
 * vhost_user_socket struct will be created.
struct vhost_user_socket {
	struct vhost_user_connection_list conn_list;
	pthread_mutex_t conn_mutex;
	struct sockaddr_un un;
	bool dequeue_zero_copy;
	bool use_builtin_virtio_net;
	 * The "supported_features" field indicates the feature bits the
	 * vhost driver supports. The "features" field indicates the feature
	 * bits that remain after rte_vhost_driver_features_disable()/enable()
	 * have been applied; it is also the final set of feature bits used
	 * for vhost-user feature negotiation.
	uint64_t supported_features;
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 * If valid, one socket can have 1 connection only.
	struct vhost_device_ops const *notify_ops;
struct vhost_user_connection {
	struct vhost_user_socket *vsocket;
	TAILQ_ENTRY(vhost_user_connection) next;
#define MAX_VHOST_SOCKET 1024
	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
	pthread_mutex_t mutex;
#define MAX_VIRTIO_BACKLOG 128
static void vhost_user_server_new_connection(int fd, void *data, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
static int create_unix_socket(struct vhost_user_socket *vsocket);
static int vhost_user_start_client(struct vhost_user_socket *vsocket);
static struct vhost_user vhost_user = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
	.mutex = PTHREAD_MUTEX_INITIALIZER,
/* Return the number of bytes read on success, or a negative value on failure. */
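/*
 * Any passed file descriptors arrive as SCM_RIGHTS ancillary data on the
 * unix socket and are copied into the caller-provided fds array; slots
 * that could not be filled are cleared afterwards.
 */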
read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	memset(&msgh, 0, sizeof(msgh));
	iov.iov_len = buflen;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);
	ret = recvmsg(sockfd, &msgh, 0);
		RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed\n");
	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
		RTE_LOG(ERR, VHOST_CONFIG, "truncated msg\n");
	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
			memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
	/* Clear out unused file descriptors */
	while (got_fds < fd_num)
send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	memset(&msgh, 0, sizeof(msgh));
	iov.iov_len = buflen;
	if (fds && fd_num > 0) {
		msgh.msg_control = control;
		msgh.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msgh);
			RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n");
		cmsg->cmsg_len = CMSG_LEN(fdsize);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		memcpy(CMSG_DATA(cmsg), fds, fdsize);
		msgh.msg_control = NULL;
		msgh.msg_controllen = 0;
		ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL);
	} while (ret < 0 && errno == EINTR);
		RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
	struct vhost_user_connection *conn;
	conn = malloc(sizeof(*conn));
	vid = vhost_new_device();
	size = strnlen(vsocket->path, PATH_MAX);
	vhost_set_ifname(vid, vsocket->path, size);
	vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
	vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
	if (vsocket->dequeue_zero_copy)
		vhost_enable_dequeue_zero_copy(vid);
	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
	if (vsocket->notify_ops->new_connection) {
		ret = vsocket->notify_ops->new_connection(vid);
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to add vhost user connection with fd %d\n",
	conn->vsocket = vsocket;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add fd %d into vhost server fdset\n",
		if (vsocket->notify_ops->destroy_connection)
			vsocket->notify_ops->destroy_connection(conn->vid);
	pthread_mutex_lock(&vsocket->conn_mutex);
	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
	pthread_mutex_unlock(&vsocket->conn_mutex);
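	/*
	 * Wake the fdset polling thread so the freshly added connection fd
	 * is picked up into its poll wait list (see the pipe set up in
	 * rte_vhost_driver_start()).
	 */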
	fdset_pipe_notify(&vhost_user.fdset);
/* Callback invoked when there is a new vhost-user connection from a client. */
vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
	struct vhost_user_socket *vsocket = dat;
	fd = accept(fd, NULL, NULL);
	RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
	vhost_user_add_connection(fd, vsocket);
vhost_user_read_cb(int connfd, void *dat, int *remove)
	struct vhost_user_connection *conn = dat;
	struct vhost_user_socket *vsocket = conn->vsocket;
	ret = vhost_user_msg_handler(conn->vid, connfd);
		vhost_destroy_device(conn->vid);
		if (vsocket->notify_ops->destroy_connection)
			vsocket->notify_ops->destroy_connection(conn->vid);
		pthread_mutex_lock(&vsocket->conn_mutex);
		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
		pthread_mutex_unlock(&vsocket->conn_mutex);
		if (vsocket->reconnect) {
			create_unix_socket(vsocket);
			vhost_user_start_client(vsocket);
create_unix_socket(struct vhost_user_socket *vsocket)
	struct sockaddr_un *un = &vsocket->un;
	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
		vsocket->is_server ? "server" : "client", fd);
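	/*
	 * Client sockets are switched to non-blocking mode so that the
	 * connect attempt (and any later reconnects) does not block the
	 * caller; server sockets keep the default blocking behaviour.
	 */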
	if (!vsocket->is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost-user: can't set nonblocking mode for socket, fd: "
			"%d (%s)\n", fd, strerror(errno));
	memset(un, 0, sizeof(*un));
	un->sun_family = AF_UNIX;
	strncpy(un->sun_path, vsocket->path, sizeof(un->sun_path));
	un->sun_path[sizeof(un->sun_path) - 1] = '\0';
	vsocket->socket_fd = fd;
vhost_user_start_server(struct vhost_user_socket *vsocket)
	int fd = vsocket->socket_fd;
	const char *path = vsocket->path;
	 * bind() may fail if a socket file with the same name already
	 * exists. The library must not delete a file provided by the user,
	 * since we cannot be sure it is not in use by another application.
	 * Moreover, many applications form socket names from user input,
	 * which is prone to errors.
	 * The user must ensure that the socket file does not exist before
	 * registering the vhost driver in server mode.
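	 *
	 * Illustrative sketch (not part of this file; the socket path is
	 * hypothetical): an application that owns the path may remove a
	 * stale socket file itself before registering:
	 *
	 *   unlink("/tmp/vhost-user.sock");
	 *   rte_vhost_driver_register("/tmp/vhost-user.sock", 0);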
	ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to bind to %s: %s; remove it and try again\n",
			path, strerror(errno));
	RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);
	ret = listen(fd, MAX_VIRTIO_BACKLOG);
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add listen fd %d to vhost server fdset\n",
struct vhost_user_reconnect {
	struct sockaddr_un un;
	struct vhost_user_socket *vsocket;
	TAILQ_ENTRY(vhost_user_reconnect) next;
TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
	struct vhost_user_reconnect_tailq_list head;
	pthread_mutex_t mutex;
static struct vhost_user_reconnect_list reconn_list;
static pthread_t reconn_tid;
vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
	ret = connect(fd, un, sz);
	if (ret < 0 && errno != EISCONN)
	flags = fcntl(fd, F_GETFL, 0);
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't get flags for connfd %d\n", fd);
	if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't disable nonblocking on fd %d\n", fd);
vhost_user_client_reconnect(void *arg __rte_unused)
	struct vhost_user_reconnect *reconn, *next;
		pthread_mutex_lock(&reconn_list.mutex);
		 * An equivalent implementation of TAILQ_FOREACH_SAFE,
		 * which is not available on all platforms.
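		 *
		 * For reference, that macro (where <sys/queue.h> provides it)
		 * expands roughly to:
		 *
		 *   for ((var) = TAILQ_FIRST((head));
		 *       (var) && ((tvar) = TAILQ_NEXT((var), field), 1);
		 *       (var) = (tvar))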
		for (reconn = TAILQ_FIRST(&reconn_list.head);
		     reconn != NULL; reconn = next) {
			next = TAILQ_NEXT(reconn, next);
			ret = vhost_user_connect_nonblock(reconn->fd,
					(struct sockaddr *)&reconn->un,
				RTE_LOG(ERR, VHOST_CONFIG,
					"reconnection for fd %d failed\n",
			RTE_LOG(INFO, VHOST_CONFIG,
				"%s: connected\n", reconn->vsocket->path);
			vhost_user_add_connection(reconn->fd, reconn->vsocket);
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
		pthread_mutex_unlock(&reconn_list.mutex);
vhost_user_reconnect_init(void)
	ret = pthread_mutex_init(&reconn_list.mutex, NULL);
		RTE_LOG(ERR, VHOST_CONFIG, "failed to initialize mutex");
	TAILQ_INIT(&reconn_list.head);
	ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
			vhost_user_client_reconnect, NULL);
		RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
		if (pthread_mutex_destroy(&reconn_list.mutex)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to destroy reconnect mutex");
vhost_user_start_client(struct vhost_user_socket *vsocket)
	int fd = vsocket->socket_fd;
	const char *path = vsocket->path;
	struct vhost_user_reconnect *reconn;
	ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&vsocket->un,
			sizeof(vsocket->un));
		vhost_user_add_connection(fd, vsocket);
	RTE_LOG(WARNING, VHOST_CONFIG,
		"failed to connect to %s: %s\n",
		path, strerror(errno));
	if (ret == -2 || !vsocket->reconnect) {
	RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
	reconn = malloc(sizeof(*reconn));
	if (reconn == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for reconnect\n");
	reconn->un = vsocket->un;
	reconn->vsocket = vsocket;
	pthread_mutex_lock(&reconn_list.mutex);
	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
	pthread_mutex_unlock(&reconn_list.mutex);
static struct vhost_user_socket *
find_vhost_user_socket(const char *path)
	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
		if (!strcmp(vsocket->path, path))
rte_vhost_driver_attach_vdpa_device(const char *path, int did)
	struct vhost_user_socket *vsocket;
	if (rte_vdpa_get_device(did) == NULL)
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		vsocket->vdpa_dev_id = did;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
rte_vhost_driver_detach_vdpa_device(const char *path)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		vsocket->vdpa_dev_id = -1;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
rte_vhost_driver_get_vdpa_device_id(const char *path)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		did = vsocket->vdpa_dev_id;
	pthread_mutex_unlock(&vhost_user.mutex);
rte_vhost_driver_disable_features(const char *path, uint64_t features)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	/* Note that use_builtin_virtio_net is not affected by this function
	 * since callers may want to selectively disable features of the
	 * built-in vhost net device backend.
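	 *
	 * Illustrative example (the feature bit chosen here is arbitrary):
	 *
	 *   rte_vhost_driver_disable_features(path,
	 *           1ULL << VIRTIO_NET_F_MRG_RXBUF);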
		vsocket->features &= ~features;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
rte_vhost_driver_enable_features(const char *path, uint64_t features)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		if ((vsocket->supported_features & features) != features) {
			 * trying to enable features the driver doesn't
			pthread_mutex_unlock(&vhost_user.mutex);
		vsocket->features |= features;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
rte_vhost_driver_set_features(const char *path, uint64_t features)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		vsocket->supported_features = features;
		vsocket->features = features;
		/* Anyone setting feature bits is implementing their own vhost
		vsocket->use_builtin_virtio_net = false;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
rte_vhost_driver_get_features(const char *path, uint64_t *features)
	struct vhost_user_socket *vsocket;
	uint64_t vdpa_features;
	struct rte_vdpa_device *vdpa_dev;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
	did = vsocket->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev || !vdpa_dev->ops->get_features) {
		*features = vsocket->features;
	if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to get vdpa features "
			"for socket file %s.\n", path);
	*features = vsocket->features & vdpa_features;
	pthread_mutex_unlock(&vhost_user.mutex);
rte_vhost_driver_get_protocol_features(const char *path,
	uint64_t *protocol_features)
	struct vhost_user_socket *vsocket;
	uint64_t vdpa_protocol_features;
	struct rte_vdpa_device *vdpa_dev;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
	did = vsocket->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
		*protocol_features = VHOST_USER_PROTOCOL_FEATURES;
	if (vdpa_dev->ops->get_protocol_features(did,
			&vdpa_protocol_features) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to get vdpa protocol features "
			"for socket file %s.\n", path);
	*protocol_features = VHOST_USER_PROTOCOL_FEATURES
		& vdpa_protocol_features;
	pthread_mutex_unlock(&vhost_user.mutex);
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
	struct vhost_user_socket *vsocket;
	uint32_t vdpa_queue_num;
	struct rte_vdpa_device *vdpa_dev;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
	did = vsocket->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
		*queue_num = VHOST_MAX_QUEUE_PAIRS;
	if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to get vdpa queue number "
			"for socket file %s.\n", path);
	*queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
	pthread_mutex_unlock(&vhost_user.mutex);
vhost_user_socket_mem_free(struct vhost_user_socket *vsocket)
	if (vsocket && vsocket->path) {
		vsocket->path = NULL;
 * Register a new vhost-user socket; here we could act as a server
 * (the default case) or as a client (when the RTE_VHOST_USER_CLIENT flag
rte_vhost_driver_register(const char *path, uint64_t flags)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
		RTE_LOG(ERR, VHOST_CONFIG,
816 "error: the number of vhost sockets reaches maximum\n");
	vsocket = malloc(sizeof(struct vhost_user_socket));
	memset(vsocket, 0, sizeof(struct vhost_user_socket));
	vsocket->path = strdup(path);
	if (vsocket->path == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: failed to copy socket path string\n");
		vhost_user_socket_mem_free(vsocket);
	TAILQ_INIT(&vsocket->conn_list);
	ret = pthread_mutex_init(&vsocket->conn_mutex, NULL);
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: failed to init connection mutex\n");
	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	 * Set the supported features correctly for the builtin vhost-user
	 * Applications know nothing about the features the builtin virtio net
	 * driver (virtio_net.c) supports, thus it's not possible for them
	 * to invoke rte_vhost_driver_set_features(). To work around this, we
	 * set it unconditionally here. If the application wants to implement
	 * another vhost-user driver (say SCSI), it should call
	 * rte_vhost_driver_set_features(), which will overwrite the following
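	 *
	 * Illustrative sketch (MY_BACKEND_FEATURES is a hypothetical macro):
	 * such a driver would typically do, right after registering:
	 *
	 *   rte_vhost_driver_set_features(path, MY_BACKEND_FEATURES);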
	vsocket->use_builtin_virtio_net = true;
	vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
	vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;
	/* Dequeue zero copy cannot guarantee that descriptors are returned in order. */
	if (vsocket->dequeue_zero_copy) {
		vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER);
		vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER);
	if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
		vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
		vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
		if (vsocket->reconnect && reconn_tid == 0) {
			if (vhost_user_reconnect_init() != 0)
		vsocket->is_server = true;
	ret = create_unix_socket(vsocket);
	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
	pthread_mutex_unlock(&vhost_user.mutex);
	if (pthread_mutex_destroy(&vsocket->conn_mutex)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: failed to destroy connection mutex\n");
	vhost_user_socket_mem_free(vsocket);
	pthread_mutex_unlock(&vhost_user.mutex);
vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
	struct vhost_user_reconnect *reconn, *next;
	pthread_mutex_lock(&reconn_list.mutex);
	for (reconn = TAILQ_FIRST(&reconn_list.head);
	     reconn != NULL; reconn = next) {
		next = TAILQ_NEXT(reconn, next);
		if (reconn->vsocket == vsocket) {
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
	pthread_mutex_unlock(&reconn_list.mutex);
 * Unregister the specified vhost socket
rte_vhost_driver_unregister(const char *path)
	struct vhost_user_connection *conn, *next;
	pthread_mutex_lock(&vhost_user.mutex);
	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
		if (!strcmp(vsocket->path, path)) {
			pthread_mutex_lock(&vsocket->conn_mutex);
			for (conn = TAILQ_FIRST(&vsocket->conn_list);
				next = TAILQ_NEXT(conn, next);
				 * If the read/write callback is executing,
				 * release conn_mutex and try again, since
				 * the callback may itself take conn_mutex.
				if (fdset_try_del(&vhost_user.fdset,
						conn->connfd) == -1) {
					pthread_mutex_unlock(
						&vsocket->conn_mutex);
				RTE_LOG(INFO, VHOST_CONFIG,
					"free connfd = %d for device '%s'\n",
				vhost_destroy_device(conn->vid);
				TAILQ_REMOVE(&vsocket->conn_list, conn, next);
			pthread_mutex_unlock(&vsocket->conn_mutex);
			if (vsocket->is_server) {
				fdset_del(&vhost_user.fdset,
				close(vsocket->socket_fd);
			} else if (vsocket->reconnect) {
				vhost_user_remove_reconnect(vsocket);
			pthread_mutex_destroy(&vsocket->conn_mutex);
			vhost_user_socket_mem_free(vsocket);
			count = --vhost_user.vsocket_cnt;
			vhost_user.vsockets[i] = vhost_user.vsockets[count];
			vhost_user.vsockets[count] = NULL;
			pthread_mutex_unlock(&vhost_user.mutex);
	pthread_mutex_unlock(&vhost_user.mutex);
 * Register ops so that we can add/remove devices to/from the data core.
rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
		vsocket->notify_ops = ops;
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? 0 : -1;
struct vhost_device_ops const *
vhost_driver_callback_get(const char *path)
	struct vhost_user_socket *vsocket;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	pthread_mutex_unlock(&vhost_user.mutex);
	return vsocket ? vsocket->notify_ops : NULL;
rte_vhost_driver_start(const char *path)
	struct vhost_user_socket *vsocket;
	static pthread_t fdset_tid;
	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	pthread_mutex_unlock(&vhost_user.mutex);
	if (fdset_tid == 0) {
		 * Create a pipe that the fdset poll loop waits on; writing to
		 * it notifies the loop to rebuild its poll wait list.
		if (fdset_pipe_init(&vhost_user.fdset) < 0) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to create pipe for vhost fdset\n");
		int ret = rte_ctrl_thread_create(&fdset_tid,
			"vhost-events", NULL, fdset_event_dispatch,
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to create fdset handling thread");
			fdset_pipe_uninit(&vhost_user.fdset);
	if (vsocket->is_server)
		return vhost_user_start_server(vsocket);
	return vhost_user_start_client(vsocket);
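
/*
 * Illustrative application-side usage (sketch only; the socket path and the
 * callback names are hypothetical, not part of this library):
 *
 *   static const struct vhost_device_ops vhost_ops = {
 *           .new_device     = app_new_device_cb,
 *           .destroy_device = app_destroy_device_cb,
 *   };
 *
 *   rte_vhost_driver_register("/tmp/vhost-user.sock", RTE_VHOST_USER_CLIENT);
 *   rte_vhost_driver_callback_register("/tmp/vhost-user.sock", &vhost_ops);
 *   rte_vhost_driver_start("/tmp/vhost-user.sock");
 */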