/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/queue.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>

#include <rte_log.h>

#include "fd_man.h"
#include "vhost.h"
#include "vhost_user.h"

TAILQ_HEAD(vhost_user_connection_list, vhost_user_connection);

/*
 * Every time rte_vhost_driver_register() is invoked, an associated
 * vhost_user_socket struct will be created.
 */
struct vhost_user_socket {
	struct vhost_user_connection_list conn_list;
	pthread_mutex_t conn_mutex;
	char *path;
	int listenfd;
	bool is_server;
	bool reconnect;
	bool dequeue_zero_copy;

	/*
	 * The "supported_features" indicates the feature bits the
	 * vhost driver supports. The "features" indicates the feature
	 * bits after the rte_vhost_driver_features_disable/enable().
	 * It is also the final feature bits used for vhost-user
	 * features negotiation.
	 */
	uint64_t supported_features;
	uint64_t features;

	struct vhost_device_ops const *notify_ops;
};

struct vhost_user_connection {
	struct vhost_user_socket *vsocket;
	int connfd;
	int vid;

	TAILQ_ENTRY(vhost_user_connection) next;
};

#define MAX_VHOST_SOCKET 1024
struct vhost_user {
	struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
	struct fdset fdset;
	int vsocket_cnt;
	pthread_mutex_t mutex;
};

#define MAX_VIRTIO_BACKLOG 128

static void vhost_user_server_new_connection(int fd, void *dat, int *remove);
static void vhost_user_read_cb(int fd, void *dat, int *remove);
static int vhost_user_create_client(struct vhost_user_socket *vsocket);
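
/*
 * Global driver state: every registered vhost-user socket plus the fdset
 * consumed by the event dispatch loop. Guarded by "mutex".
 */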
static struct vhost_user vhost_user = {
	.fdset = {
		.fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
		.fd_mutex = PTHREAD_MUTEX_INITIALIZER,
	},
	.vsocket_cnt = 0,
	.mutex = PTHREAD_MUTEX_INITIALIZER,
};

/* Return the number of bytes read on success, or a negative value on failure. */
int
read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	ret = recvmsg(sockfd, &msgh, 0);
	if (ret <= 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed\n");
		return ret;
	}

	if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
		RTE_LOG(ERR, VHOST_CONFIG, "truncated msg\n");
		return -1;
	}

	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(fds, CMSG_DATA(cmsg), fdsize);
			break;
		}
	}

	return ret;
}
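
/*
 * Send a vhost-user message on sockfd; any file descriptors in fds are
 * attached as SCM_RIGHTS ancillary data. Returns the number of bytes sent
 * on success, or a negative value on failure.
 */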
int
send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
{
	struct iovec iov;
	struct msghdr msgh;
	size_t fdsize = fd_num * sizeof(int);
	char control[CMSG_SPACE(fdsize)];
	struct cmsghdr *cmsg;
	int ret;

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = buf;
	iov.iov_len = buflen;
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;

	if (fds && fd_num > 0) {
		msgh.msg_control = control;
		msgh.msg_controllen = sizeof(control);
		cmsg = CMSG_FIRSTHDR(&msgh);
		cmsg->cmsg_len = CMSG_LEN(fdsize);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		memcpy(CMSG_DATA(cmsg), fds, fdsize);
	} else {
		msgh.msg_control = NULL;
		msgh.msg_controllen = 0;
	}

	do {
		ret = sendmsg(sockfd, &msgh, 0);
	} while (ret < 0 && errno == EINTR);

	if (ret < 0)
		RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");

	return ret;
}
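
/*
 * Wrap an accepted/connected socket fd in a vhost_user_connection: create a
 * new vhost device, name it after the socket path, register the fd with the
 * event dispatcher and link the connection into the per-socket list.
 */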
static void
vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
{
	int vid;
	size_t size;
	struct vhost_user_connection *conn;
	int ret;

	conn = malloc(sizeof(*conn));
	if (conn == NULL) {
		close(fd);
		return;
	}

	vid = vhost_new_device();
	if (vid == -1) {
		close(fd);
		free(conn);
		return;
	}

	size = strnlen(vsocket->path, PATH_MAX);
	vhost_set_ifname(vid, vsocket->path, size);

	if (vsocket->dequeue_zero_copy)
		vhost_enable_dequeue_zero_copy(vid);

	RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);

	conn->connfd = fd;
	conn->vsocket = vsocket;
	conn->vid = vid;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_read_cb,
			NULL, conn);
	if (ret < 0) {
		free(conn);
		close(fd);
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add fd %d into vhost server fdset\n",
			fd);
		return;
	}

	pthread_mutex_lock(&vsocket->conn_mutex);
	TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
	pthread_mutex_unlock(&vsocket->conn_mutex);
}

/* Callback invoked when a new vhost-user connection arrives from a client. */
static void
vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
{
	struct vhost_user_socket *vsocket = dat;

	fd = accept(fd, NULL, NULL);
	if (fd < 0)
		return;

	RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
	vhost_user_add_connection(fd, vsocket);
}
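
/*
 * Event-loop callback for an established connection: dispatch incoming
 * vhost-user messages and, on error, tear the connection down (queuing a
 * reconnect when the socket was registered as a reconnecting client).
 */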
static void
vhost_user_read_cb(int connfd, void *dat, int *remove)
{
	struct vhost_user_connection *conn = dat;
	struct vhost_user_socket *vsocket = conn->vsocket;
	int ret;

	ret = vhost_user_msg_handler(conn->vid, connfd);
	if (ret < 0) {
		close(connfd);
		*remove = 1;
		vhost_destroy_device(conn->vid);

		pthread_mutex_lock(&vsocket->conn_mutex);
		TAILQ_REMOVE(&vsocket->conn_list, conn, next);
		pthread_mutex_unlock(&vsocket->conn_mutex);

		free(conn);

		if (vsocket->reconnect)
			vhost_user_create_client(vsocket);
	}
}
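
/*
 * Create an AF_UNIX stream socket for the given path. Client sockets are
 * switched to non-blocking mode so connect() cannot stall the caller; the
 * prepared sockaddr_un is returned through *un.
 */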
static int
create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
{
	int fd;

	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (fd < 0)
		return -1;
	RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
		is_server ? "server" : "client", fd);

	if (!is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost-user: can't set nonblocking mode for socket, fd: "
			"%d (%s)\n", fd, strerror(errno));
		close(fd);
		return -1;
	}

	memset(un, 0, sizeof(*un));
	un->sun_family = AF_UNIX;
	strncpy(un->sun_path, path, sizeof(un->sun_path));
	un->sun_path[sizeof(un->sun_path) - 1] = '\0';

	return fd;
}
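
/*
 * Server mode: bind the unix socket to its path, start listening and
 * register the listen fd so new client connections are accepted by
 * vhost_user_server_new_connection().
 */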
static int
vhost_user_create_server(struct vhost_user_socket *vsocket)
{
	int fd;
	int ret;
	struct sockaddr_un un;
	const char *path = vsocket->path;

	fd = create_unix_socket(path, &un, vsocket->is_server);
	if (fd < 0)
		return -1;

	ret = bind(fd, (struct sockaddr *)&un, sizeof(un));
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to bind to %s: %s; remove it and try again\n",
			path, strerror(errno));
		goto err;
	}
	RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);

	ret = listen(fd, MAX_VIRTIO_BACKLOG);
	if (ret < 0)
		goto err;

	vsocket->listenfd = fd;
	ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
			NULL, vsocket);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to add listen fd %d to vhost server fdset\n",
			fd);
		goto err;
	}

	return 0;

err:
	close(fd);
	return -1;
}
struct vhost_user_reconnect {
	struct sockaddr_un un;
	int fd;
	struct vhost_user_socket *vsocket;

	TAILQ_ENTRY(vhost_user_reconnect) next;
};

TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
struct vhost_user_reconnect_list {
	struct vhost_user_reconnect_tailq_list head;
	pthread_mutex_t mutex;
};

static struct vhost_user_reconnect_list reconn_list;
static pthread_t reconn_tid;
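
/*
 * Try to complete a non-blocking connect(). Returns 0 when the socket is
 * connected (with blocking mode restored), -1 when the attempt should be
 * retried later, and -2 on an unrecoverable error.
 */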
static int
vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
{
	int ret, flags;

	ret = connect(fd, un, sz);
	if (ret < 0 && errno != EISCONN)
		return -1;

	flags = fcntl(fd, F_GETFL, 0);
	if (flags < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't get flags for connfd %d\n", fd);
		return -2;
	}
	if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"can't disable nonblocking on fd %d\n", fd);
		return -2;
	}
	return 0;
}
static void *
vhost_user_client_reconnect(void *arg __rte_unused)
{
	int ret;
	struct vhost_user_reconnect *reconn, *next;

	while (1) {
		pthread_mutex_lock(&reconn_list.mutex);

		/*
		 * An equivalent of TAILQ_FOREACH_SAFE,
		 * which does not exist on all platforms.
		 */
		for (reconn = TAILQ_FIRST(&reconn_list.head);
		     reconn != NULL; reconn = next) {
			next = TAILQ_NEXT(reconn, next);

			ret = vhost_user_connect_nonblock(reconn->fd,
						(struct sockaddr *)&reconn->un,
						sizeof(reconn->un));
			if (ret == -2) {
				close(reconn->fd);
				RTE_LOG(ERR, VHOST_CONFIG,
					"reconnection for fd %d failed\n",
					reconn->fd);
				goto remove_fd;
			}
			if (ret == -1)
				continue;

			RTE_LOG(INFO, VHOST_CONFIG,
				"%s: connected\n", reconn->vsocket->path);
			vhost_user_add_connection(reconn->fd, reconn->vsocket);
remove_fd:
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			free(reconn);
		}

		pthread_mutex_unlock(&reconn_list.mutex);
		sleep(1);
	}

	return NULL;
}
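
/* Lazily initialize the reconnect list and spawn the reconnect thread. */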
static int
vhost_user_reconnect_init(void)
{
	int ret;

	pthread_mutex_init(&reconn_list.mutex, NULL);
	TAILQ_INIT(&reconn_list.head);

	ret = pthread_create(&reconn_tid, NULL,
			     vhost_user_client_reconnect, NULL);
	if (ret != 0)
		RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread\n");

	return ret;
}
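
/*
 * Client mode: connect to the vhost-user server socket. If the connect fails
 * and reconnection is enabled, queue the socket on reconn_list so the
 * reconnect thread keeps retrying in the background.
 */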
static int
vhost_user_create_client(struct vhost_user_socket *vsocket)
{
	int fd;
	int ret;
	struct sockaddr_un un;
	const char *path = vsocket->path;
	struct vhost_user_reconnect *reconn;

	fd = create_unix_socket(path, &un, vsocket->is_server);
	if (fd < 0)
		return -1;

	ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&un,
					  sizeof(un));
	if (ret == 0) {
		vhost_user_add_connection(fd, vsocket);
		return 0;
	}

	RTE_LOG(WARNING, VHOST_CONFIG,
		"failed to connect to %s: %s\n",
		path, strerror(errno));

	if (ret == -2 || !vsocket->reconnect) {
		close(fd);
		return -1;
	}

	RTE_LOG(INFO, VHOST_CONFIG, "%s: reconnecting...\n", path);
	reconn = malloc(sizeof(*reconn));
	if (reconn == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for reconnect\n");
		close(fd);
		return -1;
	}
	reconn->un = un;
	reconn->fd = fd;
	reconn->vsocket = vsocket;
	pthread_mutex_lock(&reconn_list.mutex);
	TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
	pthread_mutex_unlock(&reconn_list.mutex);

	return 0;
}
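
/* Look up a registered socket by path; the caller holds vhost_user.mutex. */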
static struct vhost_user_socket *
find_vhost_user_socket(const char *path)
{
	int i;

	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

		if (!strcmp(vsocket->path, path))
			return vsocket;
	}

	return NULL;
}
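
/*
 * Feature negotiation helpers: per-socket "features" starts from
 * "supported_features" and can be narrowed or re-enabled before negotiation,
 * but never beyond what the driver supports.
 */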
int
rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		vsocket->features &= ~features;
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

int
rte_vhost_driver_enable_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket) {
		if ((vsocket->supported_features & features) != features) {
			/*
			 * Trying to enable features the driver doesn't
			 * support.
			 */
			pthread_mutex_unlock(&vhost_user.mutex);
			return -1;
		}
		vsocket->features |= features;
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

int
rte_vhost_driver_set_features(const char *path, uint64_t features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket) {
		vsocket->supported_features = features;
		vsocket->features = features;
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}

int
rte_vhost_driver_get_features(const char *path, uint64_t *features)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		*features = vsocket->features;
	pthread_mutex_unlock(&vhost_user.mutex);

	if (!vsocket) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"socket file %s is not registered yet.\n", path);
		return -1;
	}

	return 0;
}

/*
 * Register a new vhost-user socket; here we could act as server
 * (the default case), or client (when the RTE_VHOST_USER_CLIENT flag
 * is set).
 */
int
rte_vhost_driver_register(const char *path, uint64_t flags)
{
	int ret = -1;
	struct vhost_user_socket *vsocket;

	if (!path)
		return -1;

	pthread_mutex_lock(&vhost_user.mutex);

	if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"error: the number of vhost sockets reaches maximum\n");
		goto out;
	}

	vsocket = malloc(sizeof(struct vhost_user_socket));
	if (!vsocket)
		goto out;
	memset(vsocket, 0, sizeof(struct vhost_user_socket));
	vsocket->path = strdup(path);
	TAILQ_INIT(&vsocket->conn_list);
	pthread_mutex_init(&vsocket->conn_mutex, NULL);
	vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;

	/*
	 * Set the supported features correctly for the builtin vhost-user
	 * net driver.
	 *
	 * Applications know nothing about features the builtin virtio net
	 * driver (virtio_net.c) supports, thus it's not possible for them
	 * to invoke rte_vhost_driver_set_features(). To work around that,
	 * we set it unconditionally here. If the application wants to
	 * implement another vhost-user driver (say SCSI), it should call
	 * rte_vhost_driver_set_features(), which will overwrite the
	 * following two values.
	 */
	vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
	vsocket->features = VIRTIO_NET_SUPPORTED_FEATURES;

	if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
		vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
		if (vsocket->reconnect && reconn_tid == 0) {
			if (vhost_user_reconnect_init() < 0) {
				free(vsocket->path);
				free(vsocket);
				goto out;
			}
		}
		ret = vhost_user_create_client(vsocket);
	} else {
		vsocket->is_server = true;
		ret = vhost_user_create_server(vsocket);
	}
	if (ret < 0) {
		free(vsocket->path);
		free(vsocket);
		goto out;
	}

	vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;

out:
	pthread_mutex_unlock(&vhost_user.mutex);

	return ret;
}
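
/*
 * Drop a socket's pending entry from the reconnect list, if any, and close
 * its file descriptor. Returns true when an entry was found and removed.
 */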
static bool
vhost_user_remove_reconnect(struct vhost_user_socket *vsocket)
{
	bool found = false;
	struct vhost_user_reconnect *reconn, *next;

	pthread_mutex_lock(&reconn_list.mutex);

	for (reconn = TAILQ_FIRST(&reconn_list.head);
	     reconn != NULL; reconn = next) {
		next = TAILQ_NEXT(reconn, next);

		if (reconn->vsocket == vsocket) {
			TAILQ_REMOVE(&reconn_list.head, reconn, next);
			close(reconn->fd);
			free(reconn);
			found = true;
			break;
		}
	}
	pthread_mutex_unlock(&reconn_list.mutex);
	return found;
}

/*
 * Unregister the specified vhost socket.
 */
int
rte_vhost_driver_unregister(const char *path)
{
	int i;
	int count;
	struct vhost_user_connection *conn, *next;

	pthread_mutex_lock(&vhost_user.mutex);

	for (i = 0; i < vhost_user.vsocket_cnt; i++) {
		struct vhost_user_socket *vsocket = vhost_user.vsockets[i];

		if (!strcmp(vsocket->path, path)) {
			if (vsocket->is_server) {
				fdset_del(&vhost_user.fdset, vsocket->listenfd);
				close(vsocket->listenfd);
				unlink(path);
			} else if (vsocket->reconnect) {
				vhost_user_remove_reconnect(vsocket);
			}

			pthread_mutex_lock(&vsocket->conn_mutex);
			for (conn = TAILQ_FIRST(&vsocket->conn_list);
			     conn != NULL;
			     conn = next) {
				next = TAILQ_NEXT(conn, next);

				fdset_del(&vhost_user.fdset, conn->connfd);
				RTE_LOG(INFO, VHOST_CONFIG,
					"free connfd = %d for device '%s'\n",
					conn->connfd, path);
				close(conn->connfd);
				vhost_destroy_device(conn->vid);
				TAILQ_REMOVE(&vsocket->conn_list, conn, next);
				free(conn);
			}
			pthread_mutex_unlock(&vsocket->conn_mutex);

			free(vsocket->path);
			free(vsocket);

			count = --vhost_user.vsocket_cnt;
			vhost_user.vsockets[i] = vhost_user.vsockets[count];
			vhost_user.vsockets[count] = NULL;
			pthread_mutex_unlock(&vhost_user.mutex);

			return 0;
		}
	}
	pthread_mutex_unlock(&vhost_user.mutex);

	return -1;
}

/*
 * Register ops so that we can add/remove devices to/from a data core.
 */
int
rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	if (vsocket)
		vsocket->notify_ops = ops;
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? 0 : -1;
}
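
/* Internal helper: return the notify ops registered for a socket path. */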
struct vhost_device_ops const *
vhost_driver_callback_get(const char *path)
{
	struct vhost_user_socket *vsocket;

	pthread_mutex_lock(&vhost_user.mutex);
	vsocket = find_vhost_user_socket(path);
	pthread_mutex_unlock(&vhost_user.mutex);

	return vsocket ? vsocket->notify_ops : NULL;
}
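
/* Block the calling thread and dispatch fd events (connections, messages). */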
int
rte_vhost_driver_session_start(void)
{
	fdset_event_dispatch(&vhost_user.fdset);
	return 0;
}