4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
42 #include <sys/socket.h>
44 #include <sys/queue.h>
50 #include <rte_virtio_net.h>
53 #include "vhost-net-user.h"
54 #include "vhost-net.h"
55 #include "virtio-net-user.h"
/*
 * Core bookkeeping for the vhost-user transport.
 * NOTE(review): several original source lines are elided in this excerpt,
 * so struct members shown here are only a subset.
 */
58 * Every time rte_vhost_driver_register() is invoked, an associated
59 * vhost_user_socket struct will be created.
/* One registered vhost-user socket (server listen socket or client endpoint). */
61 struct vhost_user_socket {
/* One established connection on a vhost_user_socket. */
68 struct vhost_user_connection {
69 struct vhost_user_socket *vsocket;
/* Upper bound on simultaneously registered vhost-user sockets. */
73 #define MAX_VHOST_SOCKET 1024
/* Global registry of all registered sockets, guarded by 'mutex' below. */
75 struct vhost_user_socket *vsockets[MAX_VHOST_SOCKET];
78 pthread_mutex_t mutex;
/* listen(2) backlog for the server-mode socket. */
81 #define MAX_VIRTIO_BACKLOG 128
/* Forward declarations for the fdset event callbacks used below. */
83 static void vhost_user_server_new_connection(int fd, void *data, int *remove);
84 static void vhost_user_msg_handler(int fd, void *dat, int *remove);
85 static int vhost_user_create_client(struct vhost_user_socket *vsocket);
/*
 * Singleton instance: fd table starts fully empty (fd = -1 per slot) and
 * both mutexes are statically initialized.
 */
87 static struct vhost_user vhost_user = {
89 .fd = { [0 ... MAX_FDS - 1] = {-1, NULL, NULL, NULL, 0} },
90 .fd_mutex = PTHREAD_MUTEX_INITIALIZER,
94 .mutex = PTHREAD_MUTEX_INITIALIZER,
/*
 * Human-readable names for each vhost-user protocol request, indexed by the
 * VHOST_USER_* request id; used for logging in vhost_user_msg_handler().
 * NOTE(review): the closing brace of this table is elided in this excerpt.
 */
97 static const char *vhost_message_str[VHOST_USER_MAX] = {
98 [VHOST_USER_NONE] = "VHOST_USER_NONE",
99 [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
100 [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
101 [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
102 [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
103 [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
104 [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
105 [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
106 [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
107 [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
108 [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
109 [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
110 [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
111 [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
112 [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
113 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
114 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
115 [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
116 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
117 [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
/*
 * Receive up to 'buflen' bytes from a connected UNIX-domain socket,
 * harvesting up to 'fd_num' file descriptors passed as SCM_RIGHTS
 * ancillary data into 'fds'.
 *
 * The control buffer is sized with CMSG_SPACE() for exactly fd_num ints;
 * a message whose data or control part was truncated (MSG_TRUNC/MSG_CTRUNC)
 * is rejected as an error.
 *
 * NOTE(review): several lines (msghdr/iov declarations, iov_base setup,
 * return paths, closing brace) are elided in this excerpt.
 */
120 /* return bytes# of read on success or negative val on failure. */
122 read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
126 size_t fdsize = fd_num * sizeof(int);
127 char control[CMSG_SPACE(fdsize)];
128 struct cmsghdr *cmsg;
131 memset(&msgh, 0, sizeof(msgh));
133 iov.iov_len = buflen;
137 msgh.msg_control = control;
138 msgh.msg_controllen = sizeof(control);
140 ret = recvmsg(sockfd, &msgh, 0);
142 RTE_LOG(ERR, VHOST_CONFIG, "recvmsg failed\n");
146 if (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC)) {
147 RTE_LOG(ERR, VHOST_CONFIG, "truncted msg\n");
/* Walk the control messages and copy out the SCM_RIGHTS fd payload. */
151 for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
152 cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
153 if ((cmsg->cmsg_level == SOL_SOCKET) &&
154 (cmsg->cmsg_type == SCM_RIGHTS)) {
155 memcpy(fds, CMSG_DATA(cmsg), fdsize);
/*
 * Read one vhost-user message: first the fixed-size header (plus any
 * SCM_RIGHTS fds, stored into msg->fds), then — if the header announces a
 * payload (msg->size != 0) — the variable-size payload via a plain read().
 *
 * A payload larger than sizeof(msg->payload) is rejected, as is a short
 * read of the payload.
 *
 * NOTE(review): error-return lines and the closing brace are elided in
 * this excerpt.
 */
163 /* return bytes# of read on success or negative val on failure. */
165 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
169 ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
170 msg->fds, VHOST_MEMORY_MAX_NREGIONS);
174 if (msg && msg->size) {
/* Guard against a malicious/corrupt header over-stating the payload. */
175 if (msg->size > sizeof(msg->payload)) {
176 RTE_LOG(ERR, VHOST_CONFIG,
177 "invalid msg size: %d\n", msg->size);
180 ret = read(sockfd, &msg->payload, msg->size);
183 if (ret != (int)msg->size) {
184 RTE_LOG(ERR, VHOST_CONFIG,
185 "read control message failed\n");
/*
 * Send 'buflen' bytes over a connected UNIX-domain socket, optionally
 * attaching 'fd_num' file descriptors as SCM_RIGHTS ancillary data.
 * When no fds are supplied the control fields are explicitly cleared.
 *
 * The sendmsg() is retried while it fails with EINTR.
 *
 * NOTE(review): several lines (msghdr/iov declarations, iov_base setup,
 * return paths, closing brace) are elided in this excerpt.
 */
194 send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
199 size_t fdsize = fd_num * sizeof(int);
200 char control[CMSG_SPACE(fdsize)];
201 struct cmsghdr *cmsg;
204 memset(&msgh, 0, sizeof(msgh));
206 iov.iov_len = buflen;
211 if (fds && fd_num > 0) {
212 msgh.msg_control = control;
213 msgh.msg_controllen = sizeof(control);
214 cmsg = CMSG_FIRSTHDR(&msgh);
215 cmsg->cmsg_len = CMSG_LEN(fdsize);
216 cmsg->cmsg_level = SOL_SOCKET;
217 cmsg->cmsg_type = SCM_RIGHTS;
218 memcpy(CMSG_DATA(cmsg), fds, fdsize);
220 msgh.msg_control = NULL;
221 msgh.msg_controllen = 0;
/* Restart the send if interrupted by a signal. */
225 ret = sendmsg(sockfd, &msgh, 0);
226 } while (ret < 0 && errno == EINTR);
229 RTE_LOG(ERR, VHOST_CONFIG, "sendmsg error\n");
/*
 * Send a vhost-user reply: stamp the protocol version and the REPLY flag
 * into msg->flags, then transmit header + payload with no ancillary fds.
 *
 * NOTE(review): the NULL-msg guard and return lines are elided in this
 * excerpt.
 */
237 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
244 msg->flags &= ~VHOST_USER_VERSION_MASK;
245 msg->flags |= VHOST_USER_VERSION;
246 msg->flags |= VHOST_USER_REPLY_MASK;
248 ret = send_fd_message(sockfd, (char *)msg,
249 VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
/*
 * Bind an accepted/connected fd to a new virtio-net device: allocate a
 * connection object, create the device (vhost_new_device), name it after
 * the socket path, and register the fd with the global fdset so that
 * vhost_user_msg_handler() is invoked when messages arrive.
 *
 * NOTE(review): allocation-failure handling, the vid assignment into conn,
 * and cleanup paths are elided in this excerpt.
 */
256 vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
260 struct vhost_user_connection *conn;
263 conn = malloc(sizeof(*conn));
269 vid = vhost_new_device();
/* Use the socket path (bounded by PATH_MAX) as the device ifname. */
276 size = strnlen(vsocket->path, PATH_MAX);
277 vhost_set_ifname(vid, vsocket->path, size);
279 RTE_LOG(INFO, VHOST_CONFIG, "new device, handle is %d\n", vid);
281 conn->vsocket = vsocket;
283 ret = fdset_add(&vhost_user.fdset, fd, vhost_user_msg_handler,
288 RTE_LOG(ERR, VHOST_CONFIG,
289 "failed to add fd %d into vhost server fdset\n",
/*
 * fdset callback for the listen fd in server mode: accept the pending
 * client connection and hand the new fd to vhost_user_add_connection().
 * 'dat' is the owning vhost_user_socket; 'remove' is unused because the
 * listen fd stays registered.
 *
 * NOTE(review): the accept-failure early return is elided in this excerpt.
 */
294 /* call back when there is new vhost-user connection from client */
296 vhost_user_server_new_connection(int fd, void *dat, int *remove __rte_unused)
298 struct vhost_user_socket *vsocket = dat;
300 fd = accept(fd, NULL, NULL);
304 RTE_LOG(INFO, VHOST_CONFIG, "new vhost user connection is %d\n", fd);
305 vhost_user_add_connection(fd, vsocket);
/*
 * fdset callback for an established connection: read one vhost-user
 * message and dispatch on msg.request.
 *
 * On read failure, peer hangup, or an out-of-range request id, the device
 * is destroyed and — in client mode with reconnect enabled — a fresh
 * client connection attempt is queued via vhost_user_create_client().
 *
 * GET-type requests (GET_FEATURES, GET_PROTOCOL_FEATURES, GET_VRING_BASE,
 * GET_QUEUE_NUM) and SET_LOG_BASE send a reply via send_vhost_message();
 * the remaining SET-type requests are one-way.
 *
 * NOTE(review): many lines (vid lookup, break statements, connfd close /
 * *remove handling, default case, closing braces) are elided in this
 * excerpt, so the control flow shown is incomplete.
 */
308 /* callback when there is message on the connfd */
310 vhost_user_msg_handler(int connfd, void *dat, int *remove)
313 struct vhost_user_connection *conn = dat;
314 struct VhostUserMsg msg;
319 ret = read_vhost_message(connfd, &msg);
320 if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
321 struct vhost_user_socket *vsocket = conn->vsocket;
/* ret < 0: read error; ret == 0: orderly peer close; else bad request id. */
324 RTE_LOG(ERR, VHOST_CONFIG,
325 "vhost read message failed\n");
327 RTE_LOG(INFO, VHOST_CONFIG,
328 "vhost peer closed\n");
330 RTE_LOG(ERR, VHOST_CONFIG,
331 "vhost read incorrect message\n");
336 vhost_destroy_device(vid);
/* Client mode with reconnect: try to re-establish the session. */
338 if (vsocket->reconnect)
339 vhost_user_create_client(vsocket);
344 RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
345 vhost_message_str[msg.request]);
346 switch (msg.request) {
347 case VHOST_USER_GET_FEATURES:
348 ret = vhost_get_features(vid, &features);
349 msg.payload.u64 = features;
350 msg.size = sizeof(msg.payload.u64);
351 send_vhost_message(connfd, &msg);
353 case VHOST_USER_SET_FEATURES:
354 features = msg.payload.u64;
355 vhost_set_features(vid, &features);
358 case VHOST_USER_GET_PROTOCOL_FEATURES:
359 msg.payload.u64 = VHOST_USER_PROTOCOL_FEATURES;
360 msg.size = sizeof(msg.payload.u64);
361 send_vhost_message(connfd, &msg);
363 case VHOST_USER_SET_PROTOCOL_FEATURES:
364 user_set_protocol_features(vid, msg.payload.u64);
367 case VHOST_USER_SET_OWNER:
368 vhost_set_owner(vid);
370 case VHOST_USER_RESET_OWNER:
371 vhost_reset_owner(vid);
374 case VHOST_USER_SET_MEM_TABLE:
375 user_set_mem_table(vid, &msg);
378 case VHOST_USER_SET_LOG_BASE:
379 user_set_log_base(vid, &msg);
/* SET_LOG_BASE is special among SET requests: spec requires an ack. */
381 /* it needs a reply */
382 msg.size = sizeof(msg.payload.u64);
383 send_vhost_message(connfd, &msg);
385 case VHOST_USER_SET_LOG_FD:
387 RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
390 case VHOST_USER_SET_VRING_NUM:
391 vhost_set_vring_num(vid, &msg.payload.state);
393 case VHOST_USER_SET_VRING_ADDR:
394 vhost_set_vring_addr(vid, &msg.payload.addr);
396 case VHOST_USER_SET_VRING_BASE:
397 vhost_set_vring_base(vid, &msg.payload.state);
400 case VHOST_USER_GET_VRING_BASE:
401 ret = user_get_vring_base(vid, &msg.payload.state);
402 msg.size = sizeof(msg.payload.state);
403 send_vhost_message(connfd, &msg);
406 case VHOST_USER_SET_VRING_KICK:
407 user_set_vring_kick(vid, &msg);
409 case VHOST_USER_SET_VRING_CALL:
410 user_set_vring_call(vid, &msg);
413 case VHOST_USER_SET_VRING_ERR:
/* An fd accompanies this request unless the NOFD flag bit is set. */
414 if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
416 RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
419 case VHOST_USER_GET_QUEUE_NUM:
420 msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
421 msg.size = sizeof(msg.payload.u64);
422 send_vhost_message(connfd, &msg);
425 case VHOST_USER_SET_VRING_ENABLE:
426 user_set_vring_enable(vid, &msg.payload.state);
428 case VHOST_USER_SEND_RARP:
429 user_send_rarp(vid, &msg);
/*
 * Create an AF_UNIX stream socket and fill in *un with the given path.
 * Client-mode sockets are made non-blocking up front so that connect()
 * cannot stall the caller; server sockets stay blocking here.
 *
 * NOTE(review): socket-failure and fcntl-failure return paths, plus the
 * final return of fd, are elided in this excerpt.
 */
439 create_unix_socket(const char *path, struct sockaddr_un *un, bool is_server)
443 fd = socket(AF_UNIX, SOCK_STREAM, 0);
446 RTE_LOG(INFO, VHOST_CONFIG, "vhost-user %s: socket created, fd: %d\n",
447 is_server ? "server" : "client", fd);
449 if (!is_server && fcntl(fd, F_SETFL, O_NONBLOCK)) {
450 RTE_LOG(ERR, VHOST_CONFIG,
451 "vhost-user: can't set nonblocking mode for socket, fd: "
452 "%d (%s)\n", fd, strerror(errno));
457 memset(un, 0, sizeof(*un));
458 un->sun_family = AF_UNIX;
/* strncpy here is paired with explicit NUL-termination on the next line. */
459 strncpy(un->sun_path, path, sizeof(un->sun_path));
460 un->sun_path[sizeof(un->sun_path) - 1] = '\0';
/*
 * Server mode: create the UNIX socket, bind it to vsocket->path, listen,
 * and register the listen fd with the global fdset so new client
 * connections are accepted by vhost_user_server_new_connection().
 *
 * A bind() failure typically means a stale socket file already exists at
 * the path (see the log message). NOTE(review): error-cleanup paths and
 * the function's return lines are elided in this excerpt.
 */
466 vhost_user_create_server(struct vhost_user_socket *vsocket)
470 struct sockaddr_un un;
471 const char *path = vsocket->path;
473 fd = create_unix_socket(path, &un, vsocket->is_server);
477 ret = bind(fd, (struct sockaddr *)&un, sizeof(un));
479 RTE_LOG(ERR, VHOST_CONFIG,
480 "failed to bind to %s: %s; remove it and try again\n",
481 path, strerror(errno));
484 RTE_LOG(INFO, VHOST_CONFIG, "bind to %s\n", path);
486 ret = listen(fd, MAX_VIRTIO_BACKLOG);
490 vsocket->listenfd = fd;
491 ret = fdset_add(&vhost_user.fdset, fd, vhost_user_server_new_connection,
494 RTE_LOG(ERR, VHOST_CONFIG,
495 "failed to add listen fd %d to vhost server fdset\n",
/*
 * Client-mode reconnect machinery: each pending reconnect attempt is a
 * vhost_user_reconnect entry on a mutex-protected tail queue, serviced by
 * a dedicated thread (reconn_tid, see vhost_user_client_reconnect()).
 * NOTE(review): the 'fd' member of vhost_user_reconnect is elided in this
 * excerpt but is referenced by the reconnect loop below.
 */
507 struct vhost_user_reconnect {
508 struct sockaddr_un un;
510 struct vhost_user_socket *vsocket;
512 TAILQ_ENTRY(vhost_user_reconnect) next;
515 TAILQ_HEAD(vhost_user_reconnect_tailq_list, vhost_user_reconnect);
516 struct vhost_user_reconnect_list {
517 struct vhost_user_reconnect_tailq_list head;
/* Guards 'head'; taken by both the reconnect thread and registrants. */
518 pthread_mutex_t mutex;
521 static struct vhost_user_reconnect_list reconn_list;
522 static pthread_t reconn_tid;
/*
 * Attempt connect() on a non-blocking fd. EISCONN (already connected from
 * a previous in-progress attempt) is treated as success; on success the
 * O_NONBLOCK flag is cleared again so subsequent message I/O blocks
 * normally.
 *
 * NOTE(review): the failure-return lines and final return are elided in
 * this excerpt.
 */
525 vhost_user_connect_nonblock(int fd, struct sockaddr *un, size_t sz)
529 ret = connect(fd, un, sz);
530 if (ret < 0 && errno != EISCONN)
533 flags = fcntl(fd, F_GETFL, 0);
535 RTE_LOG(ERR, VHOST_CONFIG,
536 "can't get flags for connfd %d\n", fd);
539 if ((flags & O_NONBLOCK) && fcntl(fd, F_SETFL, flags & ~O_NONBLOCK)) {
540 RTE_LOG(ERR, VHOST_CONFIG,
541 "can't disable nonblocking on fd %d\n", fd);
/*
 * Body of the reconnect thread: under reconn_list.mutex, walk the pending
 * list and retry each connection. Entries that connect successfully are
 * handed to vhost_user_add_connection() and removed from the list; the
 * rest stay queued for the next pass.
 *
 * NOTE(review): the outer for(;;) loop, sleep, and list-entry free are
 * elided in this excerpt.
 */
548 vhost_user_client_reconnect(void *arg __rte_unused)
551 struct vhost_user_reconnect *reconn, *next;
554 pthread_mutex_lock(&reconn_list.mutex);
/* Safe traversal: 'next' is captured before the entry may be removed. */
557 * An equal implementation of TAILQ_FOREACH_SAFE,
558 * which does not exist on all platforms.
560 for (reconn = TAILQ_FIRST(&reconn_list.head);
561 reconn != NULL; reconn = next) {
562 next = TAILQ_NEXT(reconn, next);
564 ret = vhost_user_connect_nonblock(reconn->fd,
565 (struct sockaddr *)&reconn->un,
569 RTE_LOG(ERR, VHOST_CONFIG,
570 "reconnection for fd %d failed\n",
577 RTE_LOG(INFO, VHOST_CONFIG,
578 "%s: connected\n", reconn->vsocket->path);
579 vhost_user_add_connection(reconn->fd, reconn->vsocket);
581 TAILQ_REMOVE(&reconn_list.head, reconn, next);
585 pthread_mutex_unlock(&reconn_list.mutex);
/*
 * One-time setup of the reconnect machinery: initialize the list mutex
 * and head, then spawn the reconnect thread. Called lazily from
 * rte_vhost_driver_register() the first time a reconnectable client
 * socket is registered.
 *
 * NOTE(review): the return statements are elided in this excerpt.
 */
593 vhost_user_reconnect_init(void)
597 pthread_mutex_init(&reconn_list.mutex, NULL);
598 TAILQ_INIT(&reconn_list.head);
600 ret = pthread_create(&reconn_tid, NULL,
601 vhost_user_client_reconnect, NULL);
603 RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
/*
 * Client mode: create the UNIX socket and try a non-blocking connect to
 * vsocket->path. On immediate success the connection is registered right
 * away; on failure, if reconnect is enabled (and the error is retryable),
 * the attempt is queued on reconn_list for the reconnect thread.
 *
 * NOTE(review): the ret == -2 meaning (non-retryable failure), the
 * reconn->fd/un assignments, close(fd) paths, and return lines are elided
 * in this excerpt — confirm against the full source.
 */
609 vhost_user_create_client(struct vhost_user_socket *vsocket)
613 struct sockaddr_un un;
614 const char *path = vsocket->path;
615 struct vhost_user_reconnect *reconn;
617 fd = create_unix_socket(path, &un, vsocket->is_server);
621 ret = vhost_user_connect_nonblock(fd, (struct sockaddr *)&un,
/* Connected on the first try: bind the fd to a device immediately. */
624 vhost_user_add_connection(fd, vsocket);
628 RTE_LOG(ERR, VHOST_CONFIG,
629 "failed to connect to %s: %s\n",
630 path, strerror(errno));
632 if (ret == -2 || !vsocket->reconnect) {
637 RTE_LOG(ERR, VHOST_CONFIG, "%s: reconnecting...\n", path);
638 reconn = malloc(sizeof(*reconn));
639 if (reconn == NULL) {
640 RTE_LOG(ERR, VHOST_CONFIG,
641 "failed to allocate memory for reconnect\n");
647 reconn->vsocket = vsocket;
648 pthread_mutex_lock(&reconn_list.mutex);
649 TAILQ_INSERT_TAIL(&reconn_list.head, reconn, next);
650 pthread_mutex_unlock(&reconn_list.mutex);
/*
 * Public API: register a vhost-user socket at 'path'. Acts as server by
 * default, or as client when RTE_VHOST_USER_CLIENT is set in 'flags';
 * client mode additionally enables auto-reconnect unless
 * RTE_VHOST_USER_NO_RECONNECT is set. The reconnect thread is created
 * lazily on the first reconnectable client registration (reconn_tid == 0).
 *
 * The whole operation runs under vhost_user.mutex and fails when the
 * MAX_VHOST_SOCKET registry is full.
 *
 * NOTE(review): malloc/strdup failure handling, goto-out cleanup, and the
 * return value lines are elided in this excerpt.
 */
656 * Register a new vhost-user socket; here we could act as server
657 * (the default case), or client (when RTE_VHOST_USER_CLIENT) flag
661 rte_vhost_driver_register(const char *path, uint64_t flags)
664 struct vhost_user_socket *vsocket;
669 pthread_mutex_lock(&vhost_user.mutex);
671 if (vhost_user.vsocket_cnt == MAX_VHOST_SOCKET) {
672 RTE_LOG(ERR, VHOST_CONFIG,
673 "error: the number of vhost sockets reaches maximum\n");
677 vsocket = malloc(sizeof(struct vhost_user_socket));
680 memset(vsocket, 0, sizeof(struct vhost_user_socket));
681 vsocket->path = strdup(path);
683 if ((flags & RTE_VHOST_USER_CLIENT) != 0) {
684 vsocket->reconnect = !(flags & RTE_VHOST_USER_NO_RECONNECT);
685 if (vsocket->reconnect && reconn_tid == 0) {
686 if (vhost_user_reconnect_init() < 0) {
692 ret = vhost_user_create_client(vsocket);
694 vsocket->is_server = true;
695 ret = vhost_user_create_server(vsocket);
/* Success: publish the socket in the global registry. */
703 vhost_user.vsockets[vhost_user.vsocket_cnt++] = vsocket;
706 pthread_mutex_unlock(&vhost_user.mutex);
/*
 * Public API: unregister the vhost socket registered at 'path'. For a
 * server socket the listen fd is removed from the fdset and closed. The
 * registry slot is back-filled with the last entry (order within
 * vsockets[] is not preserved).
 *
 * NOTE(review): the unlink(path) for server sockets and the return
 * statements are elided in this excerpt.
 */
712 * Unregister the specified vhost socket
715 rte_vhost_driver_unregister(const char *path)
720 pthread_mutex_lock(&vhost_user.mutex);
722 for (i = 0; i < vhost_user.vsocket_cnt; i++) {
723 if (!strcmp(vhost_user.vsockets[i]->path, path)) {
724 if (vhost_user.vsockets[i]->is_server) {
725 fdset_del(&vhost_user.fdset,
726 vhost_user.vsockets[i]->listenfd);
727 close(vhost_user.vsockets[i]->listenfd);
/* 'path' was strdup'd at registration; free it with the socket. */
731 free(vhost_user.vsockets[i]->path);
732 free(vhost_user.vsockets[i]);
/* Swap-remove: move the last registry entry into the freed slot. */
734 count = --vhost_user.vsocket_cnt;
735 vhost_user.vsockets[i] = vhost_user.vsockets[count];
736 vhost_user.vsockets[count] = NULL;
737 pthread_mutex_unlock(&vhost_user.mutex);
742 pthread_mutex_unlock(&vhost_user.mutex);
/*
 * Public API: enter the vhost-user event loop, dispatching fdset events
 * (new connections and messages) to the registered callbacks. Blocks in
 * fdset_event_dispatch(); intended to be run from a dedicated thread.
 */
748 rte_vhost_driver_session_start(void)
750 fdset_event_dispatch(&vhost_user.fdset);