1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
5 #include <sys/socket.h>
14 #include <rte_string_fns.h>
15 #include <rte_fbarray.h>
18 #include "virtio_user_dev.h"
20 /* The version of the protocol we support */
21 #define VHOST_USER_VERSION 0x1
/* Max memory regions carried in one SET_MEM_TABLE message, and therefore
 * also the max number of fds passed alongside it as SCM_RIGHTS data. */
23 #define VHOST_MEMORY_MAX_NREGIONS 8
/* NOTE(review): the listing is elided here — this member appears to belong
 * to a struct (presumably struct vhost_memory) whose opening line is not
 * visible in this excerpt; confirm against the full source. */
27 struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];
/* On-the-wire vhost-user message: request code, flags, payload size, then
 * a payload union (only some union members are visible in this excerpt). */
30 struct vhost_user_msg {
31 enum vhost_user_request request;
/* flags layout: bits 0-1 = protocol version, bit 2 = "this is a reply",
 * bit 3 = "sender wants an explicit reply/ack". */
33 #define VHOST_USER_VERSION_MASK 0x3
34 #define VHOST_USER_REPLY_MASK (0x1 << 2)
35 #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
37 uint32_t size; /* the following payload size */
/* For vring u64 payloads: low byte is the ring index; bit 8 set means no
 * file descriptor accompanies the message. */
39 #define VHOST_USER_VRING_IDX_MASK 0xff
40 #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
42 struct vhost_vring_state state;
43 struct vhost_vring_addr addr;
44 struct vhost_memory memory;
46 int fds[VHOST_MEMORY_MAX_NREGIONS];
/* Header = everything before the payload union; payload is the rest. */
49 #define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
50 #define VHOST_USER_PAYLOAD_SIZE \
51 (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
/* Send one vhost-user message (header + msg->size payload bytes) on the
 * connected unix socket 'fd', attaching fd_num file descriptors from
 * 'fds' as SCM_RIGHTS ancillary data; sendmsg() is retried on EINTR.
 * NOTE(review): this excerpt is elided — the return type, the local
 * msghdr/iovec declarations and parts of the send loop are not visible. */
54 vhost_user_write(int fd, struct vhost_user_msg *msg, int *fds, int fd_num)
59 size_t fd_size = fd_num * sizeof(int);
/* VLA sized for fd_num descriptors' worth of control-message space. */
60 char control[CMSG_SPACE(fd_size)];
63 memset(&msgh, 0, sizeof(msgh));
64 memset(control, 0, sizeof(control));
/* Single iovec covering the fixed header plus the declared payload. */
66 iov.iov_base = (uint8_t *)msg;
67 iov.iov_len = VHOST_USER_HDR_SIZE + msg->size;
71 msgh.msg_control = control;
72 msgh.msg_controllen = sizeof(control);
/* Pack the fds into one SCM_RIGHTS control message. */
74 cmsg = CMSG_FIRSTHDR(&msgh);
75 cmsg->cmsg_len = CMSG_LEN(fd_size);
76 cmsg->cmsg_level = SOL_SOCKET;
77 cmsg->cmsg_type = SCM_RIGHTS;
78 memcpy(CMSG_DATA(cmsg), fds, fd_size);
/* Retry the send if interrupted by a signal. */
81 r = sendmsg(fd, &msgh, 0);
82 } while (r < 0 && errno == EINTR);
/* Receive one vhost-user reply on 'fd': read the fixed-size header, check
 * the flags are exactly REPLY|VERSION, bound-check the advertised payload
 * size against the payload union, then read the payload in place.
 * NOTE(review): elided excerpt — return statements and some error paths
 * are not visible here. */
88 vhost_user_read(int fd, struct vhost_user_msg *msg)
90 uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
91 int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload;
/* Header first; a short read is treated as failure. */
93 ret = recv(fd, (void *)msg, sz_hdr, 0);
95 PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.",
100 /* validate msg flags */
101 if (msg->flags != (valid_flags)) {
102 PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.",
103 msg->flags, valid_flags);
/* Never read more bytes than the payload union can hold. */
107 sz_payload = msg->size;
109 if ((size_t)sz_payload > sizeof(msg->payload))
/* Payload follows the header in the same caller-supplied buffer. */
113 ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
114 if (ret < sz_payload) {
116 "Failed to recv msg payload: %d instead of %d.",
/* Issue VHOST_USER_SET_OWNER (no payload, no fds) to claim the vhost
 * backend for this virtio-user device. */
129 vhost_user_set_owner(struct virtio_user_dev *dev)
132 struct vhost_user_msg msg = {
133 .request = VHOST_USER_SET_OWNER,
134 .flags = VHOST_USER_VERSION,
137 ret = vhost_user_write(dev->vhostfd, &msg, NULL, 0);
139 PMD_DRV_LOG(ERR, "Failed to set owner");
/* NOTE(review): elided — this member belongs to the walk context struct
 * (walk_arg) whose declaration is only partly visible in this excerpt. */
147 struct vhost_memory *vm;
/* Memseg-walk callback: fold one memseg into the vhost memory table being
 * built in 'arg'.  Memsegs backed by the same fd are merged into a single
 * region (growing its size and lowering its start/offset as needed);
 * otherwise a new region is appended, up to VHOST_MEMORY_MAX_NREGIONS. */
153 update_memory_region(const struct rte_memseg_list *msl __rte_unused,
154 const struct rte_memseg *ms, void *arg)
156 struct walk_arg *wa = arg;
157 struct vhost_memory_region *mr;
158 uint64_t start_addr, end_addr;
/* Resolve the backing fd and its offset for this memseg. */
162 fd = rte_memseg_get_fd_thread_unsafe(ms);
164 PMD_DRV_LOG(ERR, "Failed to get fd, ms=%p rte_errno=%d",
169 if (rte_memseg_get_fd_offset_thread_unsafe(ms, &offset) < 0) {
170 PMD_DRV_LOG(ERR, "Failed to get offset, ms=%p rte_errno=%d",
175 start_addr = (uint64_t)(uintptr_t)ms->addr;
176 end_addr = start_addr + ms->len;
/* Try to merge with an existing region sharing the same backing fd. */
178 for (i = 0; i < wa->region_nr; i++) {
179 if (wa->fds[i] != fd)
182 mr = &wa->vm->regions[i];
/* Grow the region forward if this memseg extends past its end... */
184 if (mr->userspace_addr + mr->memory_size < end_addr)
185 mr->memory_size = end_addr - mr->userspace_addr;
/* ...and backward if this memseg starts before it. */
187 if (mr->userspace_addr > start_addr) {
188 mr->userspace_addr = start_addr;
189 mr->guest_phys_addr = start_addr;
/* Keep the smallest mmap offset seen for this fd. */
192 if (mr->mmap_offset > offset)
193 mr->mmap_offset = offset;
195 PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
196 " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
197 mr->mmap_offset, mr->userspace_addr,
/* No mergeable region found: append a new one if the table has room. */
203 if (i >= VHOST_MEMORY_MAX_NREGIONS) {
204 PMD_DRV_LOG(ERR, "Too many memory regions");
208 mr = &wa->vm->regions[i];
/* guest_phys_addr mirrors userspace_addr: an identity mapping. */
211 mr->guest_phys_addr = start_addr;
212 mr->userspace_addr = start_addr;
213 mr->memory_size = ms->len;
214 mr->mmap_offset = offset;
216 PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
217 " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
218 mr->mmap_offset, mr->userspace_addr,
/* Build the SET_MEM_TABLE payload in msg->payload.memory, collecting the
 * matching backing fds into fds[], by walking all DPDK memsegs with
 * update_memory_region().  NOTE(review): elided — the walk_arg setup and
 * return statements are not visible in this excerpt. */
227 prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
232 wa.vm = &msg->payload.memory;
/* Thread-unsafe walk is deliberate: the caller already holds the lock. */
236 * The memory lock has already been taken by memory subsystem
237 * or virtio_user_start_device().
239 if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
242 msg->payload.memory.nregions = wa.region_nr;
243 msg->payload.memory.padding = 0;
/* Scratch message used only in sizeof() expressions on payload members. */
248 static struct vhost_user_msg m;
/* Human-readable request names for log output, indexed by request code. */
250 const char * const vhost_msg_strings[] = {
251 [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
252 [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
253 [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
254 [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
255 [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
256 [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
257 [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
258 [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
259 [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
260 [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
261 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
262 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
263 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
264 [VHOST_USER_SET_STATUS] = "VHOST_SET_STATUS",
265 [VHOST_USER_GET_STATUS] = "VHOST_GET_STATUS",
/* Core request path: marshal 'arg' into a vhost_user_msg for 'req', send
 * it together with any collected fds, and — when the request returns data
 * or a REPLY_ACK was requested — read back and validate the response,
 * copying result payloads into 'arg'.  NOTE(review): heavily elided
 * excerpt; the switch braces, break/return statements and several locals
 * (need_reply, fd_num, arg, ...) are not visible here. */
269 vhost_user_sock(struct virtio_user_dev *dev,
270 enum vhost_user_request req,
273 struct vhost_user_msg msg;
274 struct vhost_vring_file *file = 0;
276 int has_reply_ack = 0;
277 int fds[VHOST_MEMORY_MAX_NREGIONS];
279 int vhostfd = dev->vhostfd;
283 PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
/* In server mode there may be no connected client yet. */
285 if (dev->is_server && vhostfd < 0)
288 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
292 msg.flags = VHOST_USER_VERSION;
/* GET/SET_STATUS are only legal once FEATURES_OK is set and the backend
 * negotiated the STATUS protocol feature. */
296 case VHOST_USER_GET_STATUS:
297 if (!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK) ||
298 (!(dev->protocol_features &
299 (1ULL << VHOST_USER_PROTOCOL_F_STATUS))))
302 case VHOST_USER_GET_FEATURES:
303 case VHOST_USER_GET_PROTOCOL_FEATURES:
307 case VHOST_USER_SET_STATUS:
308 if (!(dev->status & VIRTIO_CONFIG_STATUS_FEATURES_OK) ||
309 (!(dev->protocol_features &
310 (1ULL << VHOST_USER_PROTOCOL_F_STATUS))))
/* Ask for an explicit ack when the backend supports it. */
314 msg.flags |= VHOST_USER_NEED_REPLY_MASK;
/* Plain u64-payload setters. */
316 case VHOST_USER_SET_PROTOCOL_FEATURES:
317 case VHOST_USER_SET_LOG_BASE:
318 msg.payload.u64 = *((__u64 *)arg);
319 msg.size = sizeof(m.payload.u64);
/* Keep F_PROTOCOL_FEATURES set whenever the device offers it. */
322 case VHOST_USER_SET_FEATURES:
323 msg.payload.u64 = *((__u64 *)arg) | (dev->device_features &
324 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES));
325 msg.size = sizeof(m.payload.u64);
328 case VHOST_USER_RESET_OWNER:
/* Memory table: payload built by the memseg walk; one fd per region. */
331 case VHOST_USER_SET_MEM_TABLE:
332 if (prepare_vhost_memory_user(&msg, fds) < 0)
334 fd_num = msg.payload.memory.nregions;
335 msg.size = sizeof(m.payload.memory.nregions);
336 msg.size += sizeof(m.payload.memory.padding);
337 msg.size += fd_num * sizeof(struct vhost_memory_region);
340 msg.flags |= VHOST_USER_NEED_REPLY_MASK;
/* Log fd travels as ancillary data only; no payload bytes. */
343 case VHOST_USER_SET_LOG_FD:
344 fds[fd_num++] = *((int *)arg);
/* vring_state-payload requests. */
347 case VHOST_USER_SET_VRING_NUM:
348 case VHOST_USER_SET_VRING_BASE:
349 case VHOST_USER_SET_VRING_ENABLE:
350 memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
351 msg.size = sizeof(m.payload.state);
354 case VHOST_USER_GET_VRING_BASE:
355 memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
356 msg.size = sizeof(m.payload.state);
360 case VHOST_USER_SET_VRING_ADDR:
361 memcpy(&msg.payload.addr, arg, sizeof(msg.payload.addr));
362 msg.size = sizeof(m.payload.addr);
/* kick/call/err: ring index in u64, fd as ancillary data (or NOFD bit
 * when no valid fd is supplied). */
365 case VHOST_USER_SET_VRING_KICK:
366 case VHOST_USER_SET_VRING_CALL:
367 case VHOST_USER_SET_VRING_ERR:
369 msg.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
370 msg.size = sizeof(m.payload.u64);
372 fds[fd_num++] = file->fd;
374 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
378 PMD_DRV_LOG(ERR, "trying to send unhandled msg type");
382 if (vhost_user_write(vhostfd, &msg, fds, fd_num) < 0) {
383 PMD_DRV_LOG(ERR, "%s failed: %s",
384 vhost_msg_strings[req], strerror(errno));
/* Read a reply when the request returns data or an ack was requested. */
388 if (need_reply || msg.flags & VHOST_USER_NEED_REPLY_MASK) {
389 if (vhost_user_read(vhostfd, &msg) < 0) {
390 PMD_DRV_LOG(ERR, "Received msg failed: %s",
/* The reply must echo the request code. */
395 if (req != msg.request) {
396 PMD_DRV_LOG(ERR, "Received unexpected msg type");
/* Getters: validate the payload size, then copy it back to the caller. */
401 case VHOST_USER_GET_FEATURES:
402 case VHOST_USER_GET_STATUS:
403 case VHOST_USER_GET_PROTOCOL_FEATURES:
404 if (msg.size != sizeof(m.payload.u64)) {
405 PMD_DRV_LOG(ERR, "Received bad msg size");
408 *((__u64 *)arg) = msg.payload.u64;
410 case VHOST_USER_GET_VRING_BASE:
411 if (msg.size != sizeof(m.payload.state)) {
412 PMD_DRV_LOG(ERR, "Received bad msg size");
415 memcpy(arg, &msg.payload.state,
416 sizeof(struct vhost_vring_state));
419 /* Reply-ack handling */
/* A non-zero u64 in the ack payload means the slave rejected the
 * request. */
420 if (msg.size != sizeof(m.payload.u64)) {
421 PMD_DRV_LOG(ERR, "Received bad msg size");
425 if (msg.payload.u64 != 0) {
426 PMD_DRV_LOG(ERR, "Slave replied NACK");
437 #define MAX_VIRTIO_USER_BACKLOG 1
/* Server mode: bind the pre-created listen socket to the unix path, start
 * listening, and make the fd non-blocking so accept() can be polled. */
439 virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
443 int fd = dev->listenfd;
445 ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
/* bind() fails if the socket path already exists; tell the user. */
447 PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
448 dev->path, strerror(errno));
451 ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
455 flag = fcntl(fd, F_GETFL);
456 if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
457 PMD_DRV_LOG(ERR, "fcntl failed, %s", strerror(errno));
465 * Set up environment to talk with a vhost user backend.
/* Create the AF_UNIX stream socket (close-on-exec), then either start
 * server mode on dev->path or connect to an existing backend there. */
472 vhost_user_setup(struct virtio_user_dev *dev)
476 struct sockaddr_un un;
478 fd = socket(AF_UNIX, SOCK_STREAM, 0);
480 PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
/* Best effort: a failed FD_CLOEXEC only warns, it is not fatal. */
484 flag = fcntl(fd, F_GETFD);
485 if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
486 PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno));
/* strlcpy guarantees NUL-termination of sun_path. */
488 memset(&un, 0, sizeof(un));
489 un.sun_family = AF_UNIX;
490 strlcpy(un.sun_path, dev->path, sizeof(un.sun_path));
492 if (dev->is_server) {
494 if (virtio_user_start_server(dev, &un) < 0) {
495 PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
/* Client mode: connect straight to the backend's socket. */
501 if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
502 PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
/* Enable or disable both vrings of queue pair 'pair_idx' via
 * VHOST_USER_SET_VRING_ENABLE; no-op if already in the requested state. */
513 vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
519 if (dev->qp_enabled[pair_idx] == enable)
/* One message per ring: ring indices 2*pair_idx and 2*pair_idx+1. */
522 for (i = 0; i < 2; ++i) {
523 struct vhost_vring_state state = {
524 .index = pair_idx * 2 + i,
528 if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state))
/* Record the new state only after both rings were updated. */
532 dev->qp_enabled[pair_idx] = enable;
/* Backend ops vtable wiring the vhost-user implementations above into the
 * generic virtio-user device layer. */
536 struct virtio_user_backend_ops virtio_ops_user = {
537 .setup = vhost_user_setup,
538 .set_owner = vhost_user_set_owner,
539 .send_request = vhost_user_sock,
540 .enable_qp = vhost_user_enable_queue_pair