1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
5 #include <sys/socket.h>
14 #include <rte_string_fns.h>
15 #include <rte_fbarray.h>
18 #include "virtio_user_dev.h"
20 /* The version of the protocol we support */
21 #define VHOST_USER_VERSION 0x1
23 #define VHOST_MEMORY_MAX_NREGIONS 8
/*
 * NOTE(review): this chunk is an elided view of the file — the original
 * line numbers embedded at the start of each line show gaps, so struct
 * openings/closings and the payload union wrapper are not visible here.
 */

/* Region table carried in a VHOST_USER_SET_MEM_TABLE payload; this field
 * belongs to struct vhost_memory (struct opening not visible in chunk). */
27 struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS];

/* On-the-wire vhost-user message: fixed header followed by a payload
 * union (union declaration elided between the visible lines). */
30 struct vhost_user_msg {
31 enum vhost_user_request request;

/* Bits of the 32-bit flags field: low 2 bits = protocol version,
 * bit 2 = this is a reply, bit 3 = sender wants an explicit ack. */
33 #define VHOST_USER_VERSION_MASK 0x3
34 #define VHOST_USER_REPLY_MASK (0x1 << 2)
35 #define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
37 uint32_t size; /* the following payload size */

/* u64 payload encoding for vring-file requests: low byte is the vring
 * index, bit 8 set means "no fd attached to this message". */
39 #define VHOST_USER_VRING_IDX_MASK 0xff
40 #define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)

/* Payload union members (union keyword elided in this view). */
42 struct vhost_vring_state state;
43 struct vhost_vring_addr addr;
44 struct vhost_memory memory;

/* Ancillary fds sent alongside the message (one per memory region). */
46 int fds[VHOST_MEMORY_MAX_NREGIONS];

/* Header ends where the payload union begins; payload size is the rest. */
49 #define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64)
50 #define VHOST_USER_PAYLOAD_SIZE \
51 (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE)
/*
 * Send @len bytes from @buf over the connected unix socket @fd, attaching
 * @fd_num file descriptors as SCM_RIGHTS ancillary data so the vhost
 * backend can mmap the shared memory regions.
 * NOTE(review): return type, msghdr/iov/cmsg declarations, the do-loop
 * opening and the return statement are elided from this view; presumably
 * it returns sendmsg()'s result — confirm against the full file.
 */
54 vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num)
59 size_t fd_size = fd_num * sizeof(int);
/* Control buffer sized for exactly fd_num descriptors. */
60 char control[CMSG_SPACE(fd_size)];
63 memset(&msgh, 0, sizeof(msgh));
64 memset(control, 0, sizeof(control));
66 iov.iov_base = (uint8_t *)buf;
71 msgh.msg_control = control;
72 msgh.msg_controllen = sizeof(control);
/* Fill the single cmsg header that carries the fd array. */
74 cmsg = CMSG_FIRSTHDR(&msgh);
75 cmsg->cmsg_len = CMSG_LEN(fd_size);
76 cmsg->cmsg_level = SOL_SOCKET;
77 cmsg->cmsg_type = SCM_RIGHTS;
78 memcpy(CMSG_DATA(cmsg), fds, fd_size);
/* Retry if the send is interrupted by a signal. */
81 r = sendmsg(fd, &msgh, 0);
82 } while (r < 0 && errno == EINTR);
/*
 * Receive one vhost-user reply from socket @fd into @msg: read the fixed
 * header first, validate the flags, then read exactly msg->size payload
 * bytes. Error paths / return statements are elided from this view.
 */
88 vhost_user_read(int fd, struct vhost_user_msg *msg)
/* A valid reply carries the REPLY flag plus our protocol version. */
90 uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
91 int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload;
/* Fixed-size header first. */
93 ret = recv(fd, (void *)msg, sz_hdr, 0);
95 PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.",
100 /* validate msg flags */
101 if (msg->flags != (valid_flags)) {
102 PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.",
103 msg->flags, valid_flags);
/* Bound the advertised payload size before reading it — guards against
 * a malicious/buggy backend overflowing the payload union. */
107 sz_payload = msg->size;
109 if ((size_t)sz_payload > sizeof(msg->payload))
/* Then the variable-size payload directly after the header. */
113 ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0);
114 if (ret < sz_payload) {
116 "Failed to recv msg payload: %d instead of %d.",
/* Accumulator passed through rte_memseg_walk: the memory table being
 * built plus per-region backing fds (struct opening elided in this view). */
129 struct vhost_memory *vm;

/*
 * rte_memseg_walk callback: fold memseg @ms into the vhost memory table
 * in @arg. Segments backed by the same fd are merged into one region
 * (extending its bounds / lowering its mmap offset); otherwise a new
 * region is appended, failing if VHOST_MEMORY_MAX_NREGIONS is exceeded.
 * Declarations, early returns and closing braces are elided here.
 */
135 update_memory_region(const struct rte_memseg_list *msl __rte_unused,
136 const struct rte_memseg *ms, void *arg)
138 struct walk_arg *wa = arg;
139 struct vhost_memory_region *mr;
140 uint64_t start_addr, end_addr;
/* Backing fd + offset for this segment; thread-unsafe variants are OK
 * because the caller holds the memory lock (see prepare_vhost_memory_user). */
144 fd = rte_memseg_get_fd_thread_unsafe(ms);
146 PMD_DRV_LOG(ERR, "Failed to get fd, ms=%p rte_errno=%d",
151 if (rte_memseg_get_fd_offset_thread_unsafe(ms, &offset) < 0) {
152 PMD_DRV_LOG(ERR, "Failed to get offset, ms=%p rte_errno=%d",
157 start_addr = (uint64_t)(uintptr_t)ms->addr;
158 end_addr = start_addr + ms->len;
/* Look for an existing region backed by the same fd to merge into. */
160 for (i = 0; i < wa->region_nr; i++) {
161 if (wa->fds[i] != fd)
164 mr = &wa->vm->regions[i];
/* Grow the region's upper bound if this segment extends past it. */
166 if (mr->userspace_addr + mr->memory_size < end_addr)
167 mr->memory_size = end_addr - mr->userspace_addr;
/* Lower the region's base if this segment starts before it; virtio-user
 * uses the process VA as the guest physical address as well. */
169 if (mr->userspace_addr > start_addr) {
170 mr->userspace_addr = start_addr;
171 mr->guest_phys_addr = start_addr;
/* Keep the smallest mmap offset seen for this fd. */
174 if (mr->mmap_offset > offset)
175 mr->mmap_offset = offset;
177 PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
178 " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
179 mr->mmap_offset, mr->userspace_addr,
/* No mergeable region found — append a new one, bounded by the protocol
 * limit of VHOST_MEMORY_MAX_NREGIONS. */
185 if (i >= VHOST_MEMORY_MAX_NREGIONS) {
186 PMD_DRV_LOG(ERR, "Too many memory regions");
190 mr = &wa->vm->regions[i];
193 mr->guest_phys_addr = start_addr;
194 mr->userspace_addr = start_addr;
195 mr->memory_size = ms->len;
196 mr->mmap_offset = offset;
198 PMD_DRV_LOG(DEBUG, "index=%d fd=%d offset=0x%" PRIx64
199 " addr=0x%" PRIx64 " len=%" PRIu64, i, fd,
200 mr->mmap_offset, mr->userspace_addr,
/*
 * Build the VHOST_USER_SET_MEM_TABLE payload in @msg by walking all DPDK
 * memsegs, collecting per-region backing fds into @fds.
 * Return value and walk_arg initialization are elided from this view.
 */
209 prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[])
214 wa.vm = &msg->payload.memory;
/* The thread-unsafe walk is safe here because the lock is already held: */
218 * The memory lock has already been taken by memory subsystem
219 * or virtio_user_start_device().
221 if (rte_memseg_walk_thread_unsafe(update_memory_region, &wa) < 0)
224 msg->payload.memory.nregions = wa.region_nr;
225 msg->payload.memory.padding = 0;
/* Scratch message used only for sizeof() on payload members below. */
230 static struct vhost_user_msg m;

/* Human-readable names for each request, indexed by enum
 * vhost_user_request; used for logging in vhost_user_sock().
 * (Closing brace of the array is not visible in this chunk.) */
232 const char * const vhost_msg_strings[] = {
233 [VHOST_USER_SET_OWNER] = "VHOST_SET_OWNER",
234 [VHOST_USER_RESET_OWNER] = "VHOST_RESET_OWNER",
235 [VHOST_USER_SET_FEATURES] = "VHOST_SET_FEATURES",
236 [VHOST_USER_GET_FEATURES] = "VHOST_GET_FEATURES",
237 [VHOST_USER_SET_VRING_CALL] = "VHOST_SET_VRING_CALL",
238 [VHOST_USER_SET_VRING_NUM] = "VHOST_SET_VRING_NUM",
239 [VHOST_USER_SET_VRING_BASE] = "VHOST_SET_VRING_BASE",
240 [VHOST_USER_GET_VRING_BASE] = "VHOST_GET_VRING_BASE",
241 [VHOST_USER_SET_VRING_ADDR] = "VHOST_SET_VRING_ADDR",
242 [VHOST_USER_SET_VRING_KICK] = "VHOST_SET_VRING_KICK",
243 [VHOST_USER_SET_MEM_TABLE] = "VHOST_SET_MEM_TABLE",
244 [VHOST_USER_SET_VRING_ENABLE] = "VHOST_SET_VRING_ENABLE",
245 [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
246 [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
247 [VHOST_USER_SET_STATUS] = "VHOST_SET_STATUS",
248 [VHOST_USER_GET_STATUS] = "VHOST_GET_STATUS",
/*
 * Central request dispatcher: marshal @req (+ request-specific @arg) into
 * a vhost_user_msg, send it over the device's vhost fd, and — for GET
 * requests or when a reply-ack was demanded — read and unmarshal the
 * reply back into @arg.
 * NOTE(review): many case bodies, break statements and return paths are
 * elided in this view; the structure below is the visible skeleton.
 */
252 vhost_user_sock(struct virtio_user_dev *dev,
253 enum vhost_user_request req,
256 struct vhost_user_msg msg;
257 struct vhost_vring_file *file = 0;
259 int has_reply_ack = 0;
260 int fds[VHOST_MEMORY_MAX_NREGIONS];
263 int vhostfd = dev->vhostfd;
267 PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);
/* In server mode there may be no client connected yet. */
269 if (dev->is_server && vhostfd < 0)
/* Reply-ack is only usable if the backend negotiated the protocol bit. */
272 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK))
276 msg.flags = VHOST_USER_VERSION;
/* --- Marshal the payload per request type --- */
/* GET requests: empty payload, reply expected. */
280 case VHOST_USER_GET_FEATURES:
281 case VHOST_USER_GET_PROTOCOL_FEATURES:
282 case VHOST_USER_GET_STATUS:
/* SET_STATUS requires the STATUS protocol feature; ask for an ack. */
286 case VHOST_USER_SET_STATUS:
287 if (!(dev->protocol_features &
288 (1ULL << VHOST_USER_PROTOCOL_F_STATUS)))
292 msg.flags |= VHOST_USER_NEED_REPLY_MASK;
/* Plain u64 payloads. */
294 case VHOST_USER_SET_FEATURES:
295 case VHOST_USER_SET_PROTOCOL_FEATURES:
296 case VHOST_USER_SET_LOG_BASE:
297 msg.payload.u64 = *((__u64 *)arg);
298 msg.size = sizeof(m.payload.u64);
/* No payload at all. */
301 case VHOST_USER_SET_OWNER:
302 case VHOST_USER_RESET_OWNER:
/* Memory table: regions built from the memseg walk, fds attached as
 * ancillary data; size covers only nregions actually used. */
305 case VHOST_USER_SET_MEM_TABLE:
306 if (prepare_vhost_memory_user(&msg, fds) < 0)
308 fd_num = msg.payload.memory.nregions;
309 msg.size = sizeof(m.payload.memory.nregions);
310 msg.size += sizeof(m.payload.memory.padding);
311 msg.size += fd_num * sizeof(struct vhost_memory_region);
314 msg.flags |= VHOST_USER_NEED_REPLY_MASK;
/* Single fd payload. */
317 case VHOST_USER_SET_LOG_FD:
318 fds[fd_num++] = *((int *)arg);
/* vring state payloads. */
321 case VHOST_USER_SET_VRING_NUM:
322 case VHOST_USER_SET_VRING_BASE:
323 case VHOST_USER_SET_VRING_ENABLE:
324 memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
325 msg.size = sizeof(m.payload.state);
328 case VHOST_USER_GET_VRING_BASE:
329 memcpy(&msg.payload.state, arg, sizeof(msg.payload.state));
330 msg.size = sizeof(m.payload.state);
334 case VHOST_USER_SET_VRING_ADDR:
335 memcpy(&msg.payload.addr, arg, sizeof(msg.payload.addr));
336 msg.size = sizeof(m.payload.addr);
/* vring-file requests: u64 carries the ring index; a valid fd rides as
 * ancillary data, otherwise the NOFD bit is set. */
339 case VHOST_USER_SET_VRING_KICK:
340 case VHOST_USER_SET_VRING_CALL:
341 case VHOST_USER_SET_VRING_ERR:
343 msg.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK;
344 msg.size = sizeof(m.payload.u64);
346 fds[fd_num++] = file->fd;
348 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
352 PMD_DRV_LOG(ERR, "trying to send unhandled msg type");
/* --- Send, then optionally read and validate the reply --- */
356 len = VHOST_USER_HDR_SIZE + msg.size;
357 if (vhost_user_write(vhostfd, &msg, len, fds, fd_num) < 0) {
358 PMD_DRV_LOG(ERR, "%s failed: %s",
359 vhost_msg_strings[req], strerror(errno));
363 if (need_reply || msg.flags & VHOST_USER_NEED_REPLY_MASK) {
364 if (vhost_user_read(vhostfd, &msg) < 0) {
365 PMD_DRV_LOG(ERR, "Received msg failed: %s",
/* The reply must echo the request type we sent. */
370 if (req != msg.request) {
371 PMD_DRV_LOG(ERR, "Received unexpected msg type");
/* Unmarshal reply payloads; each is size-checked before use. */
376 case VHOST_USER_GET_FEATURES:
377 case VHOST_USER_GET_STATUS:
378 case VHOST_USER_GET_PROTOCOL_FEATURES:
379 if (msg.size != sizeof(m.payload.u64)) {
380 PMD_DRV_LOG(ERR, "Received bad msg size");
383 *((__u64 *)arg) = msg.payload.u64;
385 case VHOST_USER_GET_VRING_BASE:
386 if (msg.size != sizeof(m.payload.state)) {
387 PMD_DRV_LOG(ERR, "Received bad msg size");
390 memcpy(arg, &msg.payload.state,
391 sizeof(struct vhost_vring_state));
394 /* Reply-ack handling */
395 if (msg.size != sizeof(m.payload.u64)) {
396 PMD_DRV_LOG(ERR, "Received bad msg size");
/* Non-zero ack payload means the backend rejected the request. */
400 if (msg.payload.u64 != 0) {
401 PMD_DRV_LOG(ERR, "Slave replied NACK");
/* Only one pending client connection is ever expected. */
412 #define MAX_VIRTIO_USER_BACKLOG 1

/*
 * Server mode: bind the already-created listen socket to the unix path
 * in @un, start listening, and make the fd non-blocking so accept can be
 * polled. Error returns are elided from this view.
 */
414 virtio_user_start_server(struct virtio_user_dev *dev, struct sockaddr_un *un)
418 int fd = dev->listenfd;
420 ret = bind(fd, (struct sockaddr *)un, sizeof(*un));
/* Most common failure: a stale socket file at dev->path from a previous run. */
422 PMD_DRV_LOG(ERR, "failed to bind to %s: %s; remove it and try again\n",
423 dev->path, strerror(errno));
426 ret = listen(fd, MAX_VIRTIO_USER_BACKLOG);
/* Non-blocking so a missing client does not stall device start. */
430 flag = fcntl(fd, F_GETFL);
431 if (fcntl(fd, F_SETFL, flag | O_NONBLOCK) < 0) {
432 PMD_DRV_LOG(ERR, "fcntl failed, %s", strerror(errno));
440 * Set up environment to talk with a vhost user backend.
/*
 * Create the AF_UNIX stream socket for dev->path and either start
 * listening (server mode) or connect to the backend (client mode).
 * Return statements and some error paths are elided from this view.
 */
447 vhost_user_setup(struct virtio_user_dev *dev)
451 struct sockaddr_un un;
453 fd = socket(AF_UNIX, SOCK_STREAM, 0);
455 PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno));
/* Best effort: keep the fd from leaking across exec; failure is only a
 * warning, not fatal. */
459 flag = fcntl(fd, F_GETFD);
460 if (fcntl(fd, F_SETFD, flag | FD_CLOEXEC) < 0)
461 PMD_DRV_LOG(WARNING, "fcntl failed, %s", strerror(errno));
463 memset(&un, 0, sizeof(un));
464 un.sun_family = AF_UNIX;
/* strlcpy guarantees NUL-termination of sun_path. */
465 strlcpy(un.sun_path, dev->path, sizeof(un.sun_path));
/* Server mode: bind/listen and wait for the backend to connect to us. */
467 if (dev->is_server) {
469 if (virtio_user_start_server(dev, &un) < 0) {
470 PMD_DRV_LOG(ERR, "virtio-user startup fails in server mode");
/* Client mode: connect to the backend's socket. */
476 if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
477 PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno));
/*
 * Enable or disable both vrings (RX and TX) of queue pair @pair_idx via
 * VHOST_USER_SET_VRING_ENABLE, caching the state in dev->qp_enabled so
 * repeat calls with the same value are no-ops.
 * Parameter list tail, state initializer and returns are elided here.
 */
488 vhost_user_enable_queue_pair(struct virtio_user_dev *dev,
/* Already in the requested state — nothing to do. */
494 if (dev->qp_enabled[pair_idx] == enable)
/* One SET_VRING_ENABLE per ring of the pair (RX then TX). */
497 for (i = 0; i < 2; ++i) {
498 struct vhost_vring_state state = {
499 .index = pair_idx * 2 + i,
503 if (vhost_user_sock(dev, VHOST_USER_SET_VRING_ENABLE, &state))
/* Record the new state only after both rings succeeded. */
507 dev->qp_enabled[pair_idx] = enable;
/* Backend ops vtable binding the vhost-user implementation above into the
 * generic virtio-user device layer (closing brace not visible in chunk). */
511 struct virtio_user_backend_ops virtio_ops_user = {
512 .setup = vhost_user_setup,
513 .send_request = vhost_user_sock,
514 .enable_qp = vhost_user_enable_queue_pair