/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Intel Corporation
 */

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"
#include "vhost_kernel_tap.h"

struct vhost_memory_kernel {
	uint32_t nregions;
	uint32_t padding;
	struct vhost_memory_region regions[0];
};

/* vhost kernel ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_MEM_TABLE _IOW(VHOST_VIRTIO, 0x03, struct vhost_memory_kernel)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)

/* With the features below, vhost-kernel does not need to do checksum and TSO:
 * this info is passed to virtio_user through the virtio net header.
 */
#define VHOST_KERNEL_GUEST_OFFLOADS_MASK	\
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_GUEST_ECN)  |	\
	 (1ULL << VIRTIO_NET_F_GUEST_UFO))

/* With the features below, when a flow goes from virtio_user to vhost-kernel:
 * (1) if the flow goes up through the kernel networking stack, there is no
 *     need to verify the checksum, which saves CPU cycles;
 * (2) if the flow goes through a Linux bridge and out through an interface
 *     (kernel driver), checksum and TSO are done by GSO in the kernel, or
 *     even offloaded to the real physical device.
 */
#define VHOST_KERNEL_HOST_OFFLOADS_MASK		\
	((1ULL << VIRTIO_NET_F_HOST_TSO4) |	\
	 (1ULL << VIRTIO_NET_F_HOST_TSO6) |	\
	 (1ULL << VIRTIO_NET_F_CSUM))

static uint64_t max_regions = 64;
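
/* Query the vhost kernel module for its max_mem_regions parameter so the
 * memory table below can use as many regions as the module allows; keep the
 * default of 64 if the parameter cannot be read.
 */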
static void
get_vhost_kernel_max_regions(void)
{
	int fd;
	char buf[20] = {'\0'};

	fd = open("/sys/module/vhost/parameters/max_mem_regions", O_RDONLY);
	if (fd < 0)
		return;

	if (read(fd, buf, sizeof(buf) - 1) > 0)
		max_regions = strtoull(buf, NULL, 10);

	close(fd);
}
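
/* Issue an ioctl on a vhost fd, logging the request code and errno on failure. */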
static int
vhost_kernel_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-kernel ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_kernel_set_owner(struct virtio_user_dev *dev)
{
	return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_OWNER, NULL);
}
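
/* Report the backend feature set: the bits advertised by vhost-net, plus the
 * offload and multi-queue bits that the tap device can provide but vhost-net
 * does not claim.
 */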
static int
vhost_kernel_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;
	unsigned int tap_features;

	ret = vhost_kernel_ioctl(dev->vhostfds[0], VHOST_GET_FEATURES, features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	ret = tap_support_features(&tap_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to get TAP features");
		return -1;
	}

	/* With tap as the backend, all these features are supported
	 * but not claimed by vhost-net, so we add them back when
	 * reporting to the upper layer.
	 */
	if (tap_features & IFF_VNET_HDR) {
		*features |= VHOST_KERNEL_GUEST_OFFLOADS_MASK;
		*features |= VHOST_KERNEL_HOST_OFFLOADS_MASK;
	}

	/* vhost-kernel does not declare this feature, but it does
	 * support multi-queue.
	 */
	if (tap_features & IFF_MULTI_QUEUE)
		*features |= (1ull << VIRTIO_NET_F_MQ);

	return 0;
}
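
/* Mask out the bits that vhost-net does not handle (the offload and MQ bits
 * are taken care of by the tap device) before programming the features.
 */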
static int
vhost_kernel_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	/* We don't need memory protection here */
	features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
	/* VHOST kernel does not know about the flags below */
	features &= ~VHOST_KERNEL_GUEST_OFFLOADS_MASK;
	features &= ~VHOST_KERNEL_HOST_OFFLOADS_MASK;
	features &= ~(1ULL << VIRTIO_NET_F_MQ);

	return vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_FEATURES, &features);
}
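
/* Callback for rte_memseg_list_walk_thread_unsafe(): record one memseg list
 * as a single vhost memory region, with the VA used as both the guest
 * physical address and the userspace address.
 */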
static int
add_memseg_list(const struct rte_memseg_list *msl, void *arg)
{
	struct vhost_memory_kernel *vm = arg;
	struct vhost_memory_region *mr;
	void *start_addr;
	uint64_t len;

	if (msl->external)
		return 0;

	if (vm->nregions >= max_regions)
		return -1;

	start_addr = msl->base_va;
	len = msl->page_sz * msl->memseg_arr.len;

	mr = &vm->regions[vm->nregions++];

	mr->guest_phys_addr = (uint64_t)(uintptr_t)start_addr;
	mr->userspace_addr = (uint64_t)(uintptr_t)start_addr;
	mr->memory_size = len;
	mr->mmap_offset = 0; /* flags_padding */

	PMD_DRV_LOG(DEBUG, "index=%u addr=%p len=%" PRIu64,
			vm->nregions - 1, start_addr, len);

	return 0;
}

/* By default, the vhost kernel module allows 64 regions, but DPDK may have
 * many more memory regions. The function below treats each contiguous memory
 * space reserved by DPDK as one region.
 */
static int
vhost_kernel_set_memory_table(struct virtio_user_dev *dev)
{
	struct vhost_memory_kernel *vm;
	int ret;

	vm = malloc(sizeof(struct vhost_memory_kernel) +
			max_regions *
			sizeof(struct vhost_memory_region));
	if (!vm)
		goto err;

	vm->nregions = 0;
	vm->padding = 0;

	/*
	 * The memory lock has already been taken by memory subsystem
	 * or virtio_user_start_device().
	 */
	ret = rte_memseg_list_walk_thread_unsafe(add_memseg_list, vm);
	if (ret < 0)
		goto err_free;

	ret = vhost_kernel_ioctl(dev->vhostfds[0], VHOST_SET_MEM_TABLE, vm);
	if (ret < 0)
		goto err_free;

	free(vm);
	return 0;

err_free:
	free(vm);
err:
	PMD_DRV_LOG(ERR, "Failed to set memory table");
	return -1;
}
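
/* Translation table from the vhost-user request codes used internally by
 * virtio_user to the corresponding vhost kernel ioctls.
 */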
static uint64_t vhost_req_user_to_kernel[] = {
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
	[VHOST_USER_SET_VRING_CALL] = VHOST_SET_VRING_CALL,
	[VHOST_USER_SET_VRING_NUM] = VHOST_SET_VRING_NUM,
	[VHOST_USER_SET_VRING_BASE] = VHOST_SET_VRING_BASE,
	[VHOST_USER_GET_VRING_BASE] = VHOST_GET_VRING_BASE,
	[VHOST_USER_SET_VRING_ADDR] = VHOST_SET_VRING_ADDR,
	[VHOST_USER_SET_VRING_KICK] = VHOST_SET_VRING_KICK,
};
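
/* Send a vhost-user style request to the kernel backend. Per-vring requests
 * are routed to the vhost fd that owns the queue pair; other requests are
 * issued on every vhost fd.
 */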
static int
vhost_kernel_send_request(struct virtio_user_dev *dev,
			  enum vhost_user_request req,
			  void *arg)
{
	int ret = -1;
	unsigned int i;
	uint64_t req_kernel;
	int vhostfd;
	unsigned int queue_sel;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	req_kernel = vhost_req_user_to_kernel[req];

	switch (req_kernel) {
	case VHOST_SET_VRING_NUM:
	case VHOST_SET_VRING_ADDR:
	case VHOST_SET_VRING_BASE:
	case VHOST_GET_VRING_BASE:
	case VHOST_SET_VRING_KICK:
	case VHOST_SET_VRING_CALL:
		queue_sel = *(unsigned int *)arg;
		vhostfd = dev->vhostfds[queue_sel / 2];
		*(unsigned int *)arg = queue_sel % 2;
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
			    vhostfd, *(unsigned int *)arg);
		break;
	default:
		vhostfd = -1;
	}

	if (vhostfd == -1) {
		for (i = 0; i < dev->max_queue_pairs; ++i) {
			if (dev->vhostfds[i] < 0)
				continue;

			ret = ioctl(dev->vhostfds[i], req_kernel, arg);
			if (ret < 0)
				break;
		}
	} else {
		ret = ioctl(vhostfd, req_kernel, arg);
	}

	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
			    vhost_msg_strings[req], strerror(errno));

	return ret;
}

/**
 * Set up the environment to talk to a vhost kernel backend.
 *
 * @return
 *   - (-1) if failed to set up;
 *   - (>=0) if successful.
 */
static int
vhost_kernel_setup(struct virtio_user_dev *dev)
{
	int vhostfd;
	uint32_t i;

	get_vhost_kernel_max_regions();

	for (i = 0; i < dev->max_queue_pairs; ++i) {
		vhostfd = open(dev->path, O_RDWR);
		if (vhostfd < 0) {
			PMD_DRV_LOG(ERR, "fail to open %s, %s",
				    dev->path, strerror(errno));
			return -1;
		}

		dev->vhostfds[i] = vhostfd;
	}

	return 0;
}
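
/* Attach (tapfd >= 0) or detach (tapfd == -1) a tap device as the backend of
 * both vrings (index 0 and 1) of one vhost-net fd.
 */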
static int
vhost_kernel_set_backend(int vhostfd, int tapfd)
{
	struct vhost_vring_file f;

	f.fd = tapfd;
	f.index = 0;
	if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
		PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
				strerror(errno));
		return -1;
	}

	f.index = 1;
	if (ioctl(vhostfd, VHOST_NET_SET_BACKEND, &f) < 0) {
		PMD_DRV_LOG(ERR, "VHOST_NET_SET_BACKEND fails, %s",
				strerror(errno));
		return -1;
	}

	return 0;
}
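
/* Enable or disable one queue pair: open the tap device on first use, program
 * its offloads and multi-queue state, and attach it to or detach it from the
 * corresponding vhost fd.
 */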
static int
vhost_kernel_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int hdr_size;
	int vhostfd;
	int tapfd;
	int req_mq = (dev->max_queue_pairs > 1);

	vhostfd = dev->vhostfds[pair_idx];

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	if (!enable) {
		tapfd = dev->tapfds[pair_idx];
		if (vhost_kernel_set_backend(vhostfd, -1) < 0) {
			PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
			return -1;
		}
		if (req_mq && vhost_kernel_tap_set_queue(tapfd, false) < 0) {
			PMD_DRV_LOG(ERR, "fail to disable tap for vhost kernel");
			return -1;
		}
		dev->qp_enabled[pair_idx] = false;
		return 0;
	}

	if (dev->tapfds[pair_idx] >= 0) {
		tapfd = dev->tapfds[pair_idx];
		if (vhost_kernel_tap_set_offload(tapfd, dev->features) == -1)
			return -1;
		if (req_mq && vhost_kernel_tap_set_queue(tapfd, true) < 0) {
			PMD_DRV_LOG(ERR, "fail to enable tap for vhost kernel");
			return -1;
		}
		goto set_backend;
	}

	if ((dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ||
	    (dev->features & (1ULL << VIRTIO_F_VERSION_1)))
		hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		hdr_size = sizeof(struct virtio_net_hdr);

	tapfd = vhost_kernel_open_tap(&dev->ifname, hdr_size, req_mq,
			(char *)dev->mac_addr, dev->features);
	if (tapfd < 0) {
		PMD_DRV_LOG(ERR, "fail to open tap for vhost kernel");
		return -1;
	}

	dev->tapfds[pair_idx] = tapfd;

set_backend:
	if (vhost_kernel_set_backend(vhostfd, tapfd) < 0) {
		PMD_DRV_LOG(ERR, "fail to set backend for vhost kernel");
		return -1;
	}

	dev->qp_enabled[pair_idx] = true;
	return 0;
}
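
/* Backend ops used by virtio_user to drive a vhost kernel (vhost-net) backend. */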
struct virtio_user_backend_ops virtio_ops_kernel = {
	.setup = vhost_kernel_setup,
	.set_owner = vhost_kernel_set_owner,
	.get_features = vhost_kernel_get_features,
	.set_features = vhost_kernel_set_features,
	.set_memory_table = vhost_kernel_set_memory_table,
	.send_request = vhost_kernel_send_request,
	.enable_qp = vhost_kernel_enable_queue_pair
};