/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)

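/*
 * Translation table from vhost-user request IDs to the equivalent vhost-vdpa
 * ioctl requests. It is used by vhost_vdpa_send_request() for the few
 * messages that do not yet have a dedicated backend op.
 */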
static uint64_t vhost_req_user_to_vdpa[] = {
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
	[VHOST_USER_SET_STATUS] = VHOST_VDPA_SET_STATUS,
	[VHOST_USER_GET_STATUS] = VHOST_VDPA_GET_STATUS,
};

/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO 0x1
#define VHOST_ACCESS_WO 0x2
#define VHOST_ACCESS_RW 0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS 1
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
#define VHOST_IOTLB_BATCH_BEGIN 5
#define VHOST_IOTLB_BATCH_END 6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

/* Layout must match the kernel's struct vhost_msg_v2 */
struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};

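/*
 * Thin wrapper around ioctl() on the vhost-vdpa character device that logs
 * an error (with errno) when the request fails.
 */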
static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_OWNER, NULL);
}

static int
vhost_vdpa_get_backend_features(struct virtio_user_dev *dev, uint64_t *features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_backend_features(struct virtio_user_dev *dev, uint64_t features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;

	ret = vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	/* Multiqueue not supported for now */
	*features &= ~(1ULL << VIRTIO_NET_F_MQ);

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	/* vhost-vdpa maps memory through the IOTLB interface, so the
	 * VIRTIO_F_IOMMU_PLATFORM feature must always be negotiated.
	 */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_FEATURES, &features);
}

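/*
 * IOTLB batching: when the backend advertises VHOST_BACKEND_F_IOTLB_BATCH,
 * several IOTLB updates can be grouped between a BATCH_BEGIN and a BATCH_END
 * message so the backend commits them together. Both helpers silently
 * succeed when batching has not been negotiated, and they require the
 * VHOST_BACKEND_F_IOTLB_MSG_V2 message format.
 */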
static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

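/*
 * Single IOTLB update/invalidate: with vhost-vdpa, mappings are programmed
 * by writing VHOST_IOTLB_MSG_V2 messages to the device file descriptor
 * rather than through a dedicated ioctl.
 */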
static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

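/*
 * Batched variants exposed through the backend ops: a single map/unmap is
 * wrapped between IOTLB batch begin/end messages so that backends supporting
 * batching commit the change in one go.
 */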
static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

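/*
 * Memory-segment walk callbacks used when programming the memory table:
 * vhost_vdpa_map_contig() maps whole contiguous chunks of internal memory
 * (IOVA-as-VA mode), while vhost_vdpa_map() maps individual segments and
 * skips anything already covered or not mappable.
 */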
static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* external segments are handled by the page-by-page walk */
	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

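/*
 * (Re)program the whole memory table: invalidate the previous mappings over
 * the full IOVA range, then map every DPDK memory segment again, all within
 * one IOTLB batch when the backend supports it.
 */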
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

/* With the features below, vhost-vdpa does not need to do checksum or TSO:
 * this information is passed to virtio_user through the virtio-net header.
 */
#define VHOST_VDPA_GUEST_OFFLOADS_MASK	\
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
	(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
	(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
	(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
	(1ULL << VIRTIO_NET_F_GUEST_UFO))

#define VHOST_VDPA_HOST_OFFLOADS_MASK	\
	((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
	(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
	(1ULL << VIRTIO_NET_F_CSUM))

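/*
 * Legacy request path: translates a vhost-user request into its vhost-vdpa
 * ioctl through vhost_req_user_to_vdpa[], for the few messages not yet
 * covered by dedicated backend ops (status get/set, reset owner).
 */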
static int
vhost_vdpa_send_request(struct virtio_user_dev *dev,
			enum vhost_user_request req,
			void *arg)
{
	int ret = -1;
	uint64_t req_vdpa;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	req_vdpa = vhost_req_user_to_vdpa[req];

	switch (req_vdpa) {
	case VHOST_SET_VRING_ADDR:
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
				dev->vhostfd, *(unsigned int *)arg);
		break;
	default:
		break;
	}

	ret = ioctl(dev->vhostfd, req_vdpa, arg);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
				vhost_msg_strings[req], strerror(errno));

	return ret;
}

/**
 * Set up the environment to talk with a vhost-vdpa backend.
 *
 * @return
 *   - (-1) if the setup failed;
 *   - (>=0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	uint32_t did = (uint32_t)-1;

	dev->vhostfd = open(dev->path, O_RDWR);
	if (dev->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		return -1;
	}

	if (ioctl(dev->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
		return -1;
	}

	return 0;
}

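/*
 * Enable or disable both vrings of a queue pair through
 * VHOST_VDPA_SET_VRING_ENABLE, caching the result in dev->qp_enabled[] to
 * avoid redundant ioctls.
 */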
static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			     uint16_t pair_idx,
			     int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}

struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.get_protocol_features = vhost_vdpa_get_backend_features,
	.set_protocol_features = vhost_vdpa_set_backend_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.set_vring_call = vhost_vdpa_set_vring_call,
	.set_vring_kick = vhost_vdpa_set_vring_kick,
	.set_vring_addr = vhost_vdpa_set_vring_addr,
	.send_request = vhost_vdpa_send_request,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
};