/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
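
/*
 * Note: the request values above match the Linux vhost/vhost-vDPA UAPI
 * (<linux/vhost.h>); they are presumably redefined locally so the driver
 * also builds against kernel headers that predate vDPA support.
 */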

static uint64_t vhost_req_user_to_vdpa[] = {
	[VHOST_USER_RESET_OWNER] = VHOST_RESET_OWNER,
};

/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO 0x1
#define VHOST_ACCESS_WO 0x2
#define VHOST_ACCESS_RW 0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS 1
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
#define VHOST_IOTLB_BATCH_BEGIN 5
#define VHOST_IOTLB_BATCH_END 6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};
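
/*
 * IOTLB messages are not issued with ioctl(): a complete struct vhost_msg
 * is written to the vhost-vDPA character device in a single write(), as
 * done by the IOTLB helpers below.
 */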

static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %" PRIu64 " failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_OWNER, NULL);
}
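
/*
 * vhost-vDPA has no protocol features of its own: the vhost backend
 * features (VHOST_BACKEND_F_IOTLB_MSG_V2, VHOST_BACKEND_F_IOTLB_BATCH, ...)
 * are exposed through the get/set_protocol_features ops instead.
 */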

static int
vhost_vdpa_get_backend_features(struct virtio_user_dev *dev, uint64_t *features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_backend_features(struct virtio_user_dev *dev, uint64_t features)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	int ret;

	ret = vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	/* Multiqueue not supported for now, mask the feature out */
	*features &= ~(1ULL << VIRTIO_NET_F_MQ);

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	/* vhost-vDPA expects VIRTIO_F_IOMMU_PLATFORM, as all DMA goes
	 * through the IOTLB.
	 */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_FEATURES, &features);
}
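
/*
 * When the backend negotiated VHOST_BACKEND_F_IOTLB_BATCH, several IOTLB
 * updates/invalidates can be grouped between a BATCH_BEGIN and a BATCH_END
 * message and applied by the backend in one go. Both helpers below are
 * no-ops when batching was not negotiated.
 */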

static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}
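
/*
 * Map a contiguous virtual-address range to the given IOVA range by sending
 * a VHOST_IOTLB_UPDATE message with read/write permission.
 */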

static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
		uint64_t iova, size_t len)
{
	struct vhost_msg msg = {};

	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}
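
/*
 * Batched variants used as the .dma_map/.dma_unmap backend ops: wrap a
 * single update or invalidate between batch begin/end so backends that
 * negotiated IOTLB batching see a consistent begin/end sequence.
 */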

static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
		uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}
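
/*
 * Callbacks for the rte_memseg_*_walk() APIs used below; 'arg' carries the
 * virtio_user device. Returning a negative value aborts the walk.
 */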

static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* external memory is handled by the page-by-page walk instead */
	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}
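
/*
 * (Re)build the backend IOTLB: drop all existing mappings, then walk the
 * EAL memsegs inside a single batch. In IOVA-as-VA mode, the internal
 * segments are first mapped as large contiguous chunks; the page-by-page
 * walk that follows only picks up what the contiguous walk skipped.
 */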

static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the contig walk above.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

static int
vhost_vdpa_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_GET_STATUS, status);
}

static int
vhost_vdpa_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	return vhost_vdpa_ioctl(dev->vhostfd, VHOST_VDPA_SET_STATUS, &status);
}

/* With the offload features below negotiated, vhost-vDPA does not need to
 * compute checksums or do TSO itself: the offload info is passed to
 * virtio_user through the virtio net header.
 */
#define VHOST_VDPA_GUEST_OFFLOADS_MASK \
	((1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
	 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
	 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
	 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
	 (1ULL << VIRTIO_NET_F_GUEST_UFO))

#define VHOST_VDPA_HOST_OFFLOADS_MASK \
	((1ULL << VIRTIO_NET_F_HOST_TSO4) | \
	 (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
	 (1ULL << VIRTIO_NET_F_CSUM))
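
/*
 * Catch-all request path: requests without a dedicated op are translated
 * through vhost_req_user_to_vdpa and issued as raw ioctls.
 */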

static int
vhost_vdpa_send_request(struct virtio_user_dev *dev,
			enum vhost_user_request req,
			void *arg)
{
	int ret;
	uint64_t req_vdpa;

	PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]);

	req_vdpa = vhost_req_user_to_vdpa[req];

	switch (req_vdpa) {
	case VHOST_SET_VRING_ADDR:
		PMD_DRV_LOG(DEBUG, "vhostfd=%d, index=%u",
				dev->vhostfd, *(unsigned int *)arg);
		break;
	default:
		break;
	}

	ret = ioctl(dev->vhostfd, req_vdpa, arg);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%s failed: %s",
				vhost_msg_strings[req], strerror(errno));

	return ret;
}

/**
 * Set up the environment to talk with a vhost-vDPA backend.
 *
 * @return
 *   - (-1) if failed to set up;
 *   - (0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	uint32_t did = (uint32_t)-1;

	dev->vhostfd = open(dev->path, O_RDWR);
	if (dev->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		return -1;
	}

	if (ioctl(dev->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vDPA device ID: %u", did);
		close(dev->vhostfd);
		dev->vhostfd = -1;
		return -1;
	}

	return 0;
}
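
/*
 * Queue pair N maps to vring indexes 2*N (RX) and 2*N + 1 (TX); both rings
 * are toggled together through VHOST_VDPA_SET_VRING_ENABLE.
 */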

static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			     uint16_t pair_idx,
			     int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}
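
/* Backend ops used by virtio_user when the vhost-vDPA backend is selected */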
struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.get_protocol_features = vhost_vdpa_get_backend_features,
	.set_protocol_features = vhost_vdpa_set_backend_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.set_vring_call = vhost_vdpa_set_vring_call,
	.set_vring_kick = vhost_vdpa_set_vring_kick,
	.set_vring_addr = vhost_vdpa_set_vring_addr,
	.get_status = vhost_vdpa_get_status,
	.set_status = vhost_vdpa_set_status,
	.send_request = vhost_vdpa_send_request,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
};