/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Red Hat Inc.
 */

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_memory.h>

#include "vhost.h"
#include "virtio_user_dev.h"

struct vhost_vdpa_data {
	int vhostfd;
	uint64_t protocol_features;
};

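/*
 * Backend (protocol) features this driver knows how to use: v2 IOTLB
 * messages and IOTLB batching. Anything else the kernel advertises is
 * masked out before being acknowledged back to the backend.
 */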
#define VHOST_VDPA_SUPPORTED_BACKEND_FEATURES		\
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
	1ULL << VHOST_BACKEND_F_IOTLB_BATCH)

/* vhost kernel & vdpa ioctls */
#define VHOST_VIRTIO 0xAF
#define VHOST_GET_FEATURES _IOR(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_FEATURES _IOW(VHOST_VIRTIO, 0x00, __u64)
#define VHOST_SET_OWNER _IO(VHOST_VIRTIO, 0x01)
#define VHOST_RESET_OWNER _IO(VHOST_VIRTIO, 0x02)
#define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64)
#define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int)
#define VHOST_SET_VRING_NUM _IOW(VHOST_VIRTIO, 0x10, struct vhost_vring_state)
#define VHOST_SET_VRING_ADDR _IOW(VHOST_VIRTIO, 0x11, struct vhost_vring_addr)
#define VHOST_SET_VRING_BASE _IOW(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_GET_VRING_BASE _IOWR(VHOST_VIRTIO, 0x12, struct vhost_vring_state)
#define VHOST_SET_VRING_KICK _IOW(VHOST_VIRTIO, 0x20, struct vhost_vring_file)
#define VHOST_SET_VRING_CALL _IOW(VHOST_VIRTIO, 0x21, struct vhost_vring_file)
#define VHOST_SET_VRING_ERR _IOW(VHOST_VIRTIO, 0x22, struct vhost_vring_file)
#define VHOST_NET_SET_BACKEND _IOW(VHOST_VIRTIO, 0x30, struct vhost_vring_file)
#define VHOST_VDPA_GET_DEVICE_ID _IOR(VHOST_VIRTIO, 0x70, __u32)
#define VHOST_VDPA_GET_STATUS _IOR(VHOST_VIRTIO, 0x71, __u8)
#define VHOST_VDPA_SET_STATUS _IOW(VHOST_VIRTIO, 0x72, __u8)
#define VHOST_VDPA_GET_CONFIG _IOR(VHOST_VIRTIO, 0x73, struct vhost_vdpa_config)
#define VHOST_VDPA_SET_CONFIG _IOW(VHOST_VIRTIO, 0x74, struct vhost_vdpa_config)
#define VHOST_VDPA_SET_VRING_ENABLE _IOW(VHOST_VIRTIO, 0x75, struct vhost_vring_state)
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)

/* no alignment requirement */
struct vhost_iotlb_msg {
	uint64_t iova;
	uint64_t size;
	uint64_t uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	uint8_t perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
#define VHOST_IOTLB_BATCH_BEGIN    5
#define VHOST_IOTLB_BATCH_END      6
	uint8_t type;
};

#define VHOST_IOTLB_MSG_V2 0x2

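/*
 * The structures below mirror the vhost-vdpa kernel UAPI:
 * vhost_vdpa_config carries a variable-length device config payload for
 * the VHOST_VDPA_GET/SET_CONFIG ioctls, and vhost_msg (v2 layout) is
 * written directly to the device fd to issue IOTLB requests.
 */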
struct vhost_vdpa_config {
	uint32_t off;
	uint32_t len;
	uint8_t buf[0];
};

struct vhost_msg {
	uint32_t type;
	uint32_t reserved;
	union {
		struct vhost_iotlb_msg iotlb;
		uint8_t padding[64];
	};
};

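/*
 * Thin wrapper around ioctl() used by all the accessors below; it only
 * adds error logging with the failing request number.
 */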
static int
vhost_vdpa_ioctl(int fd, uint64_t request, void *arg)
{
	int ret;

	ret = ioctl(fd, request, arg);
	if (ret) {
		PMD_DRV_LOG(ERR, "Vhost-vDPA ioctl %"PRIu64" failed (%s)",
				request, strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_owner(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_OWNER, NULL);
}

static int
vhost_vdpa_get_protocol_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_BACKEND_FEATURES, features);
}

static int
vhost_vdpa_set_protocol_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_BACKEND_FEATURES, &features);
}

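/*
 * Feature negotiation with vhost-vdpa is two-fold: the virtio device
 * features come from VHOST_GET_FEATURES, while the backend (protocol)
 * features are negotiated separately through
 * VHOST_GET/SET_BACKEND_FEATURES, as done below.
 */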
static int
vhost_vdpa_get_features(struct virtio_user_dev *dev, uint64_t *features)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	int ret;

	ret = vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_FEATURES, features);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get features");
		return -1;
	}

	/* Multiqueue not supported for now */
	*features &= ~(1ULL << VIRTIO_NET_F_MQ);

	/* Negotiated vDPA backend features */
	ret = vhost_vdpa_get_protocol_features(dev, &data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to get backend features");
		return -1;
	}

	data->protocol_features &= VHOST_VDPA_SUPPORTED_BACKEND_FEATURES;

	ret = vhost_vdpa_set_protocol_features(dev, data->protocol_features);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to set backend features");
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_set_features(struct virtio_user_dev *dev, uint64_t features)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	/* vhost-vdpa expects the device to go through the IOMMU/IOTLB */
	features |= 1ULL << VIRTIO_F_IOMMU_PLATFORM;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_FEATURES, &features);
}

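/*
 * When the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, several IOTLB
 * updates can be enclosed between BATCH_BEGIN and BATCH_END messages to
 * cut down the number of backend round-trips. Both helpers are no-ops
 * when batching is not supported, so callers can use them
 * unconditionally.
 */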
static int
vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
		return 0;

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

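/*
 * Map a host virtual address range to the given IOVA with an IOTLB
 * UPDATE message. The mapping is requested read-write since the device
 * must both read descriptors and write used entries.
 */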
static int
vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)addr;
	msg.iotlb.size = len;
	msg.iotlb.perm = VHOST_ACCESS_RW;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", addr: %p, len: 0x%zx",
			__func__, iova, addr, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB update (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

static int
vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
				  uint64_t iova, size_t len)
{
	struct vhost_vdpa_data *data = dev->backend_data;
	struct vhost_msg msg = {};

	if (!(data->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
		return -1;
	}

	msg.type = VHOST_IOTLB_MSG_V2;
	msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
	msg.iotlb.iova = iova;
	msg.iotlb.size = len;

	PMD_DRV_LOG(DEBUG, "%s: iova: 0x%" PRIx64 ", len: 0x%zx",
			__func__, iova, len);

	if (write(data->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
		PMD_DRV_LOG(ERR, "Failed to send IOTLB invalidate (%s)",
				strerror(errno));
		return -1;
	}

	return 0;
}

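/*
 * Single-entry map/unmap operations wrapped in an IOTLB batch; these are
 * the dma_map/dma_unmap callbacks exposed through virtio_ops_vdpa.
 */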
static int
vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_map(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
				  uint64_t iova, size_t len)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);

	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

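/*
 * Callbacks for the EAL memseg walk APIs: the contiguous variant maps
 * whole chunks at once (only valid with IOVA as VA), while the
 * per-segment variant picks up external heaps and IOVA-as-PA layouts.
 */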
static int
vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
		const struct rte_memseg *ms, size_t len, void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* external memory is handled by the per-segment walk */
	if (msl->external)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, len);
}

static int
vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
		void *arg)
{
	struct virtio_user_dev *dev = arg;

	/* skip external memory that isn't a heap */
	if (msl->external && !msl->heap)
		return 0;

	/* skip any segments with invalid IOVA addresses */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	/* if IOVA mode is VA, we've already mapped the internal segments */
	if (!msl->external && rte_eal_iova_mode() == RTE_IOVA_VA)
		return 0;

	return vhost_vdpa_dma_map(dev, ms->addr, ms->iova, ms->len);
}

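/*
 * Rebuild the device IOTLB from scratch: invalidate the whole
 * [0; SIZE_MAX] range first, then walk the EAL memory segments and map
 * them again, all within a single IOTLB batch when supported.
 */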
static int
vhost_vdpa_set_memory_table(struct virtio_user_dev *dev)
{
	int ret;

	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
		return -1;

	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);

	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
		/* with IOVA as VA mode, we can get away with mapping contiguous
		 * chunks rather than going page-by-page.
		 */
		ret = rte_memseg_contig_walk_thread_unsafe(
				vhost_vdpa_map_contig, dev);
		if (ret)
			goto batch_end;
		/* we have to continue the walk because we've skipped the
		 * external segments during the config walk.
		 */
	}
	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);

batch_end:
	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
		return -1;

	return ret;
}

static int
vhost_vdpa_set_vring_enable(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_VRING_ENABLE, state);
}

static int
vhost_vdpa_set_vring_num(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_NUM, state);
}

static int
vhost_vdpa_set_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_BASE, state);
}

static int
vhost_vdpa_get_vring_base(struct virtio_user_dev *dev, struct vhost_vring_state *state)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_GET_VRING_BASE, state);
}

static int
vhost_vdpa_set_vring_call(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_CALL, file);
}

static int
vhost_vdpa_set_vring_kick(struct virtio_user_dev *dev, struct vhost_vring_file *file)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_KICK, file);
}

static int
vhost_vdpa_set_vring_addr(struct virtio_user_dev *dev, struct vhost_vring_addr *addr)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_SET_VRING_ADDR, addr);
}

static int
vhost_vdpa_get_status(struct virtio_user_dev *dev, uint8_t *status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_GET_STATUS, status);
}

static int
vhost_vdpa_set_status(struct virtio_user_dev *dev, uint8_t status)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	return vhost_vdpa_ioctl(data->vhostfd, VHOST_VDPA_SET_STATUS, &status);
}

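/*
 * Device config space accessors. The vhost_vdpa_config header and its
 * variable-length payload are heap-allocated since the buffer size is
 * only known at run time.
 */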
static int
vhost_vdpa_get_config(struct virtio_user_dev *dev, uint8_t *data, uint32_t off, uint32_t len)
{
	struct vhost_vdpa_data *vdpa_data = dev->backend_data;
	struct vhost_vdpa_config *config;
	int ret = 0;

	config = malloc(sizeof(*config) + len);
	if (!config) {
		PMD_DRV_LOG(ERR, "Failed to allocate vDPA config data");
		return -1;
	}

	config->off = off;
	config->len = len;

	ret = vhost_vdpa_ioctl(vdpa_data->vhostfd, VHOST_VDPA_GET_CONFIG, config);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to get vDPA config (offset 0x%x, len 0x%x)", off, len);
		ret = -1;
		goto out;
	}

	memcpy(data, config->buf, len);
out:
	free(config);

	return ret;
}

static int
vhost_vdpa_set_config(struct virtio_user_dev *dev, const uint8_t *data, uint32_t off, uint32_t len)
{
	struct vhost_vdpa_data *vdpa_data = dev->backend_data;
	struct vhost_vdpa_config *config;
	int ret = 0;

	config = malloc(sizeof(*config) + len);
	if (!config) {
		PMD_DRV_LOG(ERR, "Failed to allocate vDPA config data");
		return -1;
	}

	config->off = off;
	config->len = len;

	memcpy(config->buf, data, len);

	ret = vhost_vdpa_ioctl(vdpa_data->vhostfd, VHOST_VDPA_SET_CONFIG, config);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to set vDPA config (offset 0x%x, len 0x%x)", off, len);
		ret = -1;
	}

	free(config);

	return ret;
}

/**
 * Set up environment to talk with a vhost vdpa backend.
 *
 * @return
 *   - (-1) if fail to set up;
 *   - (>=0) if successful.
 */
static int
vhost_vdpa_setup(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data;
	uint32_t did = (uint32_t)-1;

	data = malloc(sizeof(*data));
	if (!data) {
		PMD_DRV_LOG(ERR, "(%s) Failed to allocate backend data", dev->path);
		return -1;
	}

	data->vhostfd = open(dev->path, O_RDWR);
	if (data->vhostfd < 0) {
		PMD_DRV_LOG(ERR, "Failed to open %s: %s",
				dev->path, strerror(errno));
		free(data);
		return -1;
	}

	if (ioctl(data->vhostfd, VHOST_VDPA_GET_DEVICE_ID, &did) < 0 ||
			did != VIRTIO_ID_NETWORK) {
		PMD_DRV_LOG(ERR, "Invalid vdpa device ID: %u", did);
		close(data->vhostfd);
		free(data);
		return -1;
	}

	dev->backend_data = data;

	return 0;
}

static int
vhost_vdpa_destroy(struct virtio_user_dev *dev)
{
	struct vhost_vdpa_data *data = dev->backend_data;

	if (!data)
		return 0;

	close(data->vhostfd);

	free(data);
	dev->backend_data = NULL;

	return 0;
}

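/*
 * A queue pair is toggled by enabling or disabling its two vrings
 * (Rx at pair_idx * 2, Tx at pair_idx * 2 + 1) through
 * VHOST_VDPA_SET_VRING_ENABLE.
 */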
static int
vhost_vdpa_enable_queue_pair(struct virtio_user_dev *dev,
			       uint16_t pair_idx,
			       int enable)
{
	int i;

	if (dev->qp_enabled[pair_idx] == enable)
		return 0;

	for (i = 0; i < 2; ++i) {
		struct vhost_vring_state state = {
			.index = pair_idx * 2 + i,
			.num = enable,
		};

		if (vhost_vdpa_set_vring_enable(dev, &state))
			return -1;
	}

	dev->qp_enabled[pair_idx] = enable;

	return 0;
}

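/*
 * No extra frontend-facing backend features (e.g. server mode) apply to
 * Vhost-vDPA, so none are reported.
 */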
static int
vhost_vdpa_get_backend_features(uint64_t *features)
{
	*features = 0;

	return 0;
}

static int
vhost_vdpa_update_link_state(struct virtio_user_dev *dev __rte_unused)
{
	/* Nothing to update (for now?) */
	return 0;
}

static int
vhost_vdpa_get_intr_fd(struct virtio_user_dev *dev __rte_unused)
{
	/* No link state interrupt with Vhost-vDPA */
	return -1;
}

struct virtio_user_backend_ops virtio_ops_vdpa = {
	.setup = vhost_vdpa_setup,
	.destroy = vhost_vdpa_destroy,
	.get_backend_features = vhost_vdpa_get_backend_features,
	.set_owner = vhost_vdpa_set_owner,
	.get_features = vhost_vdpa_get_features,
	.set_features = vhost_vdpa_set_features,
	.set_memory_table = vhost_vdpa_set_memory_table,
	.set_vring_num = vhost_vdpa_set_vring_num,
	.set_vring_base = vhost_vdpa_set_vring_base,
	.get_vring_base = vhost_vdpa_get_vring_base,
	.set_vring_call = vhost_vdpa_set_vring_call,
	.set_vring_kick = vhost_vdpa_set_vring_kick,
	.set_vring_addr = vhost_vdpa_set_vring_addr,
	.get_status = vhost_vdpa_get_status,
	.set_status = vhost_vdpa_set_status,
	.get_config = vhost_vdpa_get_config,
	.set_config = vhost_vdpa_set_config,
	.enable_qp = vhost_vdpa_enable_queue_pair,
	.dma_map = vhost_vdpa_dma_map_batch,
	.dma_unmap = vhost_vdpa_dma_unmap_batch,
	.update_link_state = vhost_vdpa_update_link_state,
	.get_intr_fd = vhost_vdpa_get_intr_fd,
};