/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

/* Device table indexed by vid; a NULL slot is free. */
struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t *size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!*size))
		return 0;

	tmp_size = *size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == *size)
		return vva;

	iova += tmp_size;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
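/*
 * Typical data-path usage (illustrative sketch only, not code from this
 * file): callers keep the IOTLB read lock held for a whole burst and go
 * through the vhost_iova_to_vva() wrapper, treating a zero return as a
 * miss that will be satisfied by a later IOTLB update from the frontend:
 *
 *	vhost_user_iotlb_rd_lock(vq);
 *	vva = vhost_iova_to_vva(dev, vq, iova, &len, VHOST_ACCESS_RO);
 *	if (unlikely(!vva))
 *		goto out;	// retry once the IOTLB miss is resolved
 *	...
 *	vhost_user_iotlb_rd_unlock(vq);
 */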
#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * The '__sync' built-ins are deprecated, but the '__atomic' ones
	 * generate sub-optimal code with older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}
void
__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_smp_wmb();

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}
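/*
 * Worked example of the dirty-log math above: with VHOST_LOG_PAGE = 4096,
 * logging a write of len 0x1000 at guest physical address 0x2100 covers
 * pages 2 and 3 (0x2100 / 4096 = 2, and 3 * 4096 < 0x3100), so
 * vhost_set_bit() marks bits 2 and 3 of byte 0 of the shared log region.
 */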
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (unlikely(!dev->log_base))
		return;

	rte_smp_wmb();

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * The '__sync' built-ins are deprecated, but the '__atomic'
		 * ones generate sub-optimal code with older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_smp_wmb();

	vq->log_cache_nb_elem = 0;
}
static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_smp_wmb();
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}
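/*
 * Note on the cache granularity: each log_cache_entry shadows one
 * unsigned long worth of the dirty bitmap (64 bits on 64-bit targets,
 * i.e. 64 * VHOST_LOG_PAGE = 256KB of guest memory). Dirty bits are
 * accumulated locally and flushed with a single atomic OR per word in
 * __vhost_log_cache_sync(), instead of one atomic operation per page.
 */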
void
__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}
static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++)
		cleanup_vq(dev->virtqueue[i], destroy);
}
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (vq_is_packed(dev))
		rte_free(vq->shadow_used_packed);
	else
		rte_free(vq->shadow_used_split);
	rte_free(vq->batch_copy_elems);
	rte_mempool_free(vq->iotlb_pool);
	rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev, dev->virtqueue[i]);

	rte_free(dev);
}
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_desc) * vq->size;
	size = req_size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.desc_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->desc || size != req_size)
		return -1;

	req_size = sizeof(struct vring_avail);
	req_size += sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.avail_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->avail || size != req_size)
		return -1;

	req_size = sizeof(struct vring_used);
	req_size += sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.used_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->used || size != req_size)
		return -1;

	return 0;
}
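/*
 * Size check example for the split ring translation above: with a
 * 256-entry ring and no VIRTIO_RING_F_EVENT_IDX, the descriptor table
 * must map 256 * 16 = 4096 bytes, the avail ring 4 + 256 * 2 = 516 bytes
 * and the used ring 4 + 256 * 8 = 2052 bytes; EVENT_IDX adds 2 bytes to
 * the avail and used rings. A translation that comes back shorter than
 * req_size means the ring is not contiguously mapped and is rejected.
 */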
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_packed_desc) * vq->size;
	size = req_size;
	vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->desc_packed || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->driver_event || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->device_event || size != req_size)
		return -1;

	return 0;
}
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	if (vq_is_packed(dev)) {
		if (vring_translate_packed(dev, vq) < 0)
			return -1;
	} else {
		if (vring_translate_split(dev, vq) < 0)
			return -1;
	}
out:
	vq->access_ok = 1;

	return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = 0;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to init vring, index out of bounds (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

	vhost_user_iotlb_init(dev, vring_idx);
	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	TAILQ_INIT(&vq->zmbuf_list);
}
static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to reset vring, index out of bounds (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	/* Preserve the call eventfd across the queue re-initialization. */
	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
	if (vq == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for vring:%u.\n", vring_idx);
		return -1;
	}

	dev->virtqueue[vring_idx] = vq;
	init_vring_queue(dev, vring_idx);
	rte_spinlock_init(&vq->access_lock);
	vq->avail_wrap_counter = 1;
	vq->used_wrap_counter = 1;
	vq->signalled_used_valid = false;

	dev->nr_vring += 1;

	return 0;
}
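/*
 * The avail/used wrap counters start at 1, matching the packed
 * virtqueue initialization state defined by the VIRTIO 1.1 spec;
 * they are only meaningful when the packed ring layout is negotiated.
 */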
/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they should
 * remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}
/*
 * Invoked when a new vhost-user connection is established (i.e. when
 * a new virtio device is being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for new dev.\n");
		return -1;
	}

	vhost_devices[i] = dev;
	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;
	dev->vdpa_dev_id = -1;
	dev->postcopy_ufd = -1;
	rte_spinlock_init(&dev->slave_req_lock);

	return i;
}
void
vhost_destroy_device_notify(struct virtio_net *dev)
{
	struct rte_vdpa_device *vdpa_dev;
	int did;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		did = dev->vdpa_dev_id;
		vdpa_dev = rte_vdpa_get_device(did);
		if (vdpa_dev && vdpa_dev->ops->dev_close)
			vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(dev->vid);
	}
}
/*
 * Invoked when the vhost-user connection is broken (i.e. when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}
void
vhost_attach_vdpa_device(int vid, int did)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (rte_vdpa_get_device(did) == NULL)
		return;

	dev->vdpa_dev_id = did;
}
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
void
vhost_enable_dequeue_zero_copy(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->dequeue_zero_copy = 1;
}
void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || mtu == NULL)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}
int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL || numa_available() != 0)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %s\n",
			vid, rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
uint32_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring / 2;
}
uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}
int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || buf == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}
int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || features == NULL)
		return -1;

	*features = dev->features;
	return 0;
}
int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (dev == NULL || mem == NULL)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
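/*
 * The snapshot returned above lives on the libc heap: the caller owns
 * the rte_vhost_memory structure and is expected to release it with
 * free() once done with it.
 */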
int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL || vring == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vring->desc = vq->desc;
	vring->avail = vq->avail;
	vring->used = vq->used;
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}
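/*
 * Illustrative use from an external application or vDPA driver
 * (sketch only; error handling and setup_datapath() are placeholders):
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, 0, &vring) == 0)
 *		setup_datapath(vring.desc, vring.avail, vring.used,
 *			       vring.size, vring.kickfd, vring.callfd);
 */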
int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	if (vq_is_packed(dev))
		vhost_vring_call_packed(dev, vq);
	else
		vhost_vring_call_split(dev, vq);

	return 0;
}
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq->enabled)
		return 0;

	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
static __rte_always_inline void
vhost_enable_notify_split(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (enable)
			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
		else
			vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	} else {
		if (enable)
			vhost_avail_event(vq) = vq->last_avail_idx;
	}
}
static __rte_always_inline void
vhost_enable_notify_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	uint16_t flags;

	if (!enable) {
		vq->device_event->flags = VRING_EVENT_F_DISABLE;
		return;
	}

	flags = VRING_EVENT_F_ENABLE;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		flags = VRING_EVENT_F_DESC;
		vq->device_event->off_wrap = vq->last_avail_idx |
			vq->avail_wrap_counter << 15;
	}

	rte_smp_wmb();

	vq->device_event->flags = flags;
}
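/*
 * off_wrap packs the event descriptor index in bits 0-14 and the
 * avail ring wrap counter in bit 15, which is the layout the packed
 * ring driver expects when VRING_EVENT_F_DESC is advertised.
 */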
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (!dev)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (vq_is_packed(dev))
		vhost_enable_notify_packed(dev, vq, enable);
	else
		vhost_enable_notify_split(dev, vq, enable);

	return 0;
}
void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}
void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}
uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
		return 0;

	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
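/*
 * Only odd virtqueue indexes are accepted above: in the vhost-user net
 * numbering, even rings are the guest's RX rings (host enqueue side)
 * while odd rings are the guest's TX rings, and this helper reports how
 * many packets the host side could still dequeue from such a ring.
 */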
int rte_vhost_get_vdpa_device_id(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	return dev->vdpa_dev_id;
}
int rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || log_base == NULL || log_size == NULL)
		return -1;

	*log_base = dev->log_base;
	*log_size = dev->log_size;

	return 0;
}
int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	*last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
	*last_used_idx = dev->virtqueue[queue_id]->last_used_idx;

	return 0;
}
int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
	dev->virtqueue[queue_id]->last_used_idx = last_used_idx;

	return 0;
}
int rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || ops == NULL)
		return -1;

	dev->extern_ops = *ops;
	dev->extern_data = ctx;
	return 0;
}