/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t *size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!*size))
		return 0;

	tmp_size = *size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == *size)
		return vva;

	iova += tmp_size;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
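/*
 * Usage sketch (illustrative, not part of this file): datapath code is
 * expected to hold the IOTLB read lock for a whole burst and translate
 * through the vhost_iova_to_vva() wrapper, retrying once the miss
 * request above has been answered. "dev", "vq" and "desc_addr" are
 * placeholders for datapath state.
 *
 *	uint64_t len = sizeof(struct vring_desc);
 *	struct vring_desc *desc;
 *
 *	vhost_user_iotlb_rd_lock(vq);
 *	desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 *			desc_addr, &len, VHOST_ACCESS_RO);
 *	if (!desc || len != sizeof(struct vring_desc))
 *		goto out_unlock; (miss reported, retry on a later burst)
 *	vhost_user_iotlb_rd_unlock(vq);
 */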
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++)
		cleanup_vq(dev->virtqueue[i], destroy);
}

void
free_vq(struct vhost_virtqueue *vq)
{
	rte_free(vq->shadow_used_ring);
	rte_free(vq->batch_copy_elems);
	rte_mempool_free(vq->iotlb_pool);
	rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev->virtqueue[i]);

	rte_free(dev);
}
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_desc) * vq->size;
	size = req_size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.desc_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->desc || size != req_size)
		return -1;

	req_size = sizeof(struct vring_avail);
	req_size += sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.avail_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->avail || size != req_size)
		return -1;

	req_size = sizeof(struct vring_used);
	req_size += sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.used_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->used || size != req_size)
		return -1;

	return 0;
}
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_packed_desc) * vq->size;
	size = req_size;
	vq->desc_packed =
		(struct vring_packed_desc *)(uintptr_t)vhost_iova_to_vva(dev,
					vq, vq->ring_addrs.desc_user_addr,
					&size, VHOST_ACCESS_RW);
	if (!vq->desc_packed || size != req_size)
		return -1;

	return 0;
}
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	if (vq_is_packed(dev)) {
		if (vring_translate_packed(dev, vq) < 0)
			return -1;
	} else {
		if (vring_translate_split(dev, vq) < 0)
			return -1;
	}
out:
	vq->access_ok = 1;

	return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = 0;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}
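/*
 * Illustrative pairing (an assumption based on how the rx/tx paths use
 * these helpers): after an invalidation, access_ok is 0 and the rings
 * must be re-translated before they are dereferenced again.
 *
 *	if (unlikely(vq->access_ok == 0))
 *		if (unlikely(vring_translate(dev, vq) < 0))
 *			return 0;
 */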
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to init vring, out of bound (%d)\n",
			vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

	vhost_user_iotlb_init(dev, vring_idx);
	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	TAILQ_INIT(&vq->zmbuf_list);
}
static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to reset vring, out of bound (%d)\n",
			vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
	if (vq == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for vring:%u.\n", vring_idx);
		return -1;
	}

	dev->virtqueue[vring_idx] = vq;
	init_vring_queue(dev, vring_idx);
	rte_spinlock_init(&vq->access_lock);
	vq->avail_wrap_counter = 1;
	vq->used_wrap_counter = 1;

	dev->nr_vring += 1;

	return 0;
}
/*
 * Reset some variables in device structure, while keeping a few
 * others untouched, such as vid, ifname, nr_vring: they should
 * remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}
/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for new dev.\n");
		return -1;
	}

	vhost_devices[i] = dev;
	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;
	dev->vdpa_dev_id = -1;
	rte_spinlock_init(&dev->slave_req_lock);

	return i;
}
void
vhost_destroy_device_notify(struct virtio_net *dev)
{
	struct rte_vdpa_device *vdpa_dev;
	int did;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		did = dev->vdpa_dev_id;
		vdpa_dev = rte_vdpa_get_device(did);
		if (vdpa_dev && vdpa_dev->ops->dev_close)
			vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(dev->vid);
	}
}
/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}
void
vhost_attach_vdpa_device(int vid, int did)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (rte_vdpa_get_device(did) == NULL)
		return;

	dev->vdpa_dev_id = did;
}

void
vhost_detach_vdpa_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_user_host_notifier_ctrl(vid, false);

	dev->vdpa_dev_id = -1;
}
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
void
vhost_enable_dequeue_zero_copy(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->dequeue_zero_copy = 1;
}

void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}
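/*
 * Example (illustrative, not part of this file): an application
 * propagating the guest-negotiated MTU to a physical port; "portid" is
 * a placeholder.
 *
 *	uint16_t mtu;
 *
 *	if (rte_vhost_get_mtu(vid, &mtu) == 0)
 *		rte_eth_dev_set_mtu(portid, mtu);
 */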
int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %s\n",
			vid, rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
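/*
 * Example (illustrative): allocating the mbuf pool that will back this
 * device on the same NUMA socket as its guest memory; the pool name and
 * sizes are placeholders.
 *
 *	int node = rte_vhost_get_numa_node(vid);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("vhost-pool",
 *			16384, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			node < 0 ? (int)rte_socket_id() : node);
 */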
uint16_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}
int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}
int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	*features = dev->features;
	return 0;
}
int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
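/*
 * Example (illustrative): walking the snapshot returned above; the
 * caller owns the copy and must release it with free().
 *
 *	struct rte_vhost_memory *mem;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) == 0) {
 *		for (i = 0; i < mem->nregions; i++)
 *			printf("region %u: GPA 0x%" PRIx64 " size 0x%" PRIx64 "\n",
 *				i, mem->regions[i].guest_phys_addr,
 *				mem->regions[i].size);
 *		free(mem);
 *	}
 */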
int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vring->desc = vq->desc;
	vring->avail = vq->avail;
	vring->used = vq->used;
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}
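/*
 * Example (illustrative): an external backend sizing its bookkeeping
 * from the vring it will process itself; "setup_backend_queue" is a
 * hypothetical application helper.
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, vring_idx, &vring) == 0)
 *		setup_backend_queue(vring.desc, vring.size, vring.kickfd);
 */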
int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vhost_vring_call(dev, vq);
	return 0;
}
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq->enabled)
		return 0;

	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
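/*
 * Example (illustrative): skipping an empty ring before paying for a
 * dequeue call; VIRTIO_TXQ, mbuf_pool, pkts and MAX_PKT_BURST are
 * placeholders borrowed from the vhost sample application.
 *
 *	if (rte_vhost_avail_entries(vid, VIRTIO_TXQ) > 0)
 *		count = rte_vhost_dequeue_burst(vid, VIRTIO_TXQ,
 *				mbuf_pool, pkts, MAX_PKT_BURST);
 */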
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (enable)
		dev->virtqueue[queue_id]->used->flags &=
			~VRING_USED_F_NO_NOTIFY;
	else
		dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
	return 0;
}
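/*
 * Example (illustrative): a polling backend suppressing guest kicks
 * while it spins on the ring, then restoring them before it blocks;
 * "process_queue" is a hypothetical application helper.
 *
 *	rte_vhost_enable_guest_notification(vid, queue_id, 0);
 *	while (polling)
 *		process_queue(vid, queue_id);
 *	rte_vhost_enable_guest_notification(vid, queue_id, 1);
 */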
void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}
void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}
uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
		return 0;

	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
int rte_vhost_get_vdpa_device_id(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	return dev->vdpa_dev_id;
}
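/*
 * Example (illustrative, assuming the rte_vdpa ops layout used
 * elsewhere in this library): resolving the vDPA device bound to a vid
 * and querying it through the generic ops table.
 *
 *	int did = rte_vhost_get_vdpa_device_id(vid);
 *	struct rte_vdpa_device *vdpa_dev = rte_vdpa_get_device(did);
 *	uint32_t queue_num = 0;
 *
 *	if (vdpa_dev && vdpa_dev->ops->get_queue_num)
 *		vdpa_dev->ops->get_queue_num(did, &queue_num);
 */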
int rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || log_base == NULL || log_size == NULL)
		return -1;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return -1;
	}

	*log_base = dev->log_base;
	*log_size = dev->log_size;

	return 0;
}
int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return -1;
	}

	*last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
	*last_used_idx = dev->virtqueue[queue_id]->last_used_idx;

	return 0;
}
int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
		RTE_LOG(ERR, VHOST_DATA,
			"(%d) %s: built-in vhost net backend is disabled.\n",
			dev->vid, __func__);
		return -1;
	}

	dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
	dev->virtqueue[queue_id]->last_used_idx = last_used_idx;

	return 0;
}
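/*
 * Example (illustrative): how a migration-aware backend might pair the
 * two calls above, snapshotting ring positions on the source once the
 * datapath is quiesced and restoring them on the destination before
 * processing resumes.
 *
 *	uint16_t avail_idx, used_idx;
 *
 *	rte_vhost_get_vring_base(vid, queue_id, &avail_idx, &used_idx);
 *	rte_vhost_set_vring_base(vid, queue_id, avail_idx, used_idx);
 */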