/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!size))
		return 0;

	tmp_size = size;
	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == size)
		return vva;

	iova += tmp_size;
	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
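
/*
 * Illustrative sketch (not upstream code): how a datapath caller is expected
 * to use the translation above when VIRTIO_F_IOMMU_PLATFORM is negotiated.
 * The iotlb read lock is taken once for the whole burst, every guest IOVA is
 * translated before being dereferenced, and a zero return means the mapping
 * is not (yet) in the IOTLB cache, so the descriptor must be skipped until
 * the miss is resolved.  `desc` and `desc_addr` are hypothetical locals used
 * only for this example.
 *
 *	vhost_user_iotlb_rd_lock(vq);
 *	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
 *				      desc->len, VHOST_ACCESS_RO);
 *	if (unlikely(!desc_addr)) {
 *		// Translation miss: a miss request has been sent to the
 *		// master, give up on this descriptor for now.
 *		vhost_user_iotlb_rd_unlock(vq);
 *		return 0;
 *	}
 *	...
 *	vhost_user_iotlb_rd_unlock(vq);
 */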

struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++)
		cleanup_vq(dev->virtqueue[i], destroy);
}

void
free_vq(struct vhost_virtqueue *vq)
{
	rte_free(vq->shadow_used_ring);
	rte_free(vq->batch_copy_elems);
	rte_mempool_free(vq->iotlb_pool);
	rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev->virtqueue[i]);

	rte_free(dev);
}

int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t size;

	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	size = sizeof(struct vring_desc) * vq->size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
					vq->ring_addrs.desc_user_addr,
					size, VHOST_ACCESS_RW);
	if (!vq->desc)
		return -1;

	size = sizeof(struct vring_avail);
	size += sizeof(uint16_t) * vq->size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
					vq->ring_addrs.avail_user_addr,
					size, VHOST_ACCESS_RW);
	if (!vq->avail)
		return -1;

	size = sizeof(struct vring_used);
	size += sizeof(struct vring_used_elem) * vq->size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
					vq->ring_addrs.used_user_addr,
					size, VHOST_ACCESS_RW);
	if (!vq->used)
		return -1;

out:
	vq->access_ok = 1;

	return 0;
}
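
/*
 * Worked example (illustrative, assuming the standard split-ring layouts):
 * with vq->size = 256, the three regions translated above span
 *   desc:  256 * sizeof(struct vring_desc)      = 256 * 16 = 4096 bytes
 *   avail: sizeof(struct vring_avail) + 256 * 2 =   4 + 512 =  516 bytes
 *   used:  sizeof(struct vring_used)  + 256 * 8 =  4 + 2048 = 2052 bytes
 * Each range must be fully covered by IOTLB entries, otherwise
 * vhost_iova_to_vva() returns 0 and the translation fails.
 */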

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = 0;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}

static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to init vring, out of bound (%d)\n",
			vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

	vhost_user_iotlb_init(dev, vring_idx);
	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	TAILQ_INIT(&vq->zmbuf_list);
}

static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to reset vring, out of bound (%d)\n",
			vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}

int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
	if (vq == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for vring:%u.\n", vring_idx);
		return -1;
	}

	dev->virtqueue[vring_idx] = vq;
	init_vring_queue(dev, vring_idx);
	rte_spinlock_init(&vq->access_lock);

	dev->nr_vring += 1;

	return 0;
}

/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they
 * should stay the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}

/*
 * Invoked when a new vhost-user connection is established (when
 * a new virtio device is being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for new dev.\n");
		return -1;
	}

	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}
	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		rte_free(dev);
		return -1;
	}

	vhost_devices[i] = dev;
	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;

	return i;
}

/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(vid);
	}

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}
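
/*
 * Illustrative sketch (not upstream code): the connection handling code is
 * expected to pair the two entry points above, roughly as follows, with
 * `conn` being a hypothetical per-connection structure:
 *
 *	// New vhost-user connection: allocate a device slot.
 *	int vid = vhost_new_device();
 *	if (vid < 0)
 *		goto err;
 *	conn->vid = vid;
 *	...
 *	// Connection closed: notify the application and release the slot.
 *	vhost_destroy_device(conn->vid);
 */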

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_enable_dequeue_zero_copy(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->dequeue_zero_copy = 1;
}

void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %s\n",
			vid, rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
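
/*
 * Illustrative sketch (not upstream code): an application can use the NUMA
 * node reported above to place its mbuf pool on the same socket as the
 * guest memory.  The pool name and sizes below are arbitrary.
 *
 *	int node = rte_vhost_get_numa_node(vid);
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("vhost_pool",
 *			8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
 *			node < 0 ? (int)rte_socket_id() : node);
 */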

uint32_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (!dev)
		return -1;

	*features = dev->features;
	return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (!dev)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
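
/*
 * Illustrative sketch (not upstream code): the table returned above is
 * allocated with malloc(), so the caller owns it and must free() it once
 * done walking the regions.  `do_something` is a placeholder for
 * application logic.
 *
 *	struct rte_vhost_memory *mem = NULL;
 *	uint32_t i;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) == 0) {
 *		for (i = 0; i < mem->nregions; i++) {
 *			// host_user_addr/size describe each guest memory
 *			// region mapped into this process.
 *			do_something(mem->regions[i].host_user_addr,
 *				     mem->regions[i].size);
 *		}
 *		free(mem);
 *	}
 */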

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vring->desc = vq->desc;
	vring->avail = vq->avail;
	vring->used = vq->used;
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}

int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vhost_vring_call(dev, vq);
	return 0;
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq->enabled)
		return 0;

	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}
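
/*
 * Illustrative sketch (not upstream code): a polling application can use the
 * helper above to check whether a dequeue is worthwhile before paying the
 * cost of rte_vhost_dequeue_burst().  `queue_id`, `mbuf_pool`, `pkts` and
 * MAX_PKT_BURST are placeholder names from the caller's context.
 *
 *	if (rte_vhost_avail_entries(vid, queue_id) > 0)
 *		nb_rx = rte_vhost_dequeue_burst(vid, queue_id, mbuf_pool,
 *						pkts, MAX_PKT_BURST);
 */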

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
	return 0;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
		return 0;

	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}