/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		    uint64_t iova, uint64_t size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!size))
		return 0;

	tmp_size = size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == size)
		return vva;

	if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
		vhost_user_iotlb_miss(dev, iova + tmp_size, perm);

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
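
/*
 * Illustrative sketch (not part of the original file) of the calling
 * convention the comment above describes: a data-path caller takes the
 * IOTLB read lock once for a whole burst, and a zero return from the
 * translation means the mapping is not cached yet, so the descriptor
 * must be retried after QEMU services the reported miss. `desc` and
 * `desc_addr` below are placeholders for the caller's own state.
 */
#if 0
	vhost_user_iotlb_rd_lock(vq);

	desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
				      desc->len, VHOST_ACCESS_RO);
	if (unlikely(!desc_addr))
		goto out; /* miss reported; retry this burst later */

	/* ... process the burst ... */
out:
	vhost_user_iotlb_rd_unlock(vq);
#endif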

struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	/* Only close the call eventfd on a final destroy. */
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++)
		cleanup_vq(dev->virtqueue[i], destroy);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;
	struct vhost_virtqueue *vq;

	for (i = 0; i < dev->nr_vring; i++) {
		vq = dev->virtqueue[i];

		rte_free(vq->shadow_used_ring);
		rte_free(vq->batch_copy_elems);
		rte_mempool_free(vq->iotlb_pool);
		rte_free(vq);
	}

	rte_free(dev);
}

int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t size;

	/* Without a vIOMMU, ring addresses are already host virtual. */
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	size = sizeof(struct vring_desc) * vq->size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.desc_user_addr,
						size, VHOST_ACCESS_RW);
	if (!vq->desc)
		return -1;

	size = sizeof(struct vring_avail);
	size += sizeof(uint16_t) * vq->size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.avail_user_addr,
						size, VHOST_ACCESS_RW);
	if (!vq->avail)
		return -1;

	size = sizeof(struct vring_used);
	size += sizeof(struct vring_used_elem) * vq->size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.used_user_addr,
						size, VHOST_ACCESS_RW);
	if (!vq->used)
		return -1;

out:
	vq->access_ok = 1;

	return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = 0;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}

static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to init vring, out of bound (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

	vhost_user_iotlb_init(dev, vring_idx);

	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	TAILQ_INIT(&vq->zmbuf_list);
}

static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to init vring, out of bound (%d)\n",
				vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	/* Preserve the call eventfd across the reset. */
	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}

int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;

	vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
	if (vq == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for vring:%u.\n", vring_idx);
		return -1;
	}

	dev->virtqueue[vring_idx] = vq;
	init_vring_queue(dev, vring_idx);

	dev->nr_vring += 1;

	return 0;
}

/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they
 * should stay the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags = 0;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}

/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for new dev.\n");
		return -1;
	}

	/* Find a free slot in the device array. */
	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}
	if (i == MAX_VHOST_DEVICE) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to find a free slot for new device.\n");
		rte_free(dev);
		return -1;
	}

	vhost_devices[i] = dev;
	dev->vid = i;
	dev->slave_req_fd = -1;

	return i;
}
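
/*
 * Sketch (not part of the original file) of how the vhost-user
 * connection code is expected to drive the functions above: a new
 * connection allocates a vid, subsequent messages look the device up
 * by vid, and a broken connection tears it down.
 */
#if 0
	int vid = vhost_new_device();
	if (vid < 0)
		return;

	/* ... dispatch vhost-user messages, using get_device(vid) ... */

	vhost_destroy_device(vid);
#endif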

/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(vid);
	}

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_enable_dequeue_zero_copy(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->dequeue_zero_copy = 1;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to query numa node: %s\n",
			vid, rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}

uint32_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	/* Each virtio-net queue pair consists of one Rx and one Tx vring. */
	return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (!dev)
		return -1;

	*features = dev->features;
	return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (!dev)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
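
/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * the table returned above is allocated with malloc() and owned by
 * the caller, which must release it with free().
 */
#if 0
static void
example_dump_mem_table(int vid)
{
	struct rte_vhost_memory *mem;
	uint32_t i;

	if (rte_vhost_get_mem_table(vid, &mem) < 0)
		return;

	for (i = 0; i < mem->nregions; i++)
		RTE_LOG(INFO, VHOST_CONFIG,
			"region %u: size 0x%llx\n", i,
			(unsigned long long)mem->regions[i].size);

	free(mem);
}
#endif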

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	vring->desc  = vq->desc;
	vring->avail = vq->avail;
	vring->used  = vq->used;
	vring->log_guest_addr  = vq->log_guest_addr;

	vring->callfd  = vq->callfd;
	vring->kickfd  = vq->kickfd;
	vring->size    = vq->size;

	return 0;
}
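
/*
 * Usage sketch (hypothetical helper, not part of the original file):
 * the eventfds exposed here let an application interact with the
 * guest directly; writing an 8-byte value to callfd signals the vring,
 * per eventfd semantics. Assumes <unistd.h> is available for write().
 */
#if 0
static void
example_signal_guest(int vid, uint16_t vring_idx)
{
	struct rte_vhost_vring vring;
	uint64_t val = 1;

	if (rte_vhost_get_vhost_vring(vid, vring_idx, &vring) == 0 &&
	    vring.callfd >= 0)
		write(vring.callfd, &val, sizeof(val));
}
#endif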

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq->enabled)
		return 0;

	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
	return 0;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
			dev->vid, __func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
		return 0;

	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}
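
/*
 * Note on the index arithmetic above (also used by
 * rte_vhost_avail_entries()): avail->idx is read through a volatile
 * pointer so the compiler reloads it from the shared ring on every
 * call, and the difference between the two free-running 16-bit ring
 * indices is the number of entries the guest has made available but
 * the backend has not yet consumed.
 */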