diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 54a1864eb6..afded4952f 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2017 Intel Corporation
  */
 
 #include <linux/vhost.h>
@@ -55,43 +26,49 @@
 
 struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
+/* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		    uint64_t iova, uint64_t size, uint8_t perm)
+		    uint64_t iova, uint64_t *size, uint8_t perm)
 {
 	uint64_t vva, tmp_size;
 
-	if (unlikely(!size))
+	if (unlikely(!*size))
 		return 0;
 
-	tmp_size = size;
+	tmp_size = *size;
 
 	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
-	if (tmp_size == size)
+	if (tmp_size == *size)
 		return vva;
 
-	if (!vhost_user_iotlb_pending_miss(vq, iova + tmp_size, perm)) {
-		vhost_user_iotlb_pending_insert(vq, iova + tmp_size, perm);
-		vhost_user_iotlb_miss(dev, iova + tmp_size, perm);
+	iova += tmp_size;
+
+	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
+		/*
+		 * iotlb_lock is read-locked for a full burst,
+		 * but it only protects the iotlb cache.
+		 * In case of IOTLB miss, we might block on the socket,
+		 * which could cause a deadlock with QEMU if an IOTLB update
+		 * is being handled. We can safely unlock here to avoid it.
+		 */
+		vhost_user_iotlb_rd_unlock(vq);
+
+		vhost_user_iotlb_pending_insert(vq, iova, perm);
+		if (vhost_user_iotlb_miss(dev, iova, perm)) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
+				iova);
+			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
+		}
+
+		vhost_user_iotlb_rd_lock(vq);
 	}
 
 	return 0;
 }
 
-struct virtio_net *
-get_device(int vid)
-{
-	struct virtio_net *dev = vhost_devices[vid];
-
-	if (unlikely(!dev)) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"(%d) device not found.\n", vid);
-	}
-
-	return dev;
-}
-
-static void
+void
 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 {
 	if ((vq->callfd >= 0) && (destroy != 0))
@@ -115,6 +92,15 @@ cleanup_device(struct virtio_net *dev, int destroy)
 		cleanup_vq(dev->virtqueue[i], destroy);
 }
 
+void
+free_vq(struct vhost_virtqueue *vq)
+{
+	rte_free(vq->shadow_used_ring);
+	rte_free(vq->batch_copy_elems);
+	rte_mempool_free(vq->iotlb_pool);
+	rte_free(vq);
+}
+
 /*
  * Release virtqueues and device memory.
  */
@@ -122,16 +108,9 @@ static void
 free_device(struct virtio_net *dev)
 {
 	uint32_t i;
-	struct vhost_virtqueue *vq;
-
-	for (i = 0; i < dev->nr_vring; i++) {
-		vq = dev->virtqueue[i];
-		rte_free(vq->shadow_used_ring);
-		rte_free(vq->batch_copy_elems);
-		rte_mempool_free(vq->iotlb_pool);
-		rte_free(vq);
-	}
+	for (i = 0; i < dev->nr_vring; i++)
+		free_vq(dev->virtqueue[i]);
 
 	rte_free(dev);
 }
@@ -139,32 +118,39 @@ free_device(struct virtio_net *dev)
 int
 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-	uint64_t size;
+	uint64_t req_size, size;
 
 	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
 		goto out;
 
-	size = sizeof(struct vring_desc) * vq->size;
+	req_size = sizeof(struct vring_desc) * vq->size;
+	size = req_size;
 	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.desc_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->desc)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->desc || size != req_size)
 		return -1;
 
-	size = sizeof(struct vring_avail);
-	size += sizeof(uint16_t) * vq->size;
+	req_size = sizeof(struct vring_avail);
+	req_size += sizeof(uint16_t) * vq->size;
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		req_size += sizeof(uint16_t);
+	size = req_size;
 	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.avail_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->avail)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->avail || size != req_size)
 		return -1;
 
-	size = sizeof(struct vring_used);
-	size += sizeof(struct vring_used_elem) * vq->size;
+	req_size = sizeof(struct vring_used);
+	req_size += sizeof(struct vring_used_elem) * vq->size;
+	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+		req_size += sizeof(uint16_t);
+	size = req_size;
 	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 						vq->ring_addrs.used_user_addr,
-						size, VHOST_ACCESS_RW);
-	if (!vq->used)
+						&size, VHOST_ACCESS_RW);
+	if (!vq->used || size != req_size)
 		return -1;
 
 out:
@@ -247,6 +233,7 @@ alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
 
 	dev->virtqueue[vring_idx] = vq;
 	init_vring_queue(dev, vring_idx);
+	rte_spinlock_init(&vq->access_lock);
 
 	dev->nr_vring += 1;
 
@@ -265,7 +252,7 @@ reset_device(struct virtio_net *dev)
 
 	dev->features = 0;
 	dev->protocol_features = 0;
-	dev->flags = 0;
+	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 
 	for (i = 0; i < dev->nr_vring; i++)
 		reset_vring_queue(dev, i);
@@ -301,7 +288,9 @@ vhost_new_device(void)
 
 	vhost_devices[i] = dev;
 	dev->vid = i;
+	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
 	dev->slave_req_fd = -1;
+	dev->vdpa_dev_id = -1;
 
 	return i;
 }
@@ -314,11 +303,17 @@ void
 vhost_destroy_device(int vid)
 {
 	struct virtio_net *dev = get_device(vid);
+	struct rte_vdpa_device *vdpa_dev;
+	int did = -1;
 
 	if (dev == NULL)
 		return;
 
 	if (dev->flags & VIRTIO_DEV_RUNNING) {
+		did = dev->vdpa_dev_id;
+		vdpa_dev = rte_vdpa_get_device(did);
+		if (vdpa_dev && vdpa_dev->ops->dev_close)
+			vdpa_dev->ops->dev_close(dev->vid);
 		dev->flags &= ~VIRTIO_DEV_RUNNING;
 		dev->notify_ops->destroy_device(vid);
 	}
@@ -329,6 +324,31 @@ vhost_destroy_device(int vid)
 	vhost_devices[vid] = NULL;
 }
 
+void
+vhost_attach_vdpa_device(int vid, int did)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return;
+
+	if (rte_vdpa_get_device(did) == NULL)
+		return;
+
+	dev->vdpa_dev_id = did;
+}
+
+void
+vhost_detach_vdpa_device(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return;
+
+	dev->vdpa_dev_id = -1;
+}
+
 void
 vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
 {
@@ -357,6 +377,20 @@ vhost_enable_dequeue_zero_copy(int vid)
 	dev->dequeue_zero_copy = 1;
 }
 
+void
+vhost_set_builtin_virtio_net(int vid, bool enable)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return;
+
+	if (enable)
+		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+	else
+		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
+}
+
 int
 rte_vhost_get_mtu(int vid, uint16_t *mtu)
 {
@@ -507,6 +541,27 @@ rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
 	return 0;
 }
 
+int
+rte_vhost_vring_call(int vid, uint16_t vring_idx)
+{
+	struct virtio_net *dev;
+	struct vhost_virtqueue *vq;
+
+	dev = get_device(vid);
+	if (!dev)
+		return -1;
+
+	if (vring_idx >= VHOST_MAX_VRING)
+		return -1;
+
+	vq = dev->virtqueue[vring_idx];
+	if (!vq)
+		return -1;
+
+	vhost_vring_call(dev, vq);
+	return 0;
+}
+
 uint16_t
 rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
@@ -529,16 +584,14 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
 	struct virtio_net *dev = get_device(vid);
 
-	if (dev == NULL)
-		return -1;
-
-	if (enable) {
-		RTE_LOG(ERR, VHOST_CONFIG,
-			"guest notification isn't supported.\n");
+	if (!dev)
 		return -1;
-	}
 
-	dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
+	if (enable)
+		dev->virtqueue[queue_id]->used->flags &=
+			~VRING_USED_F_NO_NOTIFY;
+	else
+		dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
 	return 0;
 }
 
@@ -598,3 +651,76 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 
 	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 }
+
+int rte_vhost_get_vdpa_device_id(int vid)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (dev == NULL)
+		return -1;
+
+	return dev->vdpa_dev_id;
+}
+
+int rte_vhost_get_log_base(int vid, uint64_t *log_base,
+		uint64_t *log_size)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (!dev)
+		return -1;
+
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return -1;
+	}
+
+	*log_base = dev->log_base;
+	*log_size = dev->log_size;
+
+	return 0;
+}
+
+int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+		uint16_t *last_avail_idx, uint16_t *last_used_idx)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (!dev)
+		return -1;
+
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return -1;
+	}
+
+	*last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
+	*last_used_idx = dev->virtqueue[queue_id]->last_used_idx;
+
+	return 0;
+}
+
+int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+		uint16_t last_avail_idx, uint16_t last_used_idx)
+{
+	struct virtio_net *dev = get_device(vid);
+
+	if (!dev)
+		return -1;
+
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return -1;
+	}
+
+	dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
+	dev->virtqueue[queue_id]->last_used_idx = last_used_idx;
+
+	return 0;
+}
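
Usage note (not part of the patch): the last hunks export a small vring state API — rte_vhost_get_vring_base(), rte_vhost_set_vring_base() and rte_vhost_vring_call() — whose signatures are visible above. The sketch below shows one plausible way a backend built on librte_vhost could combine them: snapshot a virtqueue's position, let some other processing run, restore the position and kick the guest. The helper name handover_ring() is hypothetical; the sketch assumes "vid" comes from a vhost-user new_device() callback and that the VIRTIO_DEV_BUILTIN_VIRTIO_NET flag is still set on the device, since the getters/setters above return -1 otherwise.

#include <stdint.h>

#include <rte_vhost.h>

/* Hypothetical helper, not from the patch: save/restore a virtqueue's
 * indices around some external processing, then notify the guest. */
static int
handover_ring(int vid, uint16_t queue_id)
{
	uint16_t last_avail_idx, last_used_idx;

	/* Snapshot where processing of this virtqueue stopped. */
	if (rte_vhost_get_vring_base(vid, queue_id,
			&last_avail_idx, &last_used_idx) < 0)
		return -1;

	/* ... another datapath could process the ring here ... */

	/* Restore the snapshot so processing resumes where it stopped,
	 * without replaying or skipping descriptors. */
	if (rte_vhost_set_vring_base(vid, queue_id,
			last_avail_idx, last_used_idx) < 0)
		return -1;

	/* Interrupt the guest so it re-examines the used ring. */
	return rte_vhost_vring_call(vid, queue_id);
}

Design-wise, rte_vhost_set_vring_base() is what lets a datapath resume exactly where a previous one stopped; without it, whoever takes over a ring would have to replay or drop the in-flight descriptors.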