vhost: check all range is mapped when translating GPAs
author     Maxime Coquelin <maxime.coquelin@redhat.com>
           Tue, 23 Jan 2018 13:26:02 +0000 (14:26 +0100)
committer  Maxime Coquelin <maxime.coquelin@redhat.com>
           Mon, 23 Apr 2018 14:04:30 +0000 (16:04 +0200)
There is currently no check done on the length when translating
guest addresses into host virtual addresses. Also, there is no
guarantee that the guest address range is contiguous in
the host virtual address space.

This patch changes vhost_iova_to_vva() and its callers to
return and check the mapped size. If the mapped size is smaller
than the requested size, the caller handles it as an error.
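
The typical caller pattern after this change looks as follows (an
illustrative sketch distilled from the hunks below, not a verbatim
excerpt; 'desc', 'error' and the 'out' label stand for the caller's
local state):

    uint64_t dlen = desc->len;      /* requested length */
    uint64_t desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
                                           &dlen, VHOST_ACCESS_RO);
    /*
     * vhost_iova_to_vva() updates dlen with the size actually mapped
     * contiguously in host virtual address space; a partial mapping
     * is treated as an error.
     */
    if (unlikely(!desc_addr || dlen != desc->len)) {
        error = -1;
        goto out;
    }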

This issue has been assigned CVE-2018-1059.

Reported-by: Yongji Xie <xieyongji@baidu.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
lib/librte_vhost/vhost.c
lib/librte_vhost/vhost.h
lib/librte_vhost/virtio_net.c

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 5ddf55e..afded49 100644
@@ -29,17 +29,17 @@ struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 /* Called with iotlb_lock read-locked */
 uint64_t
 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                   uint64_t iova, uint64_t size, uint8_t perm)
+                   uint64_t iova, uint64_t *size, uint8_t perm)
 {
        uint64_t vva, tmp_size;
 
-       if (unlikely(!size))
+       if (unlikely(!*size))
                return 0;
 
-       tmp_size = size;
+       tmp_size = *size;
 
        vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
-       if (tmp_size == size)
+       if (tmp_size == *size)
                return vva;
 
        iova += tmp_size;
@@ -118,32 +118,39 @@ free_device(struct virtio_net *dev)
 int
 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-       uint64_t size;
+       uint64_t req_size, size;
 
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                goto out;
 
-       size = sizeof(struct vring_desc) * vq->size;
+       req_size = sizeof(struct vring_desc) * vq->size;
+       size = req_size;
        vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.desc_user_addr,
-                                               size, VHOST_ACCESS_RW);
-       if (!vq->desc)
+                                               &size, VHOST_ACCESS_RW);
+       if (!vq->desc || size != req_size)
                return -1;
 
-       size = sizeof(struct vring_avail);
-       size += sizeof(uint16_t) * vq->size;
+       req_size = sizeof(struct vring_avail);
+       req_size += sizeof(uint16_t) * vq->size;
+       if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+               req_size += sizeof(uint16_t);
+       size = req_size;
        vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.avail_user_addr,
-                                               size, VHOST_ACCESS_RW);
-       if (!vq->avail)
+                                               &size, VHOST_ACCESS_RW);
+       if (!vq->avail || size != req_size)
                return -1;
 
-       size = sizeof(struct vring_used);
-       size += sizeof(struct vring_used_elem) * vq->size;
+       req_size = sizeof(struct vring_used);
+       req_size += sizeof(struct vring_used_elem) * vq->size;
+       if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
+               req_size += sizeof(uint16_t);
+       size = req_size;
        vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.used_user_addr,
-                                               size, VHOST_ACCESS_RW);
-       if (!vq->used)
+                                               &size, VHOST_ACCESS_RW);
+       if (!vq->used || size != req_size)
                return -1;
 
 out:
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index c9b6446..f7dbd2c 100644
@@ -437,18 +437,18 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t iova, uint64_t size, uint8_t perm);
+                       uint64_t iova, uint64_t *len, uint8_t perm);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t iova, uint64_t size, uint8_t perm)
+                       uint64_t iova, uint64_t *len, uint8_t perm)
 {
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                return rte_vhost_gpa_to_vva(dev->mem, iova);
 
-       return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+       return __vhost_iova_to_vva(dev, vq, iova, len, perm);
 }
 
 #define vhost_used_event(vr) \
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 108f4de..2be3e7a 100644
@@ -180,6 +180,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
+       uint64_t dlen;
        struct vring_desc *desc;
        uint64_t desc_addr;
        /* A counter to avoid desc dead loop chain */
@@ -189,14 +190,16 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        int error = 0;
 
        desc = &descs[desc_idx];
+       dlen = desc->len;
        desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
-                                       desc->len, VHOST_ACCESS_RW);
+                                       &dlen, VHOST_ACCESS_RW);
        /*
         * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
         * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
         * otherwise stores offset on the stack instead of in a register.
         */
-       if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+       if (unlikely(dlen != desc->len || desc->len < dev->vhost_hlen) ||
+                       !desc_addr) {
                error = -1;
                goto out;
        }
@@ -234,10 +237,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
 
                        desc = &descs[desc->next];
+                       dlen = desc->len;
                        desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
-                                                       desc->len,
+                                                       &dlen,
                                                        VHOST_ACCESS_RW);
-                       if (unlikely(!desc_addr)) {
+                       if (unlikely(!desc_addr || dlen != desc->len)) {
                                error = -1;
                                goto out;
                        }
@@ -351,12 +355,13 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                int err;
 
                if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       uint64_t dlen = vq->desc[desc_idx].len;
                        descs = (struct vring_desc *)(uintptr_t)
                                vhost_iova_to_vva(dev,
                                                vq, vq->desc[desc_idx].addr,
-                                               vq->desc[desc_idx].len,
-                                               VHOST_ACCESS_RO);
-                       if (unlikely(!descs)) {
+                                               &dlen, VHOST_ACCESS_RO);
+                       if (unlikely(!descs ||
+                                       dlen != vq->desc[desc_idx].len)) {
                                count = i;
                                break;
                        }
@@ -408,16 +413,18 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
        uint32_t len    = 0;
+       uint64_t dlen;
        struct vring_desc *descs = vq->desc;
 
        *desc_chain_head = idx;
 
        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               dlen = vq->desc[idx].len;
                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
-                                               vq->desc[idx].len,
+                                               &dlen,
                                                VHOST_ACCESS_RO);
-               if (unlikely(!descs))
+               if (unlikely(!descs || dlen != vq->desc[idx].len))
                        return -1;
 
                idx = 0;
@@ -500,6 +507,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t desc_offset, desc_avail;
        uint32_t cpy_len;
+       uint64_t dlen;
        uint64_t hdr_addr, hdr_phys_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
@@ -511,10 +519,12 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                goto out;
        }
 
+       dlen = buf_vec[vec_idx].buf_len;
        desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
-                                               buf_vec[vec_idx].buf_len,
-                                               VHOST_ACCESS_RW);
-       if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+                                               &dlen, VHOST_ACCESS_RW);
+       if (dlen != buf_vec[vec_idx].buf_len ||
+                       buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
+                       !desc_addr) {
                error = -1;
                goto out;
        }
@@ -536,12 +546,14 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                /* done with current desc buf, get the next one */
                if (desc_avail == 0) {
                        vec_idx++;
+                       dlen = buf_vec[vec_idx].buf_len;
                        desc_addr =
                                vhost_iova_to_vva(dev, vq,
                                        buf_vec[vec_idx].buf_addr,
-                                       buf_vec[vec_idx].buf_len,
+                                       &dlen,
                                        VHOST_ACCESS_RW);
-                       if (unlikely(!desc_addr)) {
+                       if (unlikely(!desc_addr ||
+                                       dlen != buf_vec[vec_idx].buf_len)) {
                                error = -1;
                                goto out;
                        }
@@ -847,6 +859,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
+       uint64_t dlen;
        struct rte_mbuf *cur = m, *prev = m;
        struct virtio_net_hdr *hdr = NULL;
        /* A counter to avoid desc dead loop chain */
@@ -862,11 +875,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                goto out;
        }
 
+       dlen = desc->len;
        desc_addr = vhost_iova_to_vva(dev,
                                        vq, desc->addr,
-                                       desc->len,
+                                       &dlen,
                                        VHOST_ACCESS_RO);
-       if (unlikely(!desc_addr)) {
+       if (unlikely(!desc_addr || dlen != desc->len)) {
                error = -1;
                goto out;
        }
@@ -889,11 +903,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        goto out;
                }
 
+               dlen = desc->len;
                desc_addr = vhost_iova_to_vva(dev,
                                                        vq, desc->addr,
-                                                       desc->len,
+                                                       &dlen,
                                                        VHOST_ACCESS_RO);
-               if (unlikely(!desc_addr)) {
+               if (unlikely(!desc_addr || dlen != desc->len)) {
                        error = -1;
                        goto out;
                }
@@ -977,11 +992,11 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                goto out;
                        }
 
+                       dlen = desc->len;
                        desc_addr = vhost_iova_to_vva(dev,
                                                        vq, desc->addr,
-                                                       desc->len,
-                                                       VHOST_ACCESS_RO);
-                       if (unlikely(!desc_addr)) {
+                                                       &dlen, VHOST_ACCESS_RO);
+                       if (unlikely(!desc_addr || dlen != desc->len)) {
                                error = -1;
                                goto out;
                        }
@@ -1252,18 +1267,21 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        for (i = 0; i < count; i++) {
                struct vring_desc *desc;
                uint16_t sz, idx;
+               uint64_t dlen;
                int err;
 
                if (likely(i + 1 < count))
                        rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
                if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+                       dlen = vq->desc[desc_indexes[i]].len;
                        desc = (struct vring_desc *)(uintptr_t)
                                vhost_iova_to_vva(dev, vq,
                                                vq->desc[desc_indexes[i]].addr,
-                                               vq->desc[desc_indexes[i]].len,
+                                               &dlen,
                                                VHOST_ACCESS_RO);
-                       if (unlikely(!desc))
+                       if (unlikely(!desc ||
+                                       dlen != vq->desc[desc_indexes[i]].len))
                                break;
 
                        rte_prefetch0(desc);