vhost: fix overflow on shadow used ring
author Tiwei Bie <tiwei.bie@intel.com>
Wed, 25 Jul 2018 13:51:37 +0000 (21:51 +0800)
committer Tiwei Bie <tiwei.bie@intel.com>
Thu, 26 Jul 2018 08:02:50 +0000 (10:02 +0200)
The shadow used ring's size is the same as the vq's size,
so we shouldn't try more than "vq size" times. Besides,
the element pointed to by avail->idx isn't available to
the device, so an error will be returned anyway if we try
"vq size" times.
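
For context, a minimal standalone sketch of the bounded reservation
loop follows. It is not the DPDK code itself: RING_SIZE, slot_len()
and reserve() are hypothetical stand-ins for vq->size, the
fill_vec_buf_*() step and reserve_avail_buf_*(). It only illustrates
why the bound is checked before a slot is consumed and why it is
capped at ring size minus one.

/*
 * Standalone sketch (hypothetical, not the DPDK sources): models a
 * bounded buffer-reservation loop over a ring of RING_SIZE slots.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8     /* hypothetical vq->size */

/* Hypothetical per-slot buffer length; a real ring reads descriptors. */
static uint32_t slot_len(uint16_t idx)
{
        (void)idx;
        return 1500;
}

static int
reserve(uint16_t last_avail_idx, uint16_t avail_head, uint32_t needed,
        uint16_t *num_buffers)
{
        uint16_t cur_idx = last_avail_idx;
        uint16_t tries = 0;
        /*
         * At most RING_SIZE - 1 slots can usefully be consumed in one
         * pass, and the shadow used ring only holds RING_SIZE entries,
         * so cap the tries below the ring size.
         */
        uint16_t max_tries = RING_SIZE - 1;

        *num_buffers = 0;
        while (needed > 0) {
                if (cur_idx == avail_head)
                        return -1;      /* no more available slots */
                /*
                 * Check BEFORE consuming another slot, so the loop can
                 * never touch more than RING_SIZE - 1 entries.
                 */
                if (++tries > max_tries)
                        return -1;

                uint32_t len = slot_len(cur_idx);
                needed -= (len < needed) ? len : needed;
                cur_idx++;
                *num_buffers += 1;
        }
        return 0;
}

int main(void)
{
        uint16_t num_buffers;

        /* 9000 bytes over 1500-byte buffers: 6 slots, succeeds (0). */
        printf("%d\n", reserve(0, 8, 9000, &num_buffers));
        /* 64KB can never fit in RING_SIZE - 1 slots: the tries bound
         * trips and returns -1 before an 8th slot would be consumed. */
        printf("%d\n", reserve(0, 8, 65536, &num_buffers));
        return 0;
}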

Fixes: 24e4844048e1 ("vhost: unify Rx mergeable and non-mergeable paths")
Fixes: a922401f35cc ("vhost: add Rx support for packed ring")

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Jens Freimann <jfreimann@redhat.com>
lib/librte_vhost/virtio_net.c

index a8b4c96..5779bcf 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -415,13 +415,20 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        cur_idx  = vq->last_avail_idx;
 
        if (rxvq_is_mergeable(dev))
-               max_tries = vq->size;
+               max_tries = vq->size - 1;
        else
                max_tries = 1;
 
        while (size > 0) {
                if (unlikely(cur_idx == avail_head))
                        return -1;
+               /*
+                * if we tried all available ring items, and still
+                * can't get enough buf, it means something abnormal
+                * happened.
+                */
+               if (unlikely(++tries > max_tries))
+                       return -1;
 
                if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
                                                &vec_idx, buf_vec,
@@ -433,16 +440,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                size -= len;
 
                cur_idx++;
-               tries++;
                *num_buffers += 1;
-
-               /*
-                * if we tried all available ring items, and still
-                * can't get enough buf, it means something abnormal
-                * happened.
-                */
-               if (unlikely(tries > max_tries))
-                       return -1;
        }
 
        *nr_vec = vec_idx;
@@ -582,11 +580,19 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        avail_idx = vq->last_avail_idx;
 
        if (rxvq_is_mergeable(dev))
-               max_tries = vq->size;
+               max_tries = vq->size - 1;
        else
                max_tries = 1;
 
        while (size > 0) {
+               /*
+                * if we tried all available ring items, and still
+                * can't get enough buf, it means something abnormal
+                * happened.
+                */
+               if (unlikely(++tries > max_tries))
+                       return -1;
+
                if (unlikely(fill_vec_buf_packed(dev, vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &vec_idx,
@@ -603,16 +609,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        avail_idx -= vq->size;
 
                *nr_descs += desc_count;
-               tries++;
                *num_buffers += 1;
-
-               /*
-                * if we tried all available ring items, and still
-                * can't get enough buf, it means something abnormal
-                * happened.
-                */
-               if (unlikely(tries > max_tries))
-                       return -1;
        }
 
        *nr_vec = vec_idx;