From 4e0de8dac8531b82d4c328791a67f49eadfed5f0 Mon Sep 17 00:00:00 2001
From: Tiwei Bie
Date: Mon, 19 Aug 2019 19:34:57 +0800
Subject: [PATCH] vhost: protect vring access done by application

Besides the enqueue/dequeue API, other APIs of the builtin net
backend should also be protected.

Fixes: a3688046995f ("vhost: protect active rings from async ring changes")
Cc: stable@dpdk.org

Reported-by: Peng He
Signed-off-by: Tiwei Bie
Reviewed-by: Maxime Coquelin
---
 lib/librte_vhost/vhost.c | 50 +++++++++++++++++++++++++++++++---------
 1 file changed, 39 insertions(+), 11 deletions(-)

diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 77be160697..cea44df8cb 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -785,22 +785,33 @@ rte_vhost_avail_entries(int vid, uint16_t queue_id)
 {
 	struct virtio_net *dev;
 	struct vhost_virtqueue *vq;
+	uint16_t ret = 0;
 
 	dev = get_device(vid);
 	if (!dev)
 		return 0;
 
 	vq = dev->virtqueue[queue_id];
-	if (!vq->enabled)
-		return 0;
 
-	return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+	rte_spinlock_lock(&vq->access_lock);
+
+	if (unlikely(!vq->enabled || vq->avail == NULL))
+		goto out;
+
+	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
+
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+	return ret;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_split(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, int enable)
 {
+	if (vq->used == NULL)
+		return -1;
+
 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (enable)
 			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
@@ -810,17 +821,21 @@ vhost_enable_notify_split(struct virtio_net *dev,
 		if (enable)
 			vhost_avail_event(vq) = vq->last_avail_idx;
 	}
+	return 0;
 }
 
-static inline void
+static inline int
 vhost_enable_notify_packed(struct virtio_net *dev,
 		struct vhost_virtqueue *vq, int enable)
 {
 	uint16_t flags;
 
+	if (vq->device_event == NULL)
+		return -1;
+
 	if (!enable) {
 		vq->device_event->flags = VRING_EVENT_F_DISABLE;
-		return;
+		return 0;
 	}
 
 	flags = VRING_EVENT_F_ENABLE;
@@ -833,6 +848,7 @@ vhost_enable_notify_packed(struct virtio_net *dev,
 	rte_smp_wmb();
 
 	vq->device_event->flags = flags;
+	return 0;
 }
 
 int
@@ -840,18 +856,23 @@ rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
 {
 	struct virtio_net *dev = get_device(vid);
 	struct vhost_virtqueue *vq;
+	int ret;
 
 	if (!dev)
 		return -1;
 
 	vq = dev->virtqueue[queue_id];
 
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (vq_is_packed(dev))
-		vhost_enable_notify_packed(dev, vq, enable);
+		ret = vhost_enable_notify_packed(dev, vq, enable);
 	else
-		vhost_enable_notify_split(dev, vq, enable);
+		ret = vhost_enable_notify_split(dev, vq, enable);
 
-	return 0;
+	rte_spinlock_unlock(&vq->access_lock);
+
+	return ret;
 }
 
 void
@@ -890,6 +911,7 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 {
 	struct virtio_net *dev;
 	struct vhost_virtqueue *vq;
+	uint32_t ret = 0;
 	dev = get_device(vid);
 	if (dev == NULL)
 		return 0;
@@ -905,10 +927,16 @@ rte_vhost_rx_queue_count(int vid, uint16_t qid)
 	if (vq == NULL)
 		return 0;
 
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0 || vq->avail == NULL))
-		return 0;
+		goto out;
+
+	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
 
-	return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
+out:
+	rte_spinlock_unlock(&vq->access_lock);
+	return ret;
 }
 
 int rte_vhost_get_vdpa_device_id(int vid)
-- 
2.20.1