vhost: protect active rings from async ring changes
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 1943d06..e52a9b6 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -74,17 +74,21 @@ struct vhost_virtqueue {
 
        uint16_t                last_avail_idx;
        uint16_t                last_used_idx;
+       /* Last used index we notified the front end about. */
+       uint16_t                signalled_used;
 #define VIRTIO_INVALID_EVENTFD         (-1)
 #define VIRTIO_UNINITIALIZED_EVENTFD   (-2)
 
        /* Backend value to determine if device should be started/stopped */
        int                     backend;
+       int                     enabled;
+       int                     access_ok;
+       rte_spinlock_t          access_lock;
+
        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
        int                     kickfd;
-       int                     enabled;
-       int                     access_ok;
 
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
@@ -178,11 +182,16 @@ struct vhost_msg {
                                (1ULL << VIRTIO_NET_F_GSO) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+                               (1ULL << VIRTIO_NET_F_HOST_UFO) | \
+                               (1ULL << VIRTIO_NET_F_HOST_ECN) | \
                                (1ULL << VIRTIO_NET_F_CSUM)    | \
                                (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+                               (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
+                               (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
                                (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
+                               (1ULL << VIRTIO_RING_F_EVENT_IDX) | \
                                (1ULL << VIRTIO_NET_F_MTU) | \
                                (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 
@@ -336,6 +345,9 @@ void cleanup_device(struct virtio_net *dev, int destroy);
 void reset_device(struct virtio_net *dev);
 void vhost_destroy_device(int);
 
+void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
+void free_vq(struct vhost_virtqueue *vq);
+
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
@@ -365,16 +377,47 @@ vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return __vhost_iova_to_vva(dev, vq, iova, size, perm);
 }
 
+#define vhost_used_event(vr) \
+       (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
+
+/*
+ * The following is used with VIRTIO_RING_F_EVENT_IDX.
+ * Assuming a given event_idx value from the other side, if we have
+ * just incremented index from old to new_idx, should we trigger an
+ * event?
+ */
+static __rte_always_inline int
+vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
+{
+       return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
+}
+
 static __rte_always_inline void
-vhost_vring_call(struct vhost_virtqueue *vq)
+vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
        /* Flush used->idx update before we read avail->flags. */
        rte_mb();
 
-       /* Kick the guest if necessary. */
-       if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-                       && (vq->callfd >= 0))
-               eventfd_write(vq->callfd, (eventfd_t)1);
+       /* Don't kick the guest until the used index passes the index it specified. */
+       if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
+               uint16_t old = vq->signalled_used;
+               uint16_t new = vq->last_used_idx;
+
+               LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
+                       __func__,
+                       vhost_used_event(vq),
+                       old, new);
+               if (vhost_need_event(vhost_used_event(vq), new, old)
+                       && (vq->callfd >= 0)) {
+                       vq->signalled_used = vq->last_used_idx;
+                       eventfd_write(vq->callfd, (eventfd_t) 1);
+               }
+       } else {
+               /* Kick the guest if necessary. */
+               if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
+                               && (vq->callfd >= 0))
+                       eventfd_write(vq->callfd, (eventfd_t)1);
+       }
 }
 
 #endif /* _VHOST_NET_CDEV_H_ */
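
When VIRTIO_RING_F_EVENT_IDX is negotiated, the guest publishes a used_event index in the final entry of the avail ring (which is what the vhost_used_event() macro reads), and the host signals only when the used index moves past it. The snippet below is a standalone sketch of that decision: it re-implements the vhost_need_event() arithmetic so it can be compiled and run on its own, and the asserted values are worked examples of the modulo-2^16 window, not DPDK API calls.

/*
 * Standalone sketch of the EVENT_IDX decision made in vhost_vring_call()
 * above.  need_event() copies the arithmetic of vhost_need_event(); the
 * asserts are worked examples, including one across 16-bit wrap-around.
 */
#include <assert.h>
#include <stdint.h>

static int
need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	/* True iff event_idx is in [old, new_idx - 1] modulo 2^16. */
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int
main(void)
{
	/* Guest asked for a notification once the used index passes 6. */
	assert(need_event(6, 8, 5));	/* 5 -> 8 crosses 6: kick        */
	assert(!need_event(8, 8, 5));	/* index 8 not reached: no kick  */
	assert(!need_event(4, 8, 5));	/* 4 already signalled: no kick  */

	/* The uint16_t casts keep the window correct across wrap. */
	assert(need_event(65534, 2, 65530));

	return 0;
}

Note that in the patched vhost_vring_call(), vq->signalled_used is updated only when a kick is actually sent, so repeated calls that do not cross used_event stay silent.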