vhost: checkout resubmit inflight information
[dpdk.git] lib/librte_vhost/vhost.h
index 5131a97..6250ebd 100644
@@ -128,6 +128,14 @@ struct vhost_virtqueue {
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
 
+       /* Inflight shared memory info */
+       union {
+               struct rte_vhost_inflight_info_split *inflight_split;
+               struct rte_vhost_inflight_info_packed *inflight_packed;
+       };
+       struct rte_vhost_resubmit_info *resubmit_inflight;
+       uint64_t                global_counter;
+
        uint16_t                nr_zmbuf;
        uint16_t                zmbuf_size;
        uint16_t                last_zmbuf_idx;
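
The union keeps a pointer to either the split or the packed layout of the shared in-flight tracking region, resubmit_inflight lists the descriptors that were still pending when the backend reconnected, and global_counter stamps each submission so pending requests can be replayed in order. Below is a minimal, self-contained sketch of that replay ordering; the struct is a simplified stand-in, not the rte_vhost_resubmit_* definitions.

/*
 * Sketch: entries that were still in flight when the backend restarted are
 * collected from the shared region and replayed in submission order, using
 * the per-queue counter as the sort key.  Simplified stand-in types.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_resubmit_desc {
        uint16_t index;   /* descriptor index to replay */
        uint64_t counter; /* queue's global counter value at submit time */
};

static int
resubmit_cmp(const void *a, const void *b)
{
        const struct toy_resubmit_desc *da = a;
        const struct toy_resubmit_desc *db = b;

        return (da->counter > db->counter) - (da->counter < db->counter);
}

int
main(void)
{
        /* Entries recovered from the inflight region, in no particular order. */
        struct toy_resubmit_desc list[] = {
                { .index = 7, .counter = 12 },
                { .index = 2, .counter = 10 },
                { .index = 5, .counter = 11 },
        };
        uint16_t i, num = 3;

        qsort(list, num, sizeof(list[0]), resubmit_cmp);

        for (i = 0; i < num; i++)
                printf("resubmit desc %u (counter %llu)\n",
                       (unsigned)list[i].index,
                       (unsigned long long)list[i].counter);
        return 0;
}
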
@@ -286,6 +294,12 @@ struct guest_page {
        uint64_t size;
 };
 
+struct inflight_mem_info {
+       int             fd;
+       void            *addr;
+       uint64_t        size;
+};
+
 /**
  * Device structure contains all configuration information relating
  * to the device.
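
struct inflight_mem_info records the file descriptor, mapping address, and size of the shared in-flight region, so the same memory can be re-mapped and handed back after a backend restart. The sketch below shows one way such a region could be created and recorded; it assumes Linux memfd_create() and is not the library's own allocation path (the fd normally travels over the VHOST_USER_GET_INFLIGHT_FD / VHOST_USER_SET_INFLIGHT_FD messages).

/*
 * Minimal sketch, assuming Linux memfd_create(): create an anonymous
 * shared-memory region and record it the way struct inflight_mem_info does.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct inflight_mem_info {
        int             fd;
        void            *addr;
        uint64_t        size;
};

static int
inflight_mem_alloc_sketch(struct inflight_mem_info *info, uint64_t size)
{
        void *addr;
        int fd;

        fd = memfd_create("vhost-inflight", 0);
        if (fd < 0)
                return -1;
        if (ftruncate(fd, size) < 0) {
                close(fd);
                return -1;
        }
        addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                close(fd);
                return -1;
        }
        memset(addr, 0, size);  /* fresh region: nothing in flight yet */
        info->fd = fd;
        info->addr = addr;
        info->size = size;
        return 0;
}

int
main(void)
{
        struct inflight_mem_info info;

        if (inflight_mem_alloc_sketch(&info, 4096) != 0)
                return 1;
        printf("fd=%d addr=%p size=%llu\n", info.fd, info.addr,
               (unsigned long long)info.size);
        return 0;
}
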
@@ -303,6 +317,7 @@ struct virtio_net {
        uint32_t                nr_vring;
        int                     dequeue_zero_copy;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+       struct inflight_mem_info *inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
        char                    ifname[IF_NAME_SZ];
        uint64_t                log_size;
@@ -344,7 +359,7 @@ vq_is_packed(struct virtio_net *dev)
 static inline bool
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-       uint16_t flags = *((volatile uint16_t *) &desc->flags);
+       uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
 
        return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_DESC_F_USED);
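
Replacing the volatile cast with an explicit __ATOMIC_ACQUIRE load pairs with the driver's release-store of the flags: volatile only stops the compiler from caching the value, while the acquire load also prevents the CPU from reading the descriptor body before the flags on weakly ordered architectures. The toy producer/consumer below illustrates that pairing; it is not vhost code, only the memory-ordering contract.

/*
 * Toy illustration of the acquire/release pairing behind desc_is_avail():
 * the producer fills the descriptor body, then publishes it with a
 * release-store of the flags; the consumer's acquire-load of the flags
 * guarantees it sees the completed body.  Uses GCC/Clang __atomic builtins.
 */
#include <stdint.h>

#define TOY_DESC_F_AVAIL (1U << 7)

struct toy_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

static void
toy_publish(struct toy_desc *d, uint64_t addr, uint32_t len, uint16_t id)
{
        d->addr = addr;
        d->len = len;
        d->id = id;
        /* Release: the body writes above cannot be reordered after this store. */
        __atomic_store_n(&d->flags, TOY_DESC_F_AVAIL, __ATOMIC_RELEASE);
}

static int
toy_poll(const struct toy_desc *d, uint64_t *addr)
{
        /* Acquire: pairs with the release store, so the body read below
         * observes the values written before the flags were set. */
        uint16_t flags = __atomic_load_n(&d->flags, __ATOMIC_ACQUIRE);

        if (!(flags & TOY_DESC_F_AVAIL))
                return 0;
        *addr = d->addr;
        return 1;
}

int
main(void)
{
        struct toy_desc d = { 0 };
        uint64_t addr = 0;

        toy_publish(&d, 0x1000, 64, 3);
        return toy_poll(&d, &addr) ? 0 : 1;
}
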
@@ -353,9 +368,14 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 void __vhost_log_cache_write(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+               struct vhost_virtqueue *vq,
+               uint64_t iova, uint64_t len);
 void __vhost_log_cache_sync(struct virtio_net *dev,
                struct vhost_virtqueue *vq);
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                           uint64_t iova, uint64_t len);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -393,6 +413,32 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
        vhost_log_write(dev, vq->log_guest_addr + offset, len);
 }
 
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                          uint64_t iova, uint64_t len)
+{
+       if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+               return;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               __vhost_log_cache_write_iova(dev, vq, iova, len);
+       else
+               __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                          uint64_t iova, uint64_t len)
+{
+       if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+               return;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               __vhost_log_write_iova(dev, vq, iova, len);
+       else
+               __vhost_log_write(dev, iova, len);
+}
+
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
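
Both wrappers keep the fast path cheap: a single feature-bit test skips logging entirely when VHOST_F_LOG_ALL is off, and only when VIRTIO_F_IOMMU_PLATFORM was negotiated is the address treated as an I/O virtual address that the __vhost_log_*_iova variants still have to translate before marking the dirty page. A usage sketch follows; it relies on the declarations in this header, and the function and variable names here are hypothetical.

/*
 * Usage sketch only: how a datapath routine might log a write it just made
 * into a guest buffer described by (buf_iova, buf_len).  Assumes this header
 * is included; names are illustrative.
 */
static void
toy_log_guest_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
                    uint64_t buf_iova, uint64_t buf_len)
{
        /* ... payload already copied into the buffer behind buf_iova ... */

        /*
         * Dirty-page logging: the address is translated through the IOTLB
         * only when VIRTIO_F_IOMMU_PLATFORM was negotiated, otherwise
         * buf_iova is already a guest physical address.
         */
        vhost_log_cache_write_iova(dev, vq, buf_iova, buf_len);

        /* Flush the per-queue log cache to the shared log, e.g. once per burst. */
        vhost_log_cache_sync(dev, vq);
}
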
@@ -447,6 +493,26 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
        return 0;
 }
 
+static __rte_always_inline uint64_t
+hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
+{
+       struct rte_vhost_mem_region *r;
+       uint32_t i;
+
+       if (unlikely(!dev || !dev->mem))
+               return 0;
+
+       for (i = 0; i < dev->mem->nregions; i++) {
+               r = &dev->mem->regions[i];
+
+               if (vva >= r->host_user_addr &&
+                   vva + len < r->host_user_addr + r->size) {
+                       return r->guest_phys_addr + vva - r->host_user_addr;
+               }
+       }
+       return 0;
+}
+
 static __rte_always_inline struct virtio_net *
 get_device(int vid)
 {
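
hva_to_gpa() is the reverse of the usual guest-physical-to-host lookup: it scans the registered memory regions and, when the whole [vva, vva + len) range fits inside one region, maps the host virtual address back to a guest physical address, returning 0 on a miss. A self-contained sketch of the same lookup over a toy region table:

/*
 * Sketch of the reverse (host-virtual -> guest-physical) lookup performed
 * by hva_to_gpa(), over a simplified region table.  Returns 0 when the
 * range does not fit in any region, matching the convention above.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_mem_region {
        uint64_t guest_phys_addr;
        uint64_t host_user_addr;
        uint64_t size;
};

static uint64_t
toy_hva_to_gpa(const struct toy_mem_region *regions, uint32_t nregions,
               uint64_t vva, uint64_t len)
{
        const struct toy_mem_region *r;
        uint32_t i;

        for (i = 0; i < nregions; i++) {
                r = &regions[i];

                if (vva >= r->host_user_addr &&
                    vva + len < r->host_user_addr + r->size)
                        return r->guest_phys_addr + vva - r->host_user_addr;
        }
        return 0;
}

int
main(void)
{
        struct toy_mem_region regions[] = {
                { .guest_phys_addr = 0x40000000,
                  .host_user_addr  = 0x7f0000000000,
                  .size            = 0x10000000 },
        };

        /* An address 0x1000 bytes into the region maps to GPA 0x40001000. */
        printf("gpa = 0x%llx\n",
               (unsigned long long)toy_hva_to_gpa(regions, 1,
                                                  0x7f0000001000ULL, 64));
        return 0;
}
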
@@ -467,6 +533,7 @@ void vhost_destroy_device(int);
 void vhost_destroy_device_notify(struct virtio_net *dev);
 
 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
+void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
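
For symmetry with the allocation sketch earlier, a hypothetical teardown helper that releases the fd and the mapping recorded in struct inflight_mem_info (defined in this header). It only illustrates the resources the inflight bookkeeping holds; it is not what cleanup_vq_inflight() actually does.

/*
 * Counterpart to the allocation sketch above (illustrative only).
 * Unmap the shared region and close its fd, then reset the bookkeeping.
 */
#include <sys/mman.h>
#include <unistd.h>

static void
inflight_mem_free_sketch(struct inflight_mem_info *info)
{
        if (info->addr != NULL)
                munmap(info->addr, info->size);
        if (info->fd >= 0)
                close(info->fd);
        info->addr = NULL;
        info->fd = -1;
        info->size = 0;
}
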