vhost: try to unroll for each loop
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 7a31471..a2b9221 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
+                           sizeof(struct vring_packed_desc))
+#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
+
+#ifdef VHOST_GCC_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
+       for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VHOST_CLANG_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
+       for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VHOST_ICC_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
+       for (iter = val; iter < size; iter++)
+#endif
+
+#ifndef vhost_for_each_try_unroll
+#define vhost_for_each_try_unroll(iter, val, size) \
+       for (iter = val; iter < size; iter++)
+#endif
+
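For context: struct vring_packed_desc is 16 bytes, so with the common 64-byte
cache line PACKED_BATCH_SIZE evaluates to 4, which matches the hard-coded
unroll factor in the pragmas above. The VHOST_*_UNROLL_PRAGMA defines are
expected to be supplied by the build system for the detected compiler; if none
is defined, the macro falls back to a plain loop. Below is a minimal usage
sketch; the helper is illustrative only and not part of this patch.

	static inline uint64_t
	example_sum_batch_lens(const struct vring_packed_desc *descs)
	{
		uint16_t i;
		uint64_t total = 0;

		/* A compile-time trip count lets the compiler unroll the
		 * loop fully when one of the pragmas above is in effect;
		 * descs is assumed to point at one full batch.
		 */
		vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
			total += descs[i].len;

		return total;
	}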
 /**
  * Structure contains buffer address, length and descriptor index
  * from vring to do scatter RX.
@@ -128,6 +152,14 @@ struct vhost_virtqueue {
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
 
+       /* inflight shared memory info */
+       union {
+               struct rte_vhost_inflight_info_split *inflight_split;
+               struct rte_vhost_inflight_info_packed *inflight_packed;
+       };
+       struct rte_vhost_resubmit_info *resubmit_inflight;
+       uint64_t                global_counter;
+
        uint16_t                nr_zmbuf;
        uint16_t                zmbuf_size;
        uint16_t                last_zmbuf_idx;
@@ -286,6 +318,12 @@ struct guest_page {
        uint64_t size;
 };
 
+struct inflight_mem_info {
+       int             fd;
+       void            *addr;
+       uint64_t        size;
+};
+
 /**
  * Device structure contains all configuration information relating
  * to the device.
@@ -302,7 +340,10 @@ struct virtio_net {
        rte_atomic16_t          broadcast_rarp;
        uint32_t                nr_vring;
        int                     dequeue_zero_copy;
+       int                     extbuf;
+       int                     linearbuf;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
+       struct inflight_mem_info *inflight_info;
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
        char                    ifname[IF_NAME_SZ];
        uint64_t                log_size;
@@ -350,12 +391,37 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
                wrap_counter != !!(flags & VRING_DESC_F_USED);
 }
 
+static inline void
+vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
+{
+       vq->last_used_idx += num;
+       if (vq->last_used_idx >= vq->size) {
+               vq->used_wrap_counter ^= 1;
+               vq->last_used_idx -= vq->size;
+       }
+}
+
+static inline void
+vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
+{
+       vq->last_avail_idx += num;
+       if (vq->last_avail_idx >= vq->size) {
+               vq->avail_wrap_counter ^= 1;
+               vq->last_avail_idx -= vq->size;
+       }
+}
+
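A sketch of how a batched data path might use these helpers once a full batch
of descriptors has been processed; the function is illustrative only and not
part of this patch.

	static inline void
	example_advance_batch(struct vhost_virtqueue *vq)
	{
		/* Move the avail cursor past the whole batch; the helper
		 * flips avail_wrap_counter when the index wraps around
		 * the end of the ring.
		 */
		vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);

		/* Once the used elements have been written back, advance
		 * the used cursor by the same amount.
		 */
		vq_inc_last_used_packed(vq, PACKED_BATCH_SIZE);
	}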
 void __vhost_log_cache_write(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+               struct vhost_virtqueue *vq,
+               uint64_t iova, uint64_t len);
 void __vhost_log_cache_sync(struct virtio_net *dev,
                struct vhost_virtqueue *vq);
 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                           uint64_t iova, uint64_t len);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
@@ -393,6 +459,32 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
        vhost_log_write(dev, vq->log_guest_addr + offset, len);
 }
 
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                          uint64_t iova, uint64_t len)
+{
+       if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+               return;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               __vhost_log_cache_write_iova(dev, vq, iova, len);
+       else
+               __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                          uint64_t iova, uint64_t len)
+{
+       if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+               return;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               __vhost_log_write_iova(dev, vq, iova, len);
+       else
+               __vhost_log_write(dev, iova, len);
+}
+
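For reference, a caller-side sketch of the IOVA-aware logging path; the helper
is illustrative only and rte_memcpy() is assumed to be available via
rte_memcpy.h.

	static inline void
	example_copy_and_log(struct virtio_net *dev, struct vhost_virtqueue *vq,
			     void *guest_buf, const void *pkt,
			     uint64_t desc_iova, uint64_t len)
	{
		rte_memcpy(guest_buf, pkt, len);

		/* When VIRTIO_F_IOMMU_PLATFORM is negotiated, the IOVA is
		 * translated before the page is marked dirty in the log
		 * cache; otherwise the address is treated directly as a
		 * guest physical address.
		 */
		vhost_log_cache_write_iova(dev, vq, desc_iova, len);
	}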
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
@@ -487,6 +579,7 @@ void vhost_destroy_device(int);
 void vhost_destroy_device_notify(struct virtio_net *dev);
 
 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
+void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
@@ -496,6 +589,8 @@ void vhost_attach_vdpa_device(int vid, int did);
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_enable_dequeue_zero_copy(int vid);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
+void vhost_enable_extbuf(int vid);
+void vhost_enable_linearbuf(int vid);
 
 struct vhost_device_ops const *vhost_driver_callback_get(const char *path);