vhost: un-inline dirty pages logging functions
author Maxime Coquelin <maxime.coquelin@redhat.com>
Wed, 29 May 2019 13:04:16 +0000 (15:04 +0200)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Thu, 13 Jun 2019 14:54:29 +0000 (23:54 +0900)
In order to reduce I-cache pressure, this patch removes
the inlining of the dirty page logging functions, which
can be considered cold path.

Indeed, these functions are only called during live
migration, so they are not called most of the time.

Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Reviewed-by: Tiwei Bie <tiwei.bie@intel.com>
lib/librte_vhost/vhost.c
lib/librte_vhost/vhost.h
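
The hot/cold split described above boils down to keeping only the feature-bit
check inline in vhost.h while the logging body moves out of line into vhost.c.
A minimal standalone sketch of that pattern follows; the names (demo_dev,
DEMO_F_LOG, __demo_log_write) are illustrative stand-ins, not the DPDK API:

	#include <stdint.h>

	struct demo_dev {
		uint64_t features;
	};

	#define DEMO_F_LOG 26	/* stand-in for VHOST_F_LOG_ALL */

	/* Cold path: defined in a .c file, so it is never inlined
	 * into callers and stays out of their I-cache. */
	void __demo_log_write(struct demo_dev *dev, uint64_t addr,
			uint64_t len);

	/* Hot path: only this unlikely-hinted feature check is
	 * duplicated at each call site. */
	static inline void
	demo_log_write(struct demo_dev *dev, uint64_t addr, uint64_t len)
	{
		if (__builtin_expect(!!(dev->features &
				(1ULL << DEMO_F_LOG)), 0))
			__demo_log_write(dev, addr, len);
	}

With the body out of line, each call site carries just one predictable branch
when logging is disabled, which is what shrinks the I-cache footprint outside
of live migration.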

index 163f459..7d427b6 100644
@@ -69,6 +69,137 @@ __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return 0;
 }
 
+#define VHOST_LOG_PAGE 4096
+
+/*
+ * Atomically set a bit in memory.
+ */
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+       /*
+        * __sync_ built-ins are deprecated, but __atomic_ ones
+        * are sub-optimized in older GCC versions.
+        */
+       __sync_fetch_and_or_1(addr, (1U << nr));
+#else
+       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
+}
+
+static __rte_always_inline void
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+       vhost_set_bit(page % 8, &log_base[page / 8]);
+}
+
+void
+__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+       uint64_t page;
+
+       if (unlikely(!dev->log_base || !len))
+               return;
+
+       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+               return;
+
+       /* To make sure guest memory updates are committed before logging */
+       rte_smp_wmb();
+
+       page = addr / VHOST_LOG_PAGE;
+       while (page * VHOST_LOG_PAGE < addr + len) {
+               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+               page += 1;
+       }
+}
+
+void
+__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       unsigned long *log_base;
+       int i;
+
+       if (unlikely(!dev->log_base))
+               return;
+
+       rte_smp_wmb();
+
+       log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+               /*
+                * '__sync' builtins are deprecated, but '__atomic' ones
+                * are sub-optimized in older GCC versions.
+                */
+               __sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+               __atomic_fetch_or(log_base + elem->offset, elem->val,
+                               __ATOMIC_RELAXED);
+#endif
+       }
+
+       rte_smp_wmb();
+
+       vq->log_cache_nb_elem = 0;
+}
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t page)
+{
+       uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+       uint32_t offset = page / (sizeof(unsigned long) << 3);
+       int i;
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+               if (elem->offset == offset) {
+                       elem->val |= (1UL << bit_nr);
+                       return;
+               }
+       }
+
+       if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+               /*
+                * No more room for a new log cache entry,
+                * so write the dirty log map directly.
+                */
+               rte_smp_wmb();
+               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+               return;
+       }
+
+       vq->log_cache[i].offset = offset;
+       vq->log_cache[i].val = (1UL << bit_nr);
+       vq->log_cache_nb_elem++;
+}
+
+void
+__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t addr, uint64_t len)
+{
+       uint64_t page;
+
+       if (unlikely(!dev->log_base || !len))
+               return;
+
+       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+               return;
+
+       page = addr / VHOST_LOG_PAGE;
+       while (page * VHOST_LOG_PAGE < addr + len) {
+               vhost_log_cache_page(dev, vq, page);
+               page += 1;
+       }
+}
+
 void
 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 {
index d49c3b8..dd27c24 100644
@@ -350,138 +350,33 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
                wrap_counter != !!(flags & VRING_DESC_F_USED);
 }
 
-#define VHOST_LOG_PAGE 4096
-
-/*
- * Atomically set a bit in memory.
- */
-static __rte_always_inline void
-vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
-{
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-       /*
-        * __sync_ built-ins are deprecated, but __atomic_ ones
-        * are sub-optimized in older GCC versions.
-        */
-       __sync_fetch_and_or_1(addr, (1U << nr));
-#else
-       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
-#endif
-}
-
-static __rte_always_inline void
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
-       vhost_set_bit(page % 8, &log_base[page / 8]);
-}
+void __vhost_log_cache_write(struct virtio_net *dev,
+               struct vhost_virtqueue *vq,
+               uint64_t addr, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+               struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 {
-       uint64_t page;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
-
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
-
-       /* To make sure guest memory updates are committed before logging */
-       rte_smp_wmb();
-
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-               page += 1;
-       }
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_write(dev, addr, len);
 }
 
 static __rte_always_inline void
 vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-       unsigned long *log_base;
-       int i;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base))
-               return;
-
-       rte_smp_wmb();
-
-       log_base = (unsigned long *)(uintptr_t)dev->log_base;
-
-       for (i = 0; i < vq->log_cache_nb_elem; i++) {
-               struct log_cache_entry *elem = vq->log_cache + i;
-
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-               /*
-                * '__sync' builtins are deprecated, but '__atomic' ones
-                * are sub-optimized in older GCC versions.
-                */
-               __sync_fetch_and_or(log_base + elem->offset, elem->val);
-#else
-               __atomic_fetch_or(log_base + elem->offset, elem->val,
-                               __ATOMIC_RELAXED);
-#endif
-       }
-
-       rte_smp_wmb();
-
-       vq->log_cache_nb_elem = 0;
-}
-
-static __rte_always_inline void
-vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t page)
-{
-       uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
-       uint32_t offset = page / (sizeof(unsigned long) << 3);
-       int i;
-
-       for (i = 0; i < vq->log_cache_nb_elem; i++) {
-               struct log_cache_entry *elem = vq->log_cache + i;
-
-               if (elem->offset == offset) {
-                       elem->val |= (1UL << bit_nr);
-                       return;
-               }
-       }
-
-       if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
-               /*
-                * No more room for a new log cache entry,
-                * so write the dirty log map directly.
-                */
-               rte_smp_wmb();
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-
-               return;
-       }
-
-       vq->log_cache[i].offset = offset;
-       vq->log_cache[i].val = (1UL << bit_nr);
-       vq->log_cache_nb_elem++;
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_cache_sync(dev, vq);
 }
 
 static __rte_always_inline void
 vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t addr, uint64_t len)
 {
-       uint64_t page;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
-
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
-
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_cache_page(dev, vq, page);
-               page += 1;
-       }
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_cache_write(dev, vq, addr, len);
 }
 
 static __rte_always_inline void