diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index e9138df..884befa 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -308,7 +308,7 @@ struct virtio_net {
        uint64_t                log_size;
        uint64_t                log_base;
        uint64_t                log_addr;
-       struct ether_addr       mac;
+       struct rte_ether_addr   mac;
        uint16_t                mtu;
 
        struct vhost_device_ops const *notify_ops;
@@ -350,138 +350,33 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
                wrap_counter != !!(flags & VRING_DESC_F_USED);
 }
 
-#define VHOST_LOG_PAGE 4096
-
-/*
- * Atomically set a bit in memory.
- */
-static __rte_always_inline void
-vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
-{
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-       /*
-        * __sync_ built-ins are deprecated, but __atomic_ ones
-        * are sub-optimized in older GCC versions.
-        */
-       __sync_fetch_and_or_1(addr, (1U << nr));
-#else
-       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
-#endif
-}
-
-static __rte_always_inline void
-vhost_log_page(uint8_t *log_base, uint64_t page)
-{
-       vhost_set_bit(page % 8, &log_base[page / 8]);
-}
+void __vhost_log_cache_write(struct virtio_net *dev,
+               struct vhost_virtqueue *vq,
+               uint64_t addr, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+               struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
 
 static __rte_always_inline void
 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 {
-       uint64_t page;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
-
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
-
-       /* To make sure guest memory updates are committed before logging */
-       rte_smp_wmb();
-
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-               page += 1;
-       }
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_write(dev, addr, len);
 }
 
 static __rte_always_inline void
 vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
-       unsigned long *log_base;
-       int i;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base))
-               return;
-
-       rte_smp_wmb();
-
-       log_base = (unsigned long *)(uintptr_t)dev->log_base;
-
-       for (i = 0; i < vq->log_cache_nb_elem; i++) {
-               struct log_cache_entry *elem = vq->log_cache + i;
-
-#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
-               /*
-                * '__sync' builtins are deprecated, but '__atomic' ones
-                * are sub-optimized in older GCC versions.
-                */
-               __sync_fetch_and_or(log_base + elem->offset, elem->val);
-#else
-               __atomic_fetch_or(log_base + elem->offset, elem->val,
-                               __ATOMIC_RELAXED);
-#endif
-       }
-
-       rte_smp_wmb();
-
-       vq->log_cache_nb_elem = 0;
-}
-
-static __rte_always_inline void
-vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t page)
-{
-       uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
-       uint32_t offset = page / (sizeof(unsigned long) << 3);
-       int i;
-
-       for (i = 0; i < vq->log_cache_nb_elem; i++) {
-               struct log_cache_entry *elem = vq->log_cache + i;
-
-               if (elem->offset == offset) {
-                       elem->val |= (1UL << bit_nr);
-                       return;
-               }
-       }
-
-       if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
-               /*
-                * No more room for a new log cache entry,
-                * so write the dirty log map directly.
-                */
-               rte_smp_wmb();
-               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
-
-               return;
-       }
-
-       vq->log_cache[i].offset = offset;
-       vq->log_cache[i].val = (1UL << bit_nr);
-       vq->log_cache_nb_elem++;
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_cache_sync(dev, vq);
 }
 
 static __rte_always_inline void
 vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t addr, uint64_t len)
 {
-       uint64_t page;
-
-       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
-                  !dev->log_base || !len))
-               return;
-
-       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
-               return;
-
-       page = addr / VHOST_LOG_PAGE;
-       while (page * VHOST_LOG_PAGE < addr + len) {
-               vhost_log_cache_page(dev, vq, page);
-               page += 1;
-       }
+       if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
+               __vhost_log_cache_write(dev, vq, addr, len);
 }
 
 static __rte_always_inline void
@@ -593,6 +488,9 @@ void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t iova, uint64_t *len, uint8_t perm);
+void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
+                       struct vhost_virtqueue *vq,
+                       uint64_t desc_addr, uint64_t desc_len);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
@@ -706,39 +604,6 @@ kick:
                eventfd_write(vq->callfd, (eventfd_t)1);
 }
 
-static __rte_always_inline void *
-alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
-               uint64_t desc_addr, uint64_t desc_len)
-{
-       void *idesc;
-       uint64_t src, dst;
-       uint64_t len, remain = desc_len;
-
-       idesc = rte_malloc(__func__, desc_len, 0);
-       if (unlikely(!idesc))
-               return 0;
-
-       dst = (uint64_t)(uintptr_t)idesc;
-
-       while (remain) {
-               len = remain;
-               src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
-                               VHOST_ACCESS_RO);
-               if (unlikely(!src || !len)) {
-                       rte_free(idesc);
-                       return 0;
-               }
-
-               rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
-
-               remain -= len;
-               dst += len;
-               desc_addr += len;
-       }
-
-       return idesc;
-}
-
 static __rte_always_inline void
 free_ind_table(void *idesc)
 {