+/* Return true when the PACKED virtqueue layout was negotiated. */
+static __rte_always_inline bool
+vq_is_packed(struct virtio_net *dev)
+{
+    const uint64_t packed_bit = 1ULL << VIRTIO_F_RING_PACKED;
+
+    return (dev->features & packed_bit) != 0;
+}
+
+/*
+ * Check whether a packed-ring descriptor is available to the device.
+ * Per the packed virtqueue rules, a descriptor is available when its
+ * AVAIL flag matches the current wrap counter and its USED flag does not.
+ * The flags are read with acquire ordering so the descriptor contents
+ * are not read before the flags.
+ */
+static inline bool
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+    uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
+    bool avail = !!(flags & VRING_DESC_F_AVAIL);
+    bool used  = !!(flags & VRING_DESC_F_USED);
+
+    return avail == wrap_counter && used != wrap_counter;
+}
+
+/*
+ * Advance the device's used index by @num entries, wrapping around the
+ * ring and toggling the used wrap counter when the end is passed.
+ */
+static inline void
+vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
+{
+    uint16_t idx = vq->last_used_idx + num;
+
+    if (idx >= vq->size) {
+        idx -= vq->size;
+        vq->used_wrap_counter ^= 1;
+    }
+    vq->last_used_idx = idx;
+}
+
+/*
+ * Advance the driver's avail index by @num entries, wrapping around the
+ * ring and toggling the avail wrap counter when the end is passed.
+ */
+static inline void
+vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
+{
+    uint16_t idx = vq->last_avail_idx + num;
+
+    if (idx >= vq->size) {
+        idx -= vq->size;
+        vq->avail_wrap_counter ^= 1;
+    }
+    vq->last_avail_idx = idx;
+}
+
+/*
+ * Out-of-line dirty-page logging back-ends (definitions are not in this
+ * chunk).  @addr is a guest physical address; @iova is an I/O virtual
+ * address that presumably requires translation when an IOMMU is in use --
+ * confirm against the implementations.  The "_cache" variants appear to
+ * stage log updates per virtqueue, with __vhost_log_cache_sync() flushing
+ * them; verify in the defining source file.
+ */
+void __vhost_log_cache_write(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t addr, uint64_t len);
+void __vhost_log_cache_write_iova(struct virtio_net *dev,
+ struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
+void __vhost_log_cache_sync(struct virtio_net *dev,
+ struct vhost_virtqueue *vq);
+void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
+void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint64_t iova, uint64_t len);
+
+/* Log a dirty guest-physical range; no-op unless logging is enabled. */
+static __rte_always_inline void
+vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+    if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+
+    __vhost_log_write(dev, addr, len);
+}
+
+/* Flush the virtqueue's log cache; no-op unless logging is enabled. */
+static __rte_always_inline void
+vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+    if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+
+    __vhost_log_cache_sync(dev, vq);
+}
+
+/* Cached variant of vhost_log_write(); no-op unless logging is enabled. */
+static __rte_always_inline void
+vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+    uint64_t addr, uint64_t len)
+{
+    if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+
+    __vhost_log_cache_write(dev, vq, addr, len);
+}
+
+/*
+ * Log (via the cache) a dirty range inside the used ring, given as an
+ * offset from the ring's guest address.  Skipped when logging is off or
+ * when the ring's guest address is unknown (log_guest_addr == 0).
+ */
+static __rte_always_inline void
+vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+    uint64_t offset, uint64_t len)
+{
+    if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+    if (unlikely(vq->log_guest_addr == 0))
+        return;
+
+    __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+}
+
+/*
+ * Log (directly, uncached) a dirty range inside the used ring, given as
+ * an offset from the ring's guest address.  Skipped when logging is off
+ * or when the ring's guest address is unknown (log_guest_addr == 0).
+ */
+static __rte_always_inline void
+vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+    uint64_t offset, uint64_t len)
+{
+    if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+    if (unlikely(vq->log_guest_addr == 0))
+        return;
+
+    __vhost_log_write(dev, vq->log_guest_addr + offset, len);
+}
+
+/*
+ * Cached log write keyed by IOVA.  When VIRTIO_F_IOMMU_PLATFORM was
+ * negotiated the address is an I/O virtual address and is handed to the
+ * translating back-end; otherwise it is used as a guest address directly.
+ */
+static __rte_always_inline void
+vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+    uint64_t iova, uint64_t len)
+{
+    const uint64_t feats = dev->features;
+
+    if (likely(!(feats & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+
+    if (feats & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+        __vhost_log_cache_write_iova(dev, vq, iova, len);
+    else
+        __vhost_log_cache_write(dev, vq, iova, len);
+}
+
+/*
+ * Direct (uncached) log write keyed by IOVA.  When VIRTIO_F_IOMMU_PLATFORM
+ * was negotiated the address is an I/O virtual address and is handed to
+ * the translating back-end; otherwise it is used as a guest address
+ * directly.
+ */
+static __rte_always_inline void
+vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
+    uint64_t iova, uint64_t len)
+{
+    const uint64_t feats = dev->features;
+
+    if (likely(!(feats & (1ULL << VHOST_F_LOG_ALL))))
+        return;
+
+    if (feats & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+        __vhost_log_write_iova(dev, vq, iova, len);
+    else
+        __vhost_log_write(dev, iova, len);
+}
+
+/* Dynamic log types for the vhost library (registered elsewhere). */
+extern int vhost_config_log_level;
+extern int vhost_data_log_level;
+
+/* Control-path logging; always routed to rte_log(). */
+#define VHOST_LOG_CONFIG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
+ "VHOST_CONFIG: " fmt, ##args)
+
+/* Data-path logging: the requested level is first compared against the
+ * compile-time RTE_LOG_DP_LEVEL constant, so messages above that level
+ * fold to a no-op and cost nothing on the fast path.
+ */
+#define VHOST_LOG_DATA(level, fmt, args...) \
+ (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
+ rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
+ "VHOST_DATA : " fmt, ##args) : \
+ 0)