#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
+#include <rte_malloc.h>
#include "rte_vhost.h"
#include "rte_vdpa.h"
#ifndef VIRTIO_F_RING_PACKED
#define VIRTIO_F_RING_PACKED 34
-#define VRING_DESC_F_NEXT 1
-#define VRING_DESC_F_WRITE 2
-#define VRING_DESC_F_INDIRECT 4
-
-#define VRING_DESC_F_AVAIL (1ULL << 7)
-#define VRING_DESC_F_USED (1ULL << 15)
-
struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};
-#define VRING_EVENT_F_ENABLE 0x0
-#define VRING_EVENT_F_DISABLE 0x1
-#define VRING_EVENT_F_DESC 0x2
-
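/*
 * Packed ring event suppression structure (virtio 1.1): off_wrap holds
 * the descriptor event offset in bits 0-14 and the event wrap counter
 * in bit 15; flags takes one of the VRING_EVENT_F_* values declared
 * below, outside this guard.
 */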
struct vring_packed_desc_event {
uint16_t off_wrap;
uint16_t flags;
};
#endif
+/*
+ * Declare the packed ring defines below unconditionally,
+ * as the kernel header might use different names.
+ */
+#define VRING_DESC_F_AVAIL (1ULL << 7)
+#define VRING_DESC_F_USED (1ULL << 15)
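+
+/*
+ * A descriptor is available to the device when its avail bit matches
+ * the ring's wrap counter while its used bit does not; the device
+ * flips the used bit to match once the descriptor has been consumed.
+ */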
+
+#define VRING_EVENT_F_ENABLE 0x0
+#define VRING_EVENT_F_DISABLE 0x1
+#define VRING_EVENT_F_DESC 0x2
+
/*
 * Available and used descriptors are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif
(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
(1ULL << VIRTIO_NET_F_MTU) | \
(1ULL << VIRTIO_F_IN_ORDER) | \
- (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_F_RING_PACKED))
struct guest_page {
int slave_req_fd;
rte_spinlock_t slave_req_lock;
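+	/* Userfaultfd and listening state for postcopy live migration */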
+ int postcopy_ufd;
+ int postcopy_listening;
+
/*
* Device id to identify a specific backend device.
* It's set to -1 for the default software implementation.
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
- return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
- wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
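+	/*
+	 * Read the descriptor flags only once, so that both checks
+	 * below see the same snapshot of the avail and used bits.
+	 */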
+ uint16_t flags = *((volatile uint16_t *) &desc->flags);
+
+ return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
+ wrap_counter != !!(flags & VRING_DESC_F_USED);
}
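/*
 * Illustrative polling sketch (avail_idx and wrap_counter are the
 * caller's ring state; the names are assumptions, not part of this
 * header): descriptors are consumed until the next one is no longer
 * available, toggling the wrap counter on each ring wrap-around:
 *
 *	while (desc_is_avail(&descs[avail_idx], wrap_counter)) {
 *		... process descs[avail_idx] ...
 *		if (++avail_idx >= vq->size) {
 *			avail_idx = 0;
 *			wrap_counter ^= 1;
 *		}
 *	}
 */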
#define VHOST_LOG_PAGE 4096
	if (unlikely(!dev->log_base))
return;
-	log_base = (unsigned long *)(uintptr_t)dev->log_base;
-	/*
-	 * It is expected a write memory barrier has been issued
-	 * before this function is called.
-	 */
+	/*
+	 * Make sure guest memory writes are committed before marking
+	 * the corresponding pages dirty in the log.
+	 */
+	rte_smp_wmb();
+
+	log_base = (unsigned long *)(uintptr_t)dev->log_base;
for (i = 0; i < vq->log_cache_nb_elem; i++) {
struct log_cache_entry *elem = vq->log_cache + i;
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
void vhost_attach_vdpa_device(int vid, int did);
-void vhost_detach_vdpa_device(int vid);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
eventfd_write(vq->callfd, (eventfd_t)1);
}
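+/*
+ * Allocate a local copy of the guest indirect descriptor table pointed
+ * to by desc_addr/desc_len. The table may not be contiguous in the
+ * host process VA space, so it is copied chunk by chunk, translating
+ * each chunk with vhost_iova_to_vva(). Returns NULL on allocation or
+ * translation failure; release the copy with free_ind_table().
+ */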
+static __rte_always_inline void *
+alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t desc_addr, uint64_t desc_len)
+{
+	void *idesc;
+	uint64_t src, dst;
+	uint64_t len, remain = desc_len;
+
+	idesc = rte_malloc(__func__, desc_len, 0);
+	if (unlikely(!idesc))
+		return NULL;
+
+	dst = (uint64_t)(uintptr_t)idesc;
+
+	while (remain) {
+		len = remain;
+		/*
+		 * The translation may return a chunk smaller than
+		 * requested when the region is not contiguous in the
+		 * host process VA space.
+		 */
+		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+				VHOST_ACCESS_RO);
+		if (unlikely(!src || !len)) {
+			rte_free(idesc);
+			return NULL;
+		}
+
+		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+		remain -= len;
+		dst += len;
+		desc_addr += len;
+	}
+
+	return idesc;
+}
+
+static __rte_always_inline void
+free_ind_table(void *idesc)
+{
+ rte_free(idesc);
+}
+
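+/*
+ * Illustrative usage sketch (not part of this header; "idesc",
+ * "descs", "idx" and "dlen" are the datapath caller's locals): when
+ * an indirect descriptor table turns out not to be contiguous in
+ * host VA space, the caller falls back on a local copy of it:
+ *
+ *	dlen = vq->desc[idx].len;
+ *	descs = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev,
+ *			vq, vq->desc[idx].addr, &dlen, VHOST_ACCESS_RO);
+ *	if (unlikely(!descs))
+ *		return -1;
+ *
+ *	if (unlikely(dlen < vq->desc[idx].len)) {
+ *		idesc = alloc_copy_ind_table(dev, vq,
+ *				vq->desc[idx].addr, vq->desc[idx].len);
+ *		if (unlikely(!idesc))
+ *			return -1;
+ *		descs = idesc;
+ *	}
+ *
+ *	... process descs ...
+ *
+ *	if (unlikely(idesc))
+ *		free_ind_table(idesc);
+ */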
#endif /* _VHOST_NET_CDEV_H_ */