#define MAX_PKT_BURST 32
-#define ASYNC_MAX_POLL_SEG 255
-
#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
uint32_t desc_idx;
};
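
The doubling of VHOST_MAX_ASYNC_VEC above follows from the multi-page HPA handling introduced further down: once a guest-contiguous buffer vector may map to several host-discontiguous chunks, a single buffer vector can consume more than one iovec entry. A back-of-the-envelope sizing, assuming the upstream value BUF_VECTOR_MAX = 256:

    /*
     * Sizing arithmetic (BUF_VECTOR_MAX = 256 as in upstream vhost.h):
     *   old pool: 256 * 2 =  512 iovecs ->  512 * 16 B =  8 KiB per vq (LP64)
     *   new pool: 256 * 4 = 1024 iovecs -> 1024 * 16 B = 16 KiB per vq (LP64)
     */

The extra footprint is also why it_pool and vec_pool become dynamically allocated pointers in the virtqueue structure below, rather than inline arrays.
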
-/*
- * A structure to hold some fields needed in zero copy code path,
- * mainly for associating an mbuf with the right desc_idx.
- */
-struct zcopy_mbuf {
- struct rte_mbuf *mbuf;
- uint32_t desc_idx;
- uint16_t desc_count;
- uint16_t in_use;
-
- TAILQ_ENTRY(zcopy_mbuf) next;
-};
-TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
-
/*
 * Structure that holds the info for each batched memory copy.
*/
int enabled;
int access_ok;
int ready;
+ int notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF (-1)
+
rte_spinlock_t access_lock;
/* Used to notify the guest (trigger interrupt) */
struct rte_vhost_resubmit_info *resubmit_inflight;
uint64_t global_counter;
- uint16_t nr_zmbuf;
- uint16_t zmbuf_size;
- uint16_t last_zmbuf_idx;
- struct zcopy_mbuf *zmbufs;
- struct zcopy_mbuf_list zmbuf_list;
-
union {
struct vring_used_elem *shadow_used_split;
struct vring_used_elem_packed *shadow_used_packed;
 /* Operation callbacks for async DMA */
struct rte_vhost_async_channel_ops async_ops;
- struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
- struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+ struct rte_vhost_iov_iter *it_pool;
+ struct iovec *vec_pool;
/* async data transfer status */
uintptr_t **async_pkts_pending;
- #define ASYNC_PENDING_INFO_N_MSK 0xFFFF
- #define ASYNC_PENDING_INFO_N_SFT 16
- uint64_t *async_pending_info;
+ struct async_inflight_info *async_pkts_info;
uint16_t async_pkts_idx;
uint16_t async_pkts_inflight_n;
- uint16_t async_last_seg_n;
+ uint16_t async_last_pkts_n;
/* vq async features */
bool async_inorder;
} __rte_cache_aligned;
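
Converting it_pool and vec_pool from inline arrays to pointers keeps struct vhost_virtqueue small: the scratch space only costs memory once an async channel is actually registered. A minimal allocation sketch, assuming the pools are created at async channel registration and released on unregistration (the helper name async_pools_alloc is illustrative, not upstream code):

    static int
    async_pools_alloc(struct vhost_virtqueue *vq)
    {
        /* Scratch space for the iov iterators and their iovec entries. */
        vq->it_pool = rte_malloc(NULL,
                VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
                RTE_CACHE_LINE_SIZE);
        vq->vec_pool = rte_malloc(NULL,
                VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
                RTE_CACHE_LINE_SIZE);
        if (vq->it_pool == NULL || vq->vec_pool == NULL) {
            /* rte_free(NULL) is a no-op, so partial failure is fine. */
            rte_free(vq->it_pool);
            rte_free(vq->vec_pool);
            vq->it_pool = NULL;
            vq->vec_pool = NULL;
            return -1;
        }
        return 0;
    }
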
/* Virtio device status as per Virtio specification */
+#define VIRTIO_DEVICE_STATUS_RESET 0x00
#define VIRTIO_DEVICE_STATUS_ACK 0x01
#define VIRTIO_DEVICE_STATUS_DRIVER 0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK 0x04
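
VIRTIO_DEVICE_STATUS_RESET gives the 0x00 status value a name: per the virtio specification, the driver writing 0 to the status field resets the device. A hedged sketch of how a status-write handler might use the define (handle_status_write and vhost_device_reset are hypothetical names, and a status field on virtio_net is assumed):

    static void
    handle_status_write(struct virtio_net *dev, uint8_t status)
    {
        if (status == VIRTIO_DEVICE_STATUS_RESET) {
            /* Driver wrote 0: bring the device back to its initial state. */
            vhost_device_reset(dev); /* hypothetical helper */
            return;
        }
        dev->status = status; /* assumed status field */
    }
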
/* to tell if we need to broadcast a RARP packet */
int16_t broadcast_rarp;
uint32_t nr_vring;
- int dequeue_zero_copy;
int async_copy;
int extbuf;
int linearbuf;
return 0;
}
-/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
-gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+ uint64_t gpa_size, uint64_t *hpa_size)
{
uint32_t i;
struct guest_page *page;
struct guest_page key;
+ *hpa_size = gpa_size;
if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
- key.guest_phys_addr = gpa;
+ key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
sizeof(struct guest_page), guest_page_addrcmp);
if (page) {
- if (gpa + size < page->guest_phys_addr + page->size)
+ if (gpa + gpa_size <=
+ page->guest_phys_addr + page->size) {
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ } else if (gpa < page->guest_phys_addr +
+ page->size) {
+ *hpa_size = page->guest_phys_addr +
+ page->size - gpa;
return gpa - page->guest_phys_addr +
page->host_phys_addr;
+ }
}
} else {
for (i = 0; i < dev->nr_guest_pages; i++) {
page = &dev->guest_pages[i];
- if (gpa >= page->guest_phys_addr &&
- gpa + size < page->guest_phys_addr +
- page->size)
- return gpa - page->guest_phys_addr +
- page->host_phys_addr;
+ if (gpa >= page->guest_phys_addr) {
+ if (gpa + gpa_size <=
+ page->guest_phys_addr + page->size) {
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ } else if (gpa < page->guest_phys_addr +
+ page->size) {
+ *hpa_size = page->guest_phys_addr +
+ page->size - gpa;
+ return gpa - page->guest_phys_addr +
+ page->host_phys_addr;
+ }
+ }
}
}
+ *hpa_size = 0;
return 0;
}
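
gpa_to_first_hpa() changes the old contract: instead of returning 0 whenever [gpa, gpa + size) is not host-physically contiguous, it returns the HPA of the first contiguous chunk and reports that chunk's length through hpa_size (0 meaning the address is not mapped at all). Callers that need the full range, e.g. when building an iovec list for an async DMA copy, can loop until the range is consumed. A minimal sketch, where fill_hpa_iovec is an illustrative helper, not upstream code:

    static int
    fill_hpa_iovec(struct virtio_net *dev, uint64_t gpa, uint64_t size,
            struct iovec *iov, int iov_max)
    {
        int n = 0;

        while (size > 0 && n < iov_max) {
            uint64_t seg_size;
            rte_iova_t hpa = gpa_to_first_hpa(dev, gpa, size, &seg_size);

            if (seg_size == 0)
                return -1; /* range not (fully) mapped */

            iov[n].iov_base = (void *)(uintptr_t)hpa;
            iov[n].iov_len = seg_size;
            n++;
            gpa += seg_size;
            size -= seg_size;
        }
        return size == 0 ? n : -1; /* -1: ran out of iovec entries */
    }

The reworked gpa_to_hpa() below is then just the strict special case: the first chunk must cover the whole requested size.
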
+/* Convert guest physical address to host physical address */
+static __rte_always_inline rte_iova_t
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+{
+ rte_iova_t hpa;
+ uint64_t hpa_size;
+
+ hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
+ return hpa_size == size ? hpa : 0;
+}
+
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
+int vhost_enable_guest_notification(struct virtio_net *dev,
+ struct vhost_virtqueue *vq, int enable);
struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
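
Together with the new notif_enable field (initialized to VIRTIO_UNINITIALIZED_NOTIF), vhost_enable_guest_notification() lets a notification request arrive before the virtqueue is fully initialized: the requested state is cached and applied once the ring becomes ready. A plausible implementation sketch, assuming an existing internal vhost_enable_notify() helper; not necessarily the exact upstream body:

    int
    vhost_enable_guest_notification(struct virtio_net *dev,
            struct vhost_virtqueue *vq, int enable)
    {
        /*
         * Cache the requested state; if the ring is not ready yet,
         * it is applied when the virtqueue becomes ready.
         */
        vq->notif_enable = enable;
        if (!vq->ready)
            return 0;

        return vhost_enable_notify(dev, vq, enable);
    }
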
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
/* Flush used->idx update before we read avail->flags. */
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 /* Don't kick the guest if we haven't reached the index specified by the guest. */
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
bool signalled_used_valid, kick = false;
/* Flush used desc update. */
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
if (vq->driver_event->flags !=
goto kick;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
off_wrap = vq->driver_event->off_wrap;
off = off_wrap & ~(1 << 15);
return true;
}
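
The rte_smp_*() barriers are replaced by rte_atomic_thread_fence() as part of the migration to C11 atomic builtins: __ATOMIC_SEQ_CST preserves the full store-load barrier between publishing used->idx and reading the guest's interrupt-suppression state, while the off_wrap read in the packed path only needs __ATOMIC_ACQUIRE. Condensed for illustration, the split path boils down to something like:

    /* Publish the used ring update, then decide whether to kick. */
    vq->used->idx = vq->last_used_idx;

    /*
     * Store-load barrier: used->idx must be globally visible before
     * avail->flags (or the event index) is read, or a kick may be lost.
     */
    rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

    if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
        eventfd_write(vq->callfd, (eventfd_t)1);
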
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
- zmbuf->in_use = 0;
-}
-
#endif /* _VHOST_NET_CDEV_H_ */