#define MAX_PKT_BURST 32
#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
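The iovec pool budget is doubled here (BUF_VECTOR_MAX * 2 -> * 4), presumably because one buffer vector can now expand into several iovecs when a guest-contiguous range is discontiguous in host-physical space. A rough sizing sketch, assuming BUF_VECTOR_MAX is 256 as defined elsewhere in vhost.h:

	/* Sizing sketch (BUF_VECTOR_MAX = 256 assumed from vhost.h):
	 *   VHOST_MAX_ASYNC_IT  = 32 * 2  =   64 in-flight iov_iter entries
	 *   VHOST_MAX_ASYNC_VEC = 256 * 4 = 1024 iovec entries,
	 * i.e. an average budget of 16 iovecs per iterator per burst.
	 */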
#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
/* operation callbacks for async dma */
struct rte_vhost_async_channel_ops async_ops;
- struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
- struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+ struct rte_vhost_iov_iter *it_pool;
+ struct iovec *vec_pool;
/* async data transfer status */
uintptr_t **async_pkts_pending;
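With it_pool and vec_pool demoted from inline arrays to pointers, the memory has to be allocated when the async channel is registered and freed on unregister. A minimal sketch of that allocation, under the assumption that it sits in the register path (the helper name and NUMA handling are illustrative, not from the patch):

	#include <rte_malloc.h>

	/* Illustrative helper: allocate the async pools on the heap,
	 * NUMA-local to the virtqueue, now that they are no longer
	 * embedded in struct vhost_virtqueue. */
	static int
	alloc_async_pools(struct vhost_virtqueue *vq, int numa_node)
	{
		vq->it_pool = rte_malloc_socket(NULL,
				VHOST_MAX_ASYNC_IT * sizeof(struct rte_vhost_iov_iter),
				RTE_CACHE_LINE_SIZE, numa_node);
		vq->vec_pool = rte_malloc_socket(NULL,
				VHOST_MAX_ASYNC_VEC * sizeof(struct iovec),
				RTE_CACHE_LINE_SIZE, numa_node);

		if (vq->it_pool == NULL || vq->vec_pool == NULL) {
			rte_free(vq->it_pool);
			rte_free(vq->vec_pool);
			vq->it_pool = NULL;
			vq->vec_pool = NULL;
			return -1;
		}
		return 0;
	}

Besides trimming struct vhost_virtqueue, heap allocation lets the pools land on the virtqueue's NUMA node rather than wherever the device structure happens to live.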
return 0;
}
-/* Convert guest physical address to host physical address */
-static __rte_always_inline rte_iova_t
-gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
-{
- uint32_t i;
- struct guest_page *page;
- struct guest_page key;
-
- if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
- key.guest_phys_addr = gpa;
- page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
- sizeof(struct guest_page), guest_page_addrcmp);
- if (page) {
- if (gpa + size < page->guest_phys_addr + page->size)
- return gpa - page->guest_phys_addr +
- page->host_phys_addr;
- }
- } else {
- for (i = 0; i < dev->nr_guest_pages; i++) {
- page = &dev->guest_pages[i];
-
- if (gpa >= page->guest_phys_addr &&
- gpa + size < page->guest_phys_addr +
- page->size)
- return gpa - page->guest_phys_addr +
- page->host_phys_addr;
- }
- }
-
- return 0;
-}
-
static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
uint64_t gpa_size, uint64_t *hpa_size)
return 0;
}
+/* Convert guest physical address to host physical address */
+static __rte_always_inline rte_iova_t
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+{
+ rte_iova_t hpa;
+ uint64_t hpa_size;
+
+ hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
+ return hpa_size == size ? hpa : 0;
+}
+
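The open-coded page walk is gone: gpa_to_hpa is now a thin wrapper that delegates to gpa_to_first_hpa and succeeds only when the entire range is host-physically contiguous (hpa_size == size), so the lookup logic lives in one place. Callers that can tolerate a split range would instead call gpa_to_first_hpa in a loop, roughly like this (illustrative caller, not patch code):

	/* Illustrative: map [gpa, gpa + len) chunk by chunk; each call
	 * yields the largest host-physically contiguous piece at the
	 * front of the remaining range. */
	static int
	map_gpa_range(struct virtio_net *dev, uint64_t gpa, uint64_t len)
	{
		while (len) {
			uint64_t chunk = 0;
			rte_iova_t hpa = gpa_to_first_hpa(dev, gpa, len, &chunk);

			if (hpa == 0 || chunk == 0)
				return -1;	/* unmapped guest address */
			/* ... hand (hpa, chunk) to the DMA engine as one iovec ... */
			gpa += chunk;
			len -= chunk;
		}
		return 0;
	}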
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
/* Flush used->idx update before we read avail->flags. */
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
/* Don't kick guest if we don't reach index specified by guest. */
if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
bool signalled_used_valid, kick = false;
/* Flush used desc update. */
- rte_smp_mb();
+ rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
off_wrap = vq->driver_event->off_wrap;
off = off_wrap & ~(1 << 15);
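The legacy rte_smp_*() barriers are replaced with C11-style fences. The full (SEQ_CST) fence is kept where a store must be ordered before a later load, which acquire/release fences cannot express; the read barrier relaxes to an acquire fence, which only has to order the flags load before the off_wrap load. A self-contained sketch of the two patterns using plain GCC builtins (names are illustrative, not patch code):

	#include <stdint.h>

	/* store->load: publish the new used index, then read whether the
	 * guest disabled notifications. Only a full fence orders a store
	 * against a later load, hence __ATOMIC_SEQ_CST. */
	static inline uint16_t
	publish_then_check(uint16_t *used_idx, const uint16_t *avail_flags,
			uint16_t new_idx)
	{
		__atomic_store_n(used_idx, new_idx, __ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		return __atomic_load_n(avail_flags, __ATOMIC_RELAXED);
	}

	/* load->load: read the event flags, then the off_wrap field they
	 * guard. Acquire ordering suffices and is cheaper on weakly
	 * ordered CPUs (a compiler barrier on x86, dmb ishld on arm64). */
	static inline uint16_t
	read_flags_then_off(const uint16_t *flags, const uint16_t *off_wrap,
			uint16_t *flags_out)
	{
		*flags_out = __atomic_load_n(flags, __ATOMIC_RELAXED);
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		return __atomic_load_n(off_wrap, __ATOMIC_RELAXED);
	}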