X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=658f6fc287e27a525d73d862b9b4bec1aae5a974;hb=2b9a66e1b606d3813d72dd81c626949e09706e27;hp=75d79f80a71deffdf6338a1a0a3fc69001b8aa2b;hpb=3e1e9c24648a44f10c4e3f04f016cdb881535218;p=dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 75d79f80a7..658f6fc287 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -202,11 +202,13 @@ struct vhost_virtqueue {
 	struct iovec *vec_pool;
 
 	/* async data transfer status */
-	uintptr_t **async_pkts_pending;
 	struct async_inflight_info *async_pkts_info;
 	uint16_t async_pkts_idx;
 	uint16_t async_pkts_inflight_n;
 	uint16_t async_last_pkts_n;
+	struct vring_used_elem *async_descs_split;
+	uint16_t async_desc_idx;
+	uint16_t last_async_desc_idx;
 
 	/* vq async features */
 	bool async_inorder;
@@ -563,38 +565,6 @@ static __rte_always_inline int guest_page_addrcmp(const void *p1,
 	return 0;
 }
 
-/* Convert guest physical address to host physical address */
-static __rte_always_inline rte_iova_t
-gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
-{
-	uint32_t i;
-	struct guest_page *page;
-	struct guest_page key;
-
-	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
-		key.guest_phys_addr = gpa;
-		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
-			       sizeof(struct guest_page), guest_page_addrcmp);
-		if (page) {
-			if (gpa + size < page->guest_phys_addr + page->size)
-				return gpa - page->guest_phys_addr +
-				       page->host_phys_addr;
-		}
-	} else {
-		for (i = 0; i < dev->nr_guest_pages; i++) {
-			page = &dev->guest_pages[i];
-
-			if (gpa >= page->guest_phys_addr &&
-			    gpa + size < page->guest_phys_addr +
-					 page->size)
-				return gpa - page->guest_phys_addr +
-				       page->host_phys_addr;
-		}
-	}
-
-	return 0;
-}
-
 static __rte_always_inline rte_iova_t
 gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
 	uint64_t gpa_size, uint64_t *hpa_size)
@@ -645,6 +615,17 @@ gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
 	return 0;
 }
 
+/* Convert guest physical address to host physical address */
+static __rte_always_inline rte_iova_t
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+{
+	rte_iova_t hpa;
+	uint64_t hpa_size;
+
+	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
+	return hpa_size == size ? hpa : 0;
+}
+
 static __rte_always_inline uint64_t
 hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
 {
@@ -749,13 +730,12 @@ static __rte_always_inline void
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	/* Flush used->idx update before we read avail->flags. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	/* Don't kick guest if we don't reach index specified by guest. */
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
 		uint16_t old = vq->signalled_used;
-		uint16_t new = vq->async_pkts_inflight_n ?
-			vq->used->idx:vq->last_used_idx;
+		uint16_t new = vq->last_used_idx;
 		bool signalled_used_valid = vq->signalled_used_valid;
 
 		vq->signalled_used = new;
@@ -791,7 +771,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	bool signalled_used_valid, kick = false;
 
 	/* Flush used desc update. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (vq->driver_event->flags !=
@@ -817,7 +797,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		goto kick;
 	}
 
-	rte_smp_rmb();
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 	off_wrap = vq->driver_event->off_wrap;
 	off = off_wrap & ~(1 << 15);
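The gpa_to_hpa() hunks are the least obvious part of this patch: instead of
duplicating the page-table walk, the rewritten function delegates to
gpa_to_first_hpa() and returns a translation only when the contiguous size
reported back covers the whole request, i.e. the mapping is all-or-nothing.
The rte_smp_mb()/rte_smp_rmb() hunks, by contrast, look like a mechanical move
to C11-style memory ordering with the same barrier strength. Below is a
minimal standalone sketch of the gpa_to_hpa() contract; first_hpa(),
gpa_to_hpa_sketch() and the hard-coded two-entry page table are hypothetical
stand-ins, not DPDK code, and the VHOST_BINARY_SEARCH_THRESH fast path of the
real lookup is omitted.

/*
 * Sketch (not DPDK code) of the all-or-nothing translation the new
 * gpa_to_hpa() performs. first_hpa() plays the role of
 * gpa_to_first_hpa(): it returns the host address of the first page
 * covering gpa and reports, via *hpa_size, how many bytes of the
 * requested range are contiguous from there.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/* Two guest pages that are not contiguous on the host. */
static struct guest_page pages[] = {
	{ 0x0000, 0x10000, 0x1000 },
	{ 0x1000, 0x30000, 0x1000 },
};

static uint64_t
first_hpa(uint64_t gpa, uint64_t size, uint64_t *hpa_size)
{
	unsigned int i;

	for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
		struct guest_page *p = &pages[i];

		if (gpa >= p->guest_phys_addr &&
		    gpa < p->guest_phys_addr + p->size) {
			uint64_t avail = p->guest_phys_addr + p->size - gpa;

			*hpa_size = size < avail ? size : avail;
			return gpa - p->guest_phys_addr + p->host_phys_addr;
		}
	}
	*hpa_size = 0;
	return 0;
}

/* Same shape as the patched gpa_to_hpa(): translate only if the
 * whole range fits in one physically contiguous region. */
static uint64_t
gpa_to_hpa_sketch(uint64_t gpa, uint64_t size)
{
	uint64_t hpa_size;
	uint64_t hpa = first_hpa(gpa, size, &hpa_size);

	return hpa_size == size ? hpa : 0;
}

int
main(void)
{
	/* Fits inside the first page: translated (prints 0x10800). */
	printf("0x%" PRIx64 "\n", gpa_to_hpa_sketch(0x0800, 0x100));
	/* Crosses a host-discontiguous boundary: rejected (prints 0x0). */
	printf("0x%" PRIx64 "\n", gpa_to_hpa_sketch(0x0800, 0x1000));
	return 0;
}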