X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=658f6fc287e27a525d73d862b9b4bec1aae5a974;hb=1b7b24389cee5baa421d334048782e3e99e7dec5;hp=f3731982b2f31d08e4fe9b2b976a997464c52803;hpb=78639d54563a42150fe6e85977f06a6db55d3a06;p=dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index f3731982b2..658f6fc287 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -34,6 +34,8 @@
 #define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
 /* Used to indicate that the device has its own data path and configured */
 #define VIRTIO_DEV_VDPA_CONFIGURED 8
+/* Used to indicate that the feature negotiation failed */
+#define VIRTIO_DEV_FEATURES_FAILED 16
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
@@ -45,7 +47,7 @@
 #define MAX_PKT_BURST 32
 
 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
-#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
 
 #define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
 	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
@@ -90,20 +92,6 @@ struct buf_vector {
 	uint32_t desc_idx;
 };
 
-/*
- * A structure to hold some fields needed in zero copy code path,
- * mainly for associating an mbuf with the right desc_idx.
- */
-struct zcopy_mbuf {
-	struct rte_mbuf *mbuf;
-	uint32_t desc_idx;
-	uint16_t desc_count;
-	uint16_t in_use;
-
-	TAILQ_ENTRY(zcopy_mbuf) next;
-};
-TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
-
 /*
  * Structure contains the info for each batched memory copy.
  */
@@ -160,6 +148,9 @@ struct vhost_virtqueue {
 	int			enabled;
 	int			access_ok;
 	int			ready;
+	int			notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF	(-1)
+
 	rte_spinlock_t		access_lock;
 
 	/* Used to notify the guest (trigger interrupt) */
@@ -178,12 +169,6 @@ struct vhost_virtqueue {
 	struct rte_vhost_resubmit_info *resubmit_inflight;
 	uint64_t		global_counter;
 
-	uint16_t		nr_zmbuf;
-	uint16_t		zmbuf_size;
-	uint16_t		last_zmbuf_idx;
-	struct zcopy_mbuf	*zmbufs;
-	struct zcopy_mbuf_list	zmbuf_list;
-
 	union {
 		struct vring_used_elem  *shadow_used_split;
 		struct vring_used_elem_packed *shadow_used_packed;
@@ -213,16 +198,17 @@ struct vhost_virtqueue {
 	/* operation callbacks for async dma */
 	struct rte_vhost_async_channel_ops	async_ops;
 
-	struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT];
-	struct iovec vec_pool[VHOST_MAX_ASYNC_VEC];
+	struct rte_vhost_iov_iter *it_pool;
+	struct iovec *vec_pool;
 
 	/* async data transfer status */
-	uintptr_t	**async_pkts_pending;
-	#define		ASYNC_PENDING_INFO_N_MSK 0xFFFF
-	#define		ASYNC_PENDING_INFO_N_SFT 16
-	uint64_t	*async_pending_info;
+	struct async_inflight_info *async_pkts_info;
 	uint16_t	async_pkts_idx;
 	uint16_t	async_pkts_inflight_n;
+	uint16_t	async_last_pkts_n;
+	struct vring_used_elem  *async_descs_split;
+	uint16_t async_desc_idx;
+	uint16_t last_async_desc_idx;
 
 	/* vq async features */
 	bool		async_inorder;
@@ -230,6 +216,15 @@ struct vhost_virtqueue {
 	uint16_t	async_threshold;
 } __rte_cache_aligned;
 
+/* Virtio device status as per Virtio specification */
+#define VIRTIO_DEVICE_STATUS_RESET		0x00
+#define VIRTIO_DEVICE_STATUS_ACK		0x01
+#define VIRTIO_DEVICE_STATUS_DRIVER		0x02
+#define VIRTIO_DEVICE_STATUS_DRIVER_OK		0x04
+#define VIRTIO_DEVICE_STATUS_FEATURES_OK	0x08
+#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
+#define VIRTIO_DEVICE_STATUS_FAILED		0x80
+
 #define VHOST_MAX_VRING			0x100
 #define VHOST_MAX_QUEUE_PAIRS		0x80
 
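The status bits added above mirror the device status field from the Virtio specification; a later hunk in this diff adds a status byte to struct virtio_net to hold them. As a minimal sketch, assuming only the VIRTIO_DEVICE_STATUS_* defines introduced in this hunk, a backend could decide whether the guest driver has finished initialization as follows (the virtio_dev_status_ready() helper is hypothetical and not part of this patch):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative helper only, not code from this patch. Relies on the
 * VIRTIO_DEVICE_STATUS_* defines added in the hunk above. Drivers set
 * these bits cumulatively during initialization, so a fully initialized
 * device carries ACK, DRIVER, FEATURES_OK and DRIVER_OK together. */
static inline bool
virtio_dev_status_ready(uint8_t status)
{
	const uint8_t required = VIRTIO_DEVICE_STATUS_ACK |
				 VIRTIO_DEVICE_STATUS_DRIVER |
				 VIRTIO_DEVICE_STATUS_FEATURES_OK |
				 VIRTIO_DEVICE_STATUS_DRIVER_OK;

	/* A failed or reset-requesting device is never ready. */
	if (status & (VIRTIO_DEVICE_STATUS_FAILED |
		      VIRTIO_DEVICE_STATUS_DEV_NEED_RESET))
		return false;

	return (status & required) == required;
}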
@@ -363,7 +358,6 @@ struct virtio_net {
 	/* to tell if we need broadcast rarp packet */
 	int16_t			broadcast_rarp;
 	uint32_t		nr_vring;
-	int			dequeue_zero_copy;
 	int			async_copy;
 	int			extbuf;
 	int			linearbuf;
@@ -376,6 +370,7 @@ struct virtio_net {
 	uint64_t		log_addr;
 	struct rte_ether_addr	mac;
 	uint16_t		mtu;
+	uint8_t			status;
 
 	struct vhost_device_ops const *notify_ops;
 
@@ -570,38 +565,67 @@ static __rte_always_inline int guest_page_addrcmp(const void *p1,
 	return 0;
 }
 
-/* Convert guest physical address to host physical address */
 static __rte_always_inline rte_iova_t
-gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+	uint64_t gpa_size, uint64_t *hpa_size)
 {
 	uint32_t i;
 	struct guest_page *page;
 	struct guest_page key;
 
+	*hpa_size = gpa_size;
 	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
-		key.guest_phys_addr = gpa;
+		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
 		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
 			       sizeof(struct guest_page), guest_page_addrcmp);
 		if (page) {
-			if (gpa + size < page->guest_phys_addr + page->size)
+			if (gpa + gpa_size <=
+					page->guest_phys_addr + page->size) {
+				return gpa - page->guest_phys_addr +
+					page->host_phys_addr;
+			} else if (gpa < page->guest_phys_addr +
+					page->size) {
+				*hpa_size = page->guest_phys_addr +
+					page->size - gpa;
 				return gpa - page->guest_phys_addr +
 				       page->host_phys_addr;
+			}
 		}
 	} else {
 		for (i = 0; i < dev->nr_guest_pages; i++) {
 			page = &dev->guest_pages[i];
 
-			if (gpa >= page->guest_phys_addr &&
-			    gpa + size < page->guest_phys_addr +
-					 page->size)
-				return gpa - page->guest_phys_addr +
-				       page->host_phys_addr;
+			if (gpa >= page->guest_phys_addr) {
+				if (gpa + gpa_size <=
+					page->guest_phys_addr + page->size) {
+					return gpa - page->guest_phys_addr +
+						page->host_phys_addr;
+				} else if (gpa < page->guest_phys_addr +
+						page->size) {
+					*hpa_size = page->guest_phys_addr +
+						page->size - gpa;
+					return gpa - page->guest_phys_addr +
+						page->host_phys_addr;
+				}
+			}
 		}
 	}
 
+	*hpa_size = 0;
 	return 0;
 }
 
+/* Convert guest physical address to host physical address */
+static __rte_always_inline rte_iova_t
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+{
+	rte_iova_t hpa;
+	uint64_t hpa_size;
+
+	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
+	return hpa_size == size ? hpa : 0;
+}
+
 static __rte_always_inline uint64_t
 hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
 {
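The rework above splits guest-physical translation in two: gpa_to_first_hpa() translates as much of the requested range as is physically contiguous and reports the contiguous chunk's length through *hpa_size (set to 0 when the address is not backed by any guest page), while the new gpa_to_hpa() wrapper keeps the old all-or-nothing contract by returning 0 unless the full size is contiguous. A minimal sketch, assuming the functions from the hunk above, of how a caller could cover a range that crosses host-page boundaries; walk_gpa_range() is illustrative, not code from this patch:

/* Illustrative only: visit a guest-physical range one physically
 * contiguous chunk at a time, using gpa_to_first_hpa() from above. */
static void
walk_gpa_range(struct virtio_net *dev, uint64_t gpa, uint64_t len)
{
	uint64_t chunk_len;
	rte_iova_t hpa;

	while (len != 0) {
		hpa = gpa_to_first_hpa(dev, gpa, len, &chunk_len);
		if (chunk_len == 0)
			break;	/* gpa not backed by any guest page */

		/* Operate on the contiguous chunk [hpa, hpa + chunk_len). */
		(void)hpa;	/* placeholder for real per-chunk work */

		gpa += chunk_len;
		len -= chunk_len;
	}
}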
@@ -650,10 +674,11 @@ int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_enable_dequeue_zero_copy(int vid);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
 void vhost_enable_extbuf(int vid);
 void vhost_enable_linearbuf(int vid);
+int vhost_enable_guest_notification(struct virtio_net *dev,
+		struct vhost_virtqueue *vq, int enable);
 
 struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 
@@ -705,13 +730,12 @@ static __rte_always_inline void
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	/* Flush used->idx update before we read avail->flags. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	/* Don't kick guest if we don't reach index specified by guest. */
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
 		uint16_t old = vq->signalled_used;
-		uint16_t new = vq->async_pkts_inflight_n ?
-					vq->used->idx:vq->last_used_idx;
+		uint16_t new = vq->last_used_idx;
 		bool signalled_used_valid = vq->signalled_used_valid;
 
 		vq->signalled_used = new;
@@ -747,7 +771,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	bool signalled_used_valid, kick = false;
 
 	/* Flush used desc update. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (vq->driver_event->flags !=
@@ -773,7 +797,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		goto kick;
 	}
 
-	rte_smp_rmb();
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 	off_wrap = vq->driver_event->off_wrap;
 	off = off_wrap & ~(1 << 15);
@@ -828,10 +852,4 @@ mbuf_is_consumed(struct rte_mbuf *m)
 	return true;
 }
 
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
-	zmbuf->in_use = 0;
-}
-
 #endif /* _VHOST_NET_CDEV_H_ */
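The call paths above replace rte_smp_mb()/rte_smp_rmb() with equivalent C11 fences, and the split path now signals based on vq->last_used_idx alone. Under VIRTIO_RING_F_EVENT_IDX, whether a kick is actually sent is decided by vhost_need_event(), defined elsewhere in vhost.h as the wrap-safe comparison from the Virtio specification's vring_need_event(). A standalone worked example of that comparison; need_event() here is a local copy for illustration, not code from this patch:

#include <stdint.h>
#include <stdio.h>

/* Local copy, for illustration, of the wrap-safe check used by the
 * split call path: kick only if new_idx has moved past event_idx
 * since the last signal at old. Unsigned 16-bit wrap-around makes
 * the comparison safe when the indices overflow. */
static int
need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

int
main(void)
{
	/* Guest asked to be woken once used->idx passes 11. */
	printf("%d\n", need_event(11, 12, 10));	/* prints 1: kick */
	/* Guest does not want a kick until used->idx passes 15. */
	printf("%d\n", need_event(15, 12, 10));	/* prints 0: no kick */
	return 0;
}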