X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=658f6fc287e27a525d73d862b9b4bec1aae5a974;hb=2b9a66e1b606d3813d72dd81c626949e09706e27;hp=02b3c91fff6b9e04ca2168f50ff17bd60d964f84;hpb=86202aae9402a2f181436ace1714282ab8a43871;p=dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 02b3c91fff..658f6fc287 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -22,6 +22,9 @@
 
 #include "rte_vhost.h"
 #include "rte_vdpa.h"
+#include "rte_vdpa_dev.h"
+
+#include "rte_vhost_async.h"
 
 /* Used to indicate that the device is running on a data core */
 #define VIRTIO_DEV_RUNNING 1
@@ -31,6 +34,8 @@
 #define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
 /* Used to indicate that the device has its own data path and configured */
 #define VIRTIO_DEV_VDPA_CONFIGURED 8
+/* Used to indicate that the feature negotiation failed */
+#define VIRTIO_DEV_FEATURES_FAILED 16
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
@@ -39,6 +44,43 @@
 
 #define VHOST_LOG_CACHE_NR 32
 
+#define MAX_PKT_BURST 32
+
+#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)
+
+#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
+	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
+		VRING_DESC_F_WRITE)
+#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
+	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
+#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
+					 VRING_DESC_F_INDIRECT)
+
+#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
+			    sizeof(struct vring_packed_desc))
+#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
+
+#ifdef VHOST_GCC_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VHOST_CLANG_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifdef VHOST_ICC_UNROLL_PRAGMA
+#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
+	for (iter = val; iter < size; iter++)
+#endif
+
+#ifndef vhost_for_each_try_unroll
+#define vhost_for_each_try_unroll(iter, val, num) \
+	for (iter = val; iter < num; iter++)
+#endif
+
 /**
  * Structure contains buffer address, length and descriptor index
  * from vring to do scatter RX.
@@ -50,20 +92,6 @@ struct buf_vector {
 	uint32_t desc_idx;
 };
 
-/*
- * A structure to hold some fields needed in zero copy code path,
- * mainly for associating an mbuf with the right desc_idx.
- */
-struct zcopy_mbuf {
-	struct rte_mbuf *mbuf;
-	uint32_t desc_idx;
-	uint16_t desc_count;
-	uint16_t in_use;
-
-	TAILQ_ENTRY(zcopy_mbuf) next;
-};
-TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
-
 /*
  * Structure contains the info for each batched memory copy.
  */
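Note: with the 16-byte struct vring_packed_desc (8-byte addr, 4-byte len, 2-byte id, 2-byte flags) and the usual 64-byte RTE_CACHE_LINE_SIZE, PACKED_BATCH_SIZE evaluates to 4 and PACKED_BATCH_MASK to 3, so one batch of packed descriptors fills exactly one cache line. The vhost_for_each_try_unroll() helper asks GCC/Clang/ICC to unroll such loops by 4 and degrades to a plain loop on other compilers. A minimal sketch of the intended use follows; the loop body and local names are illustrative, not from the patch, and it assumes the batch does not wrap past the end of the ring:

	uint16_t i;
	uint16_t avail_idx = vq->last_avail_idx;
	struct vring_packed_desc *descs = vq->desc_packed;

	/* Touch one cache-line-sized batch of descriptors; with the pragma
	 * variants the compiler can fully unroll these four iterations. */
	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
		rte_prefetch0(&descs[avail_idx + i]);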
@@ -84,6 +112,7 @@ struct log_cache_entry {
 
 struct vring_used_elem_packed {
 	uint16_t id;
+	uint16_t flags;
 	uint32_t len;
 	uint32_t count;
 };
@@ -118,6 +147,10 @@ struct vhost_virtqueue {
 	int			backend;
 	int			enabled;
 	int			access_ok;
+	int			ready;
+	int			notif_enable;
+#define VIRTIO_UNINITIALIZED_NOTIF	(-1)
+
 	rte_spinlock_t		access_lock;
 
 	/* Used to notify the guest (trigger interrupt) */
@@ -136,17 +169,15 @@ struct vhost_virtqueue {
 	struct rte_vhost_resubmit_info *resubmit_inflight;
 	uint64_t		global_counter;
 
-	uint16_t		nr_zmbuf;
-	uint16_t		zmbuf_size;
-	uint16_t		last_zmbuf_idx;
-	struct zcopy_mbuf	*zmbufs;
-	struct zcopy_mbuf_list	zmbuf_list;
-
 	union {
 		struct vring_used_elem  *shadow_used_split;
 		struct vring_used_elem_packed *shadow_used_packed;
 	};
 	uint16_t                shadow_used_idx;
+	/* Record packed ring enqueue latest desc cache aligned index */
+	uint16_t		shadow_aligned_idx;
+	/* Record packed ring first dequeue desc index */
+	uint16_t		shadow_last_used_idx;
 	struct vhost_vring_addr ring_addrs;
 
 	struct batch_copy_elem	*batch_copy_elems;
@@ -163,28 +194,40 @@ struct vhost_virtqueue {
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
 	int				iotlb_cache_nr;
 	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
-} __rte_cache_aligned;
 
-/* Old kernels have no such macros defined */
-#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
- #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
-#endif
+	/* operation callbacks for async dma */
+	struct rte_vhost_async_channel_ops	async_ops;
+
+	struct rte_vhost_iov_iter *it_pool;
+	struct iovec *vec_pool;
+
+	/* async data transfer status */
+	struct async_inflight_info *async_pkts_info;
+	uint16_t	async_pkts_idx;
+	uint16_t	async_pkts_inflight_n;
+	uint16_t	async_last_pkts_n;
+	struct vring_used_elem  *async_descs_split;
+	uint16_t async_desc_idx;
+	uint16_t last_async_desc_idx;
+
+	/* vq async features */
+	bool		async_inorder;
+	bool		async_registered;
+	uint16_t	async_threshold;
+} __rte_cache_aligned;
 
-#ifndef VIRTIO_NET_F_MQ
- #define VIRTIO_NET_F_MQ		22
-#endif
+/* Virtio device status as per Virtio specification */
+#define VIRTIO_DEVICE_STATUS_RESET		0x00
+#define VIRTIO_DEVICE_STATUS_ACK		0x01
+#define VIRTIO_DEVICE_STATUS_DRIVER		0x02
+#define VIRTIO_DEVICE_STATUS_DRIVER_OK		0x04
+#define VIRTIO_DEVICE_STATUS_FEATURES_OK	0x08
+#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
+#define VIRTIO_DEVICE_STATUS_FAILED		0x80
 
 #define VHOST_MAX_VRING			0x100
 #define VHOST_MAX_QUEUE_PAIRS		0x80
 
-#ifndef VIRTIO_NET_F_MTU
- #define VIRTIO_NET_F_MTU 3
-#endif
-
-#ifndef VIRTIO_F_ANY_LAYOUT
- #define VIRTIO_F_ANY_LAYOUT		27
-#endif
-
 /* Declare IOMMU related bits for older kernels */
 #ifndef VIRTIO_F_IOMMU_PLATFORM
 
@@ -313,9 +356,9 @@ struct virtio_net {
 	uint32_t		flags;
 	uint16_t		vhost_hlen;
 	/* to tell if we need broadcast rarp packet */
-	rte_atomic16_t		broadcast_rarp;
+	int16_t			broadcast_rarp;
 	uint32_t		nr_vring;
-	int			dequeue_zero_copy;
+	int			async_copy;
 	int			extbuf;
 	int			linearbuf;
 	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
@@ -327,6 +370,7 @@ struct virtio_net {
 	uint64_t		log_addr;
 	struct rte_ether_addr	mac;
 	uint16_t		mtu;
+	uint8_t			status;
 
 	struct vhost_device_ops const *notify_ops;
 
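Note: the VIRTIO_DEVICE_STATUS_* values mirror the device status bits from section 2.1 of the Virtio specification; the new uint8_t status member of struct virtio_net accumulates them as the guest driver advances through reset, acknowledge, driver, features-OK and driver-OK. A hedged sketch of the kind of check this enables (the helper name is hypothetical, not part of the patch):

	/* Features must not be renegotiated once the guest has set
	 * FEATURES_OK; a hypothetical guard could look like this. */
	static inline bool
	vhost_dev_features_locked(struct virtio_net *dev)
	{
		return dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK;
	}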
@@ -340,11 +384,7 @@ struct virtio_net {
 	int			postcopy_ufd;
 	int			postcopy_listening;
 
-	/*
-	 * Device id to identify a specific backend device.
-	 * It's set to -1 for the default software implementation.
-	 */
-	int			vdpa_dev_id;
+	struct rte_vdpa_device *vdpa_dev;
 
 	/* context data for the external message handlers */
 	void			*extern_data;
@@ -425,14 +465,23 @@ static __rte_always_inline void
 vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t offset, uint64_t len)
 {
-	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
+		if (unlikely(vq->log_guest_addr == 0))
+			return;
+		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
+					len);
+	}
 }
 
 static __rte_always_inline void
 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		     uint64_t offset, uint64_t len)
 {
-	vhost_log_write(dev, vq->log_guest_addr + offset, len);
+	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
+		if (unlikely(vq->log_guest_addr == 0))
+			return;
+		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
+	}
 }
 
 static __rte_always_inline void
@@ -461,14 +510,21 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		__vhost_log_write(dev, iova, len);
 }
 
-/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
+extern int vhost_config_log_level;
+extern int vhost_data_log_level;
+
+#define VHOST_LOG_CONFIG(level, fmt, args...)			\
+	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
+		"VHOST_CONFIG: " fmt, ##args)
+
+#define VHOST_LOG_DATA(level, fmt, args...) \
+	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
+	 rte_log(RTE_LOG_ ## level,  vhost_data_log_level,	\
+		"VHOST_DATA : " fmt, ##args) :			\
+	 0)
 
 #ifdef RTE_LIBRTE_VHOST_DEBUG
 #define VHOST_MAX_PRINT_BUFF 6072
-#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
-	RTE_LOG(DEBUG, log_type, fmt, ##args)
 #define PRINT_PACKET(device, addr, size, header) do { \
 	char *pkt_addr = (char *)(addr); \
 	unsigned int index; \
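Note: replacing the shared RTE_LOGTYPE_USER1 logtype with dedicated, dynamically registered log levels splits configuration-path and data-path logging. VHOST_LOG_DATA() is additionally gated at compile time: levels above RTE_LOG_DP_LEVEL (typically RTE_LOG_INFO) collapse to a no-op, so DEBUG-level data-path logging costs nothing in release builds, while VHOST_LOG_CONFIG() is only filtered at run time. Typical call sites might look as follows (the count variable is illustrative, not from the patch):

	VHOST_LOG_CONFIG(INFO, "(%d) negotiated features 0x%" PRIx64 "\n",
		dev->vid, dev->features);
	VHOST_LOG_DATA(DEBUG, "(%d) dequeued %u packets\n", dev->vid, count);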
@@ -484,37 +540,92 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	} \
 	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
 	\
-	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
+	VHOST_LOG_DATA(DEBUG, "%s", packet); \
 } while (0)
 #else
-#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
 #define PRINT_PACKET(device, addr, size, header) do {} while (0)
 #endif
 
-extern uint64_t VHOST_FEATURES;
 #define MAX_VHOST_DEVICE	1024
 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
-/* Convert guest physical address to host physical address */
+#define VHOST_BINARY_SEARCH_THRESH 256
+
+static __rte_always_inline int guest_page_addrcmp(const void *p1,
+						const void *p2)
+{
+	const struct guest_page *page1 = (const struct guest_page *)p1;
+	const struct guest_page *page2 = (const struct guest_page *)p2;
+
+	if (page1->guest_phys_addr > page2->guest_phys_addr)
+		return 1;
+	if (page1->guest_phys_addr < page2->guest_phys_addr)
+		return -1;
+
+	return 0;
+}
+
 static __rte_always_inline rte_iova_t
-gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
+	uint64_t gpa_size, uint64_t *hpa_size)
 {
 	uint32_t i;
 	struct guest_page *page;
-
-	for (i = 0; i < dev->nr_guest_pages; i++) {
-		page = &dev->guest_pages[i];
-
-		if (gpa >= page->guest_phys_addr &&
-		    gpa + size < page->guest_phys_addr + page->size) {
-			return gpa - page->guest_phys_addr +
-			       page->host_phys_addr;
+	struct guest_page key;
+
+	*hpa_size = gpa_size;
+	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
+		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
+		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
+			       sizeof(struct guest_page), guest_page_addrcmp);
+		if (page) {
+			if (gpa + gpa_size <=
+					page->guest_phys_addr + page->size) {
+				return gpa - page->guest_phys_addr +
+					page->host_phys_addr;
+			} else if (gpa < page->guest_phys_addr +
+					page->size) {
+				*hpa_size = page->guest_phys_addr +
+					page->size - gpa;
+				return gpa - page->guest_phys_addr +
+					page->host_phys_addr;
+			}
+		}
+	} else {
+		for (i = 0; i < dev->nr_guest_pages; i++) {
+			page = &dev->guest_pages[i];
+
+			if (gpa >= page->guest_phys_addr) {
+				if (gpa + gpa_size <=
+					page->guest_phys_addr + page->size) {
+					return gpa - page->guest_phys_addr +
+						page->host_phys_addr;
+				} else if (gpa < page->guest_phys_addr +
+						page->size) {
+					*hpa_size = page->guest_phys_addr +
+						page->size - gpa;
+					return gpa - page->guest_phys_addr +
+						page->host_phys_addr;
+				}
+			}
 		}
 	}
 
+	*hpa_size = 0;
 	return 0;
 }
 
+/* Convert guest physical address to host physical address */
+static __rte_always_inline rte_iova_t
+gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
+{
+	rte_iova_t hpa;
+	uint64_t hpa_size;
+
+	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
+	return hpa_size == size ? hpa : 0;
+}
+
 static __rte_always_inline uint64_t
 hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
 {
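Note: gpa_to_first_hpa() returns the host physical address of the start of the requested guest range and reports through *hpa_size how many bytes of it are physically contiguous; gpa_to_hpa() becomes a wrapper that succeeds only when the whole range is contiguous. Above VHOST_BINARY_SEARCH_THRESH guest page entries a bsearch() is used instead of the linear scan; the key computation masks with dev->guest_pages[0].size, i.e. it relies on all entries sharing one uniform page size. A sketch of how a caller can walk a range that straddles host page boundaries (do_dma_copy() is a hypothetical callback, not part of the patch):

	uint64_t hpa_size;
	rte_iova_t hpa;

	while (size) {
		hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
		if (hpa == 0)
			break;			/* range not (fully) mapped */
		do_dma_copy(hpa, hpa_size);	/* hypothetical */
		gpa += hpa_size;
		size -= hpa_size;
	}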
@@ -541,7 +652,7 @@ get_device(int vid)
 	struct virtio_net *dev = vhost_devices[vid];
 
 	if (unlikely(!dev)) {
-		RTE_LOG(ERR, VHOST_CONFIG,
+		VHOST_LOG_CONFIG(ERR,
 			"(%d) device not found.\n", vid);
 	}
 
@@ -560,13 +671,14 @@ void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
-void vhost_attach_vdpa_device(int vid, int did);
+void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
-void vhost_enable_dequeue_zero_copy(int vid);
 void vhost_set_builtin_virtio_net(int vid, bool enable);
 void vhost_enable_extbuf(int vid);
 void vhost_enable_linearbuf(int vid);
+int vhost_enable_guest_notification(struct virtio_net *dev,
+		struct vhost_virtqueue *vq, int enable);
 
 struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 
@@ -583,6 +695,8 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			uint64_t desc_addr, uint64_t desc_len);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t log_addr);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 static __rte_always_inline uint64_t
@@ -616,7 +730,7 @@ static __rte_always_inline void
 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 {
 	/* Flush used->idx update before we read avail->flags. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	/* Don't kick guest if we don't reach index specified by guest. */
 	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
@@ -627,7 +741,7 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		vq->signalled_used = new;
 		vq->signalled_used_valid = true;
 
-		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
+		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
 			__func__,
 			vhost_used_event(vq),
 			old, new);
@@ -657,7 +771,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 	bool signalled_used_valid, kick = false;
 
 	/* Flush used desc update. */
-	rte_smp_mb();
+	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
 
 	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
 		if (vq->driver_event->flags !=
@@ -683,7 +797,7 @@ vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
 		goto kick;
 	}
 
-	rte_smp_rmb();
+	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
 	off_wrap = vq->driver_event->off_wrap;
 	off = off_wrap & ~(1 << 15);
@@ -738,10 +852,4 @@ mbuf_is_consumed(struct rte_mbuf *m)
 	return true;
 }
 
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
-	zmbuf->in_use = 0;
-}
-
 #endif /* _VHOST_NET_CDEV_H_ */
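Note: the rte_smp_mb()/rte_smp_rmb() calls are replaced by C11-style rte_atomic_thread_fence() as part of DPDK's move away from the legacy barrier API; the ordering requirement itself is unchanged: the used-ring update must be globally visible before the driver's event suppression data is read. With VIRTIO_RING_F_EVENT_IDX, the kick decision in vhost_vring_call_split() reduces to a wrap-safe window test, mirroring the vhost_need_event() helper defined elsewhere in this header (untouched by this patch). A worked sketch:

	/* Kick iff event_idx lies in the half-open window (old, new].
	 * Example: old = 10, new = 14, event_idx = 12:
	 *   (uint16_t)(14 - 12 - 1) = 1 < (uint16_t)(14 - 10) = 4 -> kick.
	 * With event_idx = 9 (already passed): 4 < 4 is false -> no kick.
	 * The uint16_t arithmetic keeps the test correct across index wrap. */
	static inline int
	need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old_idx)
	{
		return (uint16_t)(new_idx - event_idx - 1) <
			(uint16_t)(new_idx - old_idx);
	}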