X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=819857a65aed239cb7365c2487ee7f2163c709ab;hb=2263f13941a10989d141e73529fa1b0fe356608b;hp=4ae47c1e13d1710419d2a25f48d473be98d51858;hpb=d87f1a1cb7b666550bb53e39c1d85d9f7b861e6f;p=dpdk.git diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h index 4ae47c1e13..819857a65a 100644 --- a/lib/librte_vhost/vhost.h +++ b/lib/librte_vhost/vhost.h @@ -39,6 +39,38 @@ #define VHOST_LOG_CACHE_NR 32 +#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \ + ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \ + VRING_DESC_F_WRITE) +#define PACKED_DESC_DEQUEUE_USED_FLAG(w) \ + ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0) +#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \ + VRING_DESC_F_INDIRECT) + +#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ + sizeof(struct vring_packed_desc)) +#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) + +#ifdef VHOST_GCC_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \ + for (iter = val; iter < size; iter++) +#endif + +#ifdef VHOST_CLANG_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \ + for (iter = val; iter < size; iter++) +#endif + +#ifdef VHOST_ICC_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \ + for (iter = val; iter < size; iter++) +#endif + +#ifndef vhost_for_each_try_unroll +#define vhost_for_each_try_unroll(iter, val, num) \ + for (iter = val; iter < num; iter++) +#endif + /** * Structure contains buffer address, length and descriptor index * from vring to do scatter RX. @@ -84,6 +116,7 @@ struct log_cache_entry { struct vring_used_elem_packed { uint16_t id; + uint16_t flags; uint32_t len; uint32_t count; }; @@ -128,6 +161,14 @@ struct vhost_virtqueue { /* Physical address of used ring, for logging */ uint64_t log_guest_addr; + /* inflight share memory info */ + union { + struct rte_vhost_inflight_info_split *inflight_split; + struct rte_vhost_inflight_info_packed *inflight_packed; + }; + struct rte_vhost_resubmit_info *resubmit_inflight; + uint64_t global_counter; + uint16_t nr_zmbuf; uint16_t zmbuf_size; uint16_t last_zmbuf_idx; @@ -139,6 +180,10 @@ struct vhost_virtqueue { struct vring_used_elem_packed *shadow_used_packed; }; uint16_t shadow_used_idx; + /* Record packed ring enqueue latest desc cache aligned index */ + uint16_t shadow_aligned_idx; + /* Record packed ring first dequeue desc index */ + uint16_t shadow_last_used_idx; struct vhost_vring_addr ring_addrs; struct batch_copy_elem *batch_copy_elems; @@ -305,9 +350,11 @@ struct virtio_net { uint32_t flags; uint16_t vhost_hlen; /* to tell if we need broadcast rarp packet */ - rte_atomic16_t broadcast_rarp; + int16_t broadcast_rarp; uint32_t nr_vring; int dequeue_zero_copy; + int extbuf; + int linearbuf; struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2]; struct inflight_mem_info *inflight_info; #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ) @@ -330,11 +377,7 @@ struct virtio_net { int postcopy_ufd; int postcopy_listening; - /* - * Device id to identify a specific backend device. - * It's set to -1 for the default software implementation. 
- */ - int vdpa_dev_id; + struct rte_vdpa_device *vdpa_dev; /* context data for the external message handlers */ void *extern_data; @@ -357,6 +400,26 @@ desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter) wrap_counter != !!(flags & VRING_DESC_F_USED); } +static inline void +vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num) +{ + vq->last_used_idx += num; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } +} + +static inline void +vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num) +{ + vq->last_avail_idx += num; + if (vq->last_avail_idx >= vq->size) { + vq->avail_wrap_counter ^= 1; + vq->last_avail_idx -= vq->size; + } +} + void __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t addr, uint64_t len); @@ -395,14 +458,23 @@ static __rte_always_inline void vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t offset, uint64_t len) { - vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len); + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) { + if (unlikely(vq->log_guest_addr == 0)) + return; + __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, + len); + } } static __rte_always_inline void vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t offset, uint64_t len) { - vhost_log_write(dev, vq->log_guest_addr + offset, len); + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) { + if (unlikely(vq->log_guest_addr == 0)) + return; + __vhost_log_write(dev, vq->log_guest_addr + offset, len); + } } static __rte_always_inline void @@ -431,14 +503,21 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, __vhost_log_write(dev, iova, len); } -/* Macros for printing using RTE_LOG */ -#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1 -#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1 +extern int vhost_config_log_level; +extern int vhost_data_log_level; + +#define VHOST_LOG_CONFIG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, vhost_config_log_level, \ + "VHOST_CONFIG: " fmt, ##args) + +#define VHOST_LOG_DATA(level, fmt, args...) \ + (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \ + rte_log(RTE_LOG_ ## level, vhost_data_log_level, \ + "VHOST_DATA : " fmt, ##args) : \ + 0) #ifdef RTE_LIBRTE_VHOST_DEBUG #define VHOST_MAX_PRINT_BUFF 6072 -#define VHOST_LOG_DEBUG(log_type, fmt, args...) \ - RTE_LOG(DEBUG, log_type, fmt, ##args) #define PRINT_PACKET(device, addr, size, header) do { \ char *pkt_addr = (char *)(addr); \ unsigned int index; \ @@ -454,31 +533,57 @@ vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, } \ snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \ \ - VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \ + VHOST_LOG_DATA(DEBUG, "%s", packet); \ } while (0) #else -#define VHOST_LOG_DEBUG(log_type, fmt, args...) 
do {} while (0) #define PRINT_PACKET(device, addr, size, header) do {} while (0) #endif -extern uint64_t VHOST_FEATURES; #define MAX_VHOST_DEVICE 1024 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; +#define VHOST_BINARY_SEARCH_THRESH 256 + +static __rte_always_inline int guest_page_addrcmp(const void *p1, + const void *p2) +{ + const struct guest_page *page1 = (const struct guest_page *)p1; + const struct guest_page *page2 = (const struct guest_page *)p2; + + if (page1->guest_phys_addr > page2->guest_phys_addr) + return 1; + if (page1->guest_phys_addr < page2->guest_phys_addr) + return -1; + + return 0; +} + /* Convert guest physical address to host physical address */ static __rte_always_inline rte_iova_t gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size) { uint32_t i; struct guest_page *page; - - for (i = 0; i < dev->nr_guest_pages; i++) { - page = &dev->guest_pages[i]; - - if (gpa >= page->guest_phys_addr && - gpa + size < page->guest_phys_addr + page->size) { - return gpa - page->guest_phys_addr + - page->host_phys_addr; + struct guest_page key; + + if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { + key.guest_phys_addr = gpa; + page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages, + sizeof(struct guest_page), guest_page_addrcmp); + if (page) { + if (gpa + size < page->guest_phys_addr + page->size) + return gpa - page->guest_phys_addr + + page->host_phys_addr; + } + } else { + for (i = 0; i < dev->nr_guest_pages; i++) { + page = &dev->guest_pages[i]; + + if (gpa >= page->guest_phys_addr && + gpa + size < page->guest_phys_addr + + page->size) + return gpa - page->guest_phys_addr + + page->host_phys_addr; } } @@ -511,7 +616,7 @@ get_device(int vid) struct virtio_net *dev = vhost_devices[vid]; if (unlikely(!dev)) { - RTE_LOG(ERR, VHOST_CONFIG, + VHOST_LOG_CONFIG(ERR, "(%d) device not found.\n", vid); } @@ -525,15 +630,18 @@ void vhost_destroy_device(int); void vhost_destroy_device_notify(struct virtio_net *dev); void cleanup_vq(struct vhost_virtqueue *vq, int destroy); +void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq); void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq); int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx); -void vhost_attach_vdpa_device(int vid, int did); +void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev); void vhost_set_ifname(int, const char *if_name, unsigned int if_len); void vhost_enable_dequeue_zero_copy(int vid); void vhost_set_builtin_virtio_net(int vid, bool enable); +void vhost_enable_extbuf(int vid); +void vhost_enable_linearbuf(int vid); struct vhost_device_ops const *vhost_driver_callback_get(const char *path); @@ -550,6 +658,8 @@ void *vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t desc_addr, uint64_t desc_len); int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq); +uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t log_addr); void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq); static __rte_always_inline uint64_t @@ -594,7 +704,7 @@ vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq) vq->signalled_used = new; vq->signalled_used_valid = true; - VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n", + VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n", __func__, vhost_used_event(vq), old, new);
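
Reviewer notes (not part of the patch):

1) The new PACKED_BATCH_SIZE/PACKED_BATCH_MASK constants and the
   vhost_for_each_try_unroll() macro are meant to be used together with
   vq_inc_last_avail_packed()/vq_inc_last_used_packed() in the packed-ring
   batch paths. A minimal sketch, assuming the vhost.h context above;
   the function name and the empty loop body are placeholders, not part
   of this patch:

    static __rte_always_inline void
    process_batch_sketch(struct vhost_virtqueue *vq)
    {
        uint16_t i;

        /* batched paths only handle cache-line aligned slots */
        if (unlikely(vq->last_avail_idx & PACKED_BATCH_MASK))
            return;

        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
            /* per-descriptor work on slot vq->last_avail_idx + i */
        }

        /* advance the avail index, flipping the wrap counter if needed */
        vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
    }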
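
2) vhost_config_log_level and vhost_data_log_level are only declared
   extern here; the registration lives in vhost.c, outside this diff.
   A rough sketch of what that side is expected to look like, assuming
   rte_log.h and rte_common.h are included (the constructor name, the
   logtype names and the default levels are illustrative):

    RTE_INIT(vhost_log_init_sketch)
    {
        vhost_config_log_level = rte_log_register("lib.vhost.config");
        if (vhost_config_log_level >= 0)
            rte_log_set_level(vhost_config_log_level, RTE_LOG_INFO);

        vhost_data_log_level = rte_log_register("lib.vhost.data");
        if (vhost_data_log_level >= 0)
            rte_log_set_level(vhost_data_log_level, RTE_LOG_WARNING);
    }

   Call sites then switch from RTE_LOG(..., VHOST_CONFIG/VHOST_DATA, ...)
   to VHOST_LOG_CONFIG()/VHOST_LOG_DATA(), as the get_device() hunk above
   already shows; VHOST_LOG_DATA() is additionally guarded by
   RTE_LOG_DP_LEVEL at compile time so disabled levels cost nothing on
   the data path.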
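
3) The bsearch() fast path added to gpa_to_hpa() is only correct if
   dev->guest_pages is kept sorted by guest_phys_addr. The memory-table
   setup code (outside this header) therefore has to sort the array with
   the same comparator, along the lines of:

    qsort(dev->guest_pages, dev->nr_guest_pages,
          sizeof(struct guest_page), guest_page_addrcmp);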