X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=a29c6638e2c49fd54c630a3464f23950105a3b31;hb=b53a4972946d4fb82ae323ca036e8d6e61db44d6;hp=1cc81c17c45efa46a7ba89d166250046f6027489;hpb=df6e0a06a390bd2b35043969e024713e22ca8bab;p=dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 1cc81c17c4..a29c6638e2 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -1,40 +1,12 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2018 Intel Corporation
  */
 
 #ifndef _VHOST_NET_CDEV_H_
 #define _VHOST_NET_CDEV_H_
 #include <stdint.h>
 #include <stdio.h>
+#include <stdbool.h>
 #include <sys/types.h>
 #include <sys/queue.h>
 #include <unistd.h>
@@ -46,24 +18,77 @@
 #include <rte_log.h>
 #include <rte_ether.h>
 #include <rte_rwlock.h>
+#include <rte_malloc.h>
 
 #include "rte_vhost.h"
+#include "rte_vdpa.h"
+#include "rte_vdpa_dev.h"
+
+#include "rte_vhost_async.h"
 
 /* Used to indicate that the device is running on a data core */
 #define VIRTIO_DEV_RUNNING 1
 /* Used to indicate that the device is ready to operate */
 #define VIRTIO_DEV_READY 2
+/* Used to indicate that the built-in vhost net device backend is enabled */
+#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
+/* Used to indicate that the device has its own data path and configured */
+#define VIRTIO_DEV_VDPA_CONFIGURED 8
+/* Used to indicate that the feature negotiation failed */
+#define VIRTIO_DEV_FEATURES_FAILED 16
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
 
 #define BUF_VECTOR_MAX 256
 
+#define VHOST_LOG_CACHE_NR 32
+
+#define MAX_PKT_BURST 32
+
+#define ASYNC_MAX_POLL_SEG 255
+
+#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
+#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)
+
+#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
+ ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
+ VRING_DESC_F_WRITE)
+#define PACKED_DESC_DEQUEUE_USED_FLAG(w) \
+ ((w) ? 
(VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0) +#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \ + VRING_DESC_F_INDIRECT) + +#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \ + sizeof(struct vring_packed_desc)) +#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1) + +#ifdef VHOST_GCC_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \ + for (iter = val; iter < size; iter++) +#endif + +#ifdef VHOST_CLANG_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \ + for (iter = val; iter < size; iter++) +#endif + +#ifdef VHOST_ICC_UNROLL_PRAGMA +#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \ + for (iter = val; iter < size; iter++) +#endif + +#ifndef vhost_for_each_try_unroll +#define vhost_for_each_try_unroll(iter, val, num) \ + for (iter = val; iter < num; iter++) +#endif + /** * Structure contains buffer address, length and descriptor index * from vring to do scatter RX. */ struct buf_vector { + uint64_t buf_iova; uint64_t buf_addr; uint32_t buf_len; uint32_t desc_idx; @@ -76,6 +101,7 @@ struct buf_vector { struct zcopy_mbuf { struct rte_mbuf *mbuf; uint32_t desc_idx; + uint16_t desc_count; uint16_t in_use; TAILQ_ENTRY(zcopy_mbuf) next; @@ -92,44 +118,97 @@ struct batch_copy_elem { uint64_t log_addr; }; +/* + * Structure that contains the info for batched dirty logging. + */ +struct log_cache_entry { + uint32_t offset; + unsigned long val; +}; + +struct vring_used_elem_packed { + uint16_t id; + uint16_t flags; + uint32_t len; + uint32_t count; +}; + /** * Structure contains variables relevant to RX/TX virtqueues. */ struct vhost_virtqueue { - struct vring_desc *desc; - struct vring_avail *avail; - struct vring_used *used; + union { + struct vring_desc *desc; + struct vring_packed_desc *desc_packed; + }; + union { + struct vring_avail *avail; + struct vring_packed_desc_event *driver_event; + }; + union { + struct vring_used *used; + struct vring_packed_desc_event *device_event; + }; uint32_t size; uint16_t last_avail_idx; uint16_t last_used_idx; + /* Last used index we notify to front end. 
*/ + uint16_t signalled_used; + bool signalled_used_valid; #define VIRTIO_INVALID_EVENTFD (-1) #define VIRTIO_UNINITIALIZED_EVENTFD (-2) /* Backend value to determine if device should started/stopped */ int backend; + int enabled; + int access_ok; + int ready; + int notif_enable; +#define VIRTIO_UNINITIALIZED_NOTIF (-1) + + rte_spinlock_t access_lock; + /* Used to notify the guest (trigger interrupt) */ int callfd; /* Currently unused as polling mode is enabled */ int kickfd; - int enabled; - int access_ok; /* Physical address of used ring, for logging */ uint64_t log_guest_addr; + /* inflight share memory info */ + union { + struct rte_vhost_inflight_info_split *inflight_split; + struct rte_vhost_inflight_info_packed *inflight_packed; + }; + struct rte_vhost_resubmit_info *resubmit_inflight; + uint64_t global_counter; + uint16_t nr_zmbuf; uint16_t zmbuf_size; uint16_t last_zmbuf_idx; struct zcopy_mbuf *zmbufs; struct zcopy_mbuf_list zmbuf_list; - struct vring_used_elem *shadow_used_ring; + union { + struct vring_used_elem *shadow_used_split; + struct vring_used_elem_packed *shadow_used_packed; + }; uint16_t shadow_used_idx; + /* Record packed ring enqueue latest desc cache aligned index */ + uint16_t shadow_aligned_idx; + /* Record packed ring first dequeue desc index */ + uint16_t shadow_last_used_idx; struct vhost_vring_addr ring_addrs; struct batch_copy_elem *batch_copy_elems; uint16_t batch_copy_nb_elems; + bool used_wrap_counter; + bool avail_wrap_counter; + + struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR]; + uint16_t log_cache_nb_elem; rte_rwlock_t iotlb_lock; rte_rwlock_t iotlb_pending_lock; @@ -137,24 +216,39 @@ struct vhost_virtqueue { TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list; int iotlb_cache_nr; TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list; + + /* operation callbacks for async dma */ + struct rte_vhost_async_channel_ops async_ops; + + struct rte_vhost_iov_iter it_pool[VHOST_MAX_ASYNC_IT]; + struct iovec vec_pool[VHOST_MAX_ASYNC_VEC]; + + /* async data transfer status */ + uintptr_t **async_pkts_pending; + #define ASYNC_PENDING_INFO_N_MSK 0xFFFF + #define ASYNC_PENDING_INFO_N_SFT 16 + uint64_t *async_pending_info; + uint16_t async_pkts_idx; + uint16_t async_pkts_inflight_n; + uint16_t async_last_seg_n; + + /* vq async features */ + bool async_inorder; + bool async_registered; + uint16_t async_threshold; } __rte_cache_aligned; -/* Old kernels have no such macros defined */ -#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE - #define VIRTIO_NET_F_GUEST_ANNOUNCE 21 -#endif - -#ifndef VIRTIO_NET_F_MQ - #define VIRTIO_NET_F_MQ 22 -#endif +/* Virtio device status as per Virtio specification */ +#define VIRTIO_DEVICE_STATUS_ACK 0x01 +#define VIRTIO_DEVICE_STATUS_DRIVER 0x02 +#define VIRTIO_DEVICE_STATUS_DRIVER_OK 0x04 +#define VIRTIO_DEVICE_STATUS_FEATURES_OK 0x08 +#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40 +#define VIRTIO_DEVICE_STATUS_FAILED 0x80 #define VHOST_MAX_VRING 0x100 #define VHOST_MAX_QUEUE_PAIRS 0x80 -#ifndef VIRTIO_NET_F_MTU - #define VIRTIO_NET_F_MTU 3 -#endif - /* Declare IOMMU related bits for older kernels */ #ifndef VIRTIO_F_IOMMU_PLATFORM @@ -193,10 +287,45 @@ struct vhost_msg { #define VIRTIO_F_VERSION_1 32 #endif -#define VHOST_USER_F_PROTOCOL_FEATURES 30 +/* Declare packed ring related bits for older kernels */ +#ifndef VIRTIO_F_RING_PACKED + +#define VIRTIO_F_RING_PACKED 34 + +struct vring_packed_desc { + uint64_t addr; + uint32_t len; + uint16_t id; + uint16_t flags; +}; + +struct vring_packed_desc_event { + uint16_t off_wrap; + uint16_t flags; +}; +#endif 
+ +/* + * Declare below packed ring defines unconditionally + * as Kernel header might use different names. + */ +#define VRING_DESC_F_AVAIL (1ULL << 7) +#define VRING_DESC_F_USED (1ULL << 15) + +#define VRING_EVENT_F_ENABLE 0x0 +#define VRING_EVENT_F_DISABLE 0x1 +#define VRING_EVENT_F_DESC 0x2 + +/* + * Available and used descs are in same order + */ +#ifndef VIRTIO_F_IN_ORDER +#define VIRTIO_F_IN_ORDER 35 +#endif /* Features supported by this builtin vhost-user net driver. */ #define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \ + (1ULL << VIRTIO_F_ANY_LAYOUT) | \ (1ULL << VIRTIO_NET_F_CTRL_VQ) | \ (1ULL << VIRTIO_NET_F_CTRL_RX) | \ (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \ @@ -204,15 +333,23 @@ struct vhost_msg { (1ULL << VIRTIO_F_VERSION_1) | \ (1ULL << VHOST_F_LOG_ALL) | \ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \ + (1ULL << VIRTIO_NET_F_GSO) | \ (1ULL << VIRTIO_NET_F_HOST_TSO4) | \ (1ULL << VIRTIO_NET_F_HOST_TSO6) | \ + (1ULL << VIRTIO_NET_F_HOST_UFO) | \ + (1ULL << VIRTIO_NET_F_HOST_ECN) | \ (1ULL << VIRTIO_NET_F_CSUM) | \ (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \ (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \ + (1ULL << VIRTIO_NET_F_GUEST_UFO) | \ + (1ULL << VIRTIO_NET_F_GUEST_ECN) | \ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \ - (1ULL << VIRTIO_NET_F_MTU) | \ - (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + (1ULL << VIRTIO_RING_F_EVENT_IDX) | \ + (1ULL << VIRTIO_NET_F_MTU) | \ + (1ULL << VIRTIO_F_IN_ORDER) | \ + (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ + (1ULL << VIRTIO_F_RING_PACKED)) struct guest_page { @@ -221,6 +358,12 @@ struct guest_page { uint64_t size; }; +struct inflight_mem_info { + int fd; + void *addr; + uint64_t size; +}; + /** * Device structure contains all configuration information relating * to the device. @@ -234,17 +377,22 @@ struct virtio_net { uint32_t flags; uint16_t vhost_hlen; /* to tell if we need broadcast rarp packet */ - rte_atomic16_t broadcast_rarp; + int16_t broadcast_rarp; uint32_t nr_vring; int dequeue_zero_copy; + int async_copy; + int extbuf; + int linearbuf; struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2]; + struct inflight_mem_info *inflight_info; #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ) char ifname[IF_NAME_SZ]; uint64_t log_size; uint64_t log_base; uint64_t log_addr; - struct ether_addr mac; + struct rte_ether_addr mac; uint16_t mtu; + uint8_t status; struct vhost_device_ops const *notify_ops; @@ -253,45 +401,97 @@ struct virtio_net { struct guest_page *guest_pages; int slave_req_fd; + rte_spinlock_t slave_req_lock; + + int postcopy_ufd; + int postcopy_listening; + + struct rte_vdpa_device *vdpa_dev; + + /* context data for the external message handlers */ + void *extern_data; + /* pre and post vhost user message handlers for the device */ + struct rte_vhost_user_extern_ops extern_ops; } __rte_cache_aligned; +static __rte_always_inline bool +vq_is_packed(struct virtio_net *dev) +{ + return dev->features & (1ull << VIRTIO_F_RING_PACKED); +} -#define VHOST_LOG_PAGE 4096 +static inline bool +desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter) +{ + uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE); -/* - * Atomically set a bit in memory. 
- */ -static __rte_always_inline void -vhost_set_bit(unsigned int nr, volatile uint8_t *addr) + return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) && + wrap_counter != !!(flags & VRING_DESC_F_USED); +} + +static inline void +vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num) { - __sync_fetch_and_or_8(addr, (1U << nr)); + vq->last_used_idx += num; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } } -static __rte_always_inline void -vhost_log_page(uint8_t *log_base, uint64_t page) +static inline void +vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num) { - vhost_set_bit(page % 8, &log_base[page / 8]); + vq->last_avail_idx += num; + if (vq->last_avail_idx >= vq->size) { + vq->avail_wrap_counter ^= 1; + vq->last_avail_idx -= vq->size; + } } +void __vhost_log_cache_write(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint64_t addr, uint64_t len); +void __vhost_log_cache_write_iova(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint64_t iova, uint64_t len); +void __vhost_log_cache_sync(struct virtio_net *dev, + struct vhost_virtqueue *vq); +void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len); +void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t iova, uint64_t len); + static __rte_always_inline void vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len) { - uint64_t page; - - if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) || - !dev->log_base || !len)) - return; + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) + __vhost_log_write(dev, addr, len); +} - if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8))) - return; +static __rte_always_inline void +vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) + __vhost_log_cache_sync(dev, vq); +} - /* To make sure guest memory updates are committed before logging */ - rte_smp_wmb(); +static __rte_always_inline void +vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t addr, uint64_t len) +{ + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) + __vhost_log_cache_write(dev, vq, addr, len); +} - page = addr / VHOST_LOG_PAGE; - while (page * VHOST_LOG_PAGE < addr + len) { - vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page); - page += 1; +static __rte_always_inline void +vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t offset, uint64_t len) +{ + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) { + if (unlikely(vq->log_guest_addr == 0)) + return; + __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, + len); } } @@ -299,17 +499,54 @@ static __rte_always_inline void vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint64_t offset, uint64_t len) { - vhost_log_write(dev, vq->log_guest_addr + offset, len); + if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) { + if (unlikely(vq->log_guest_addr == 0)) + return; + __vhost_log_write(dev, vq->log_guest_addr + offset, len); + } +} + +static __rte_always_inline void +vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t iova, uint64_t len) +{ + if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL)))) + return; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + __vhost_log_cache_write_iova(dev, vq, iova, len); + else + 
__vhost_log_cache_write(dev, vq, iova, len); +} + +static __rte_always_inline void +vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t iova, uint64_t len) +{ + if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL)))) + return; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + __vhost_log_write_iova(dev, vq, iova, len); + else + __vhost_log_write(dev, iova, len); } -/* Macros for printing using RTE_LOG */ -#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1 -#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1 +extern int vhost_config_log_level; +extern int vhost_data_log_level; + +#define VHOST_LOG_CONFIG(level, fmt, args...) \ + rte_log(RTE_LOG_ ## level, vhost_config_log_level, \ + "VHOST_CONFIG: " fmt, ##args) + +#define VHOST_LOG_DATA(level, fmt, args...) \ + (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \ + rte_log(RTE_LOG_ ## level, vhost_data_log_level, \ + "VHOST_DATA : " fmt, ##args) : \ + 0) #ifdef RTE_LIBRTE_VHOST_DEBUG #define VHOST_MAX_PRINT_BUFF 6072 -#define LOG_LEVEL RTE_LOG_DEBUG -#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args) #define PRINT_PACKET(device, addr, size, header) do { \ char *pkt_addr = (char *)(addr); \ unsigned int index; \ @@ -325,49 +562,117 @@ vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq, } \ snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \ \ - LOG_DEBUG(VHOST_DATA, "%s", packet); \ + VHOST_LOG_DATA(DEBUG, "%s", packet); \ } while (0) #else -#define LOG_LEVEL RTE_LOG_INFO -#define LOG_DEBUG(log_type, fmt, args...) do {} while (0) #define PRINT_PACKET(device, addr, size, header) do {} while (0) #endif -extern uint64_t VHOST_FEATURES; #define MAX_VHOST_DEVICE 1024 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE]; +#define VHOST_BINARY_SEARCH_THRESH 256 + +static __rte_always_inline int guest_page_addrcmp(const void *p1, + const void *p2) +{ + const struct guest_page *page1 = (const struct guest_page *)p1; + const struct guest_page *page2 = (const struct guest_page *)p2; + + if (page1->guest_phys_addr > page2->guest_phys_addr) + return 1; + if (page1->guest_phys_addr < page2->guest_phys_addr) + return -1; + + return 0; +} + /* Convert guest physical address to host physical address */ static __rte_always_inline rte_iova_t gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size) { uint32_t i; struct guest_page *page; + struct guest_page key; + + if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) { + key.guest_phys_addr = gpa; + page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages, + sizeof(struct guest_page), guest_page_addrcmp); + if (page) { + if (gpa + size < page->guest_phys_addr + page->size) + return gpa - page->guest_phys_addr + + page->host_phys_addr; + } + } else { + for (i = 0; i < dev->nr_guest_pages; i++) { + page = &dev->guest_pages[i]; + + if (gpa >= page->guest_phys_addr && + gpa + size < page->guest_phys_addr + + page->size) + return gpa - page->guest_phys_addr + + page->host_phys_addr; + } + } + + return 0; +} + +static __rte_always_inline uint64_t +hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len) +{ + struct rte_vhost_mem_region *r; + uint32_t i; - for (i = 0; i < dev->nr_guest_pages; i++) { - page = &dev->guest_pages[i]; + if (unlikely(!dev || !dev->mem)) + return 0; - if (gpa >= page->guest_phys_addr && - gpa + size < page->guest_phys_addr + page->size) { - return gpa - page->guest_phys_addr + - page->host_phys_addr; + for 
(i = 0; i < dev->mem->nregions; i++) { + r = &dev->mem->regions[i]; + + if (vva >= r->host_user_addr && + vva + len < r->host_user_addr + r->size) { + return r->guest_phys_addr + vva - r->host_user_addr; } } - return 0; } -struct virtio_net *get_device(int vid); +static __rte_always_inline struct virtio_net * +get_device(int vid) +{ + struct virtio_net *dev = vhost_devices[vid]; + + if (unlikely(!dev)) { + VHOST_LOG_CONFIG(ERR, + "(%d) device not found.\n", vid); + } + + return dev; +} int vhost_new_device(void); void cleanup_device(struct virtio_net *dev, int destroy); void reset_device(struct virtio_net *dev); void vhost_destroy_device(int); +void vhost_destroy_device_notify(struct virtio_net *dev); + +void cleanup_vq(struct vhost_virtqueue *vq, int destroy); +void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq); +void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq); int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx); +void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev); + void vhost_set_ifname(int, const char *if_name, unsigned int if_len); void vhost_enable_dequeue_zero_copy(int vid); +void vhost_set_builtin_virtio_net(int vid, bool enable); +void vhost_enable_extbuf(int vid); +void vhost_enable_linearbuf(int vid); +int vhost_enable_guest_notification(struct virtio_net *dev, + struct vhost_virtqueue *vq, int enable); struct vhost_device_ops const *vhost_driver_callback_get(const char *path); @@ -379,18 +684,173 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path); void vhost_backend_cleanup(struct virtio_net *dev); uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint64_t iova, uint64_t size, uint8_t perm); + uint64_t iova, uint64_t *len, uint8_t perm); +void *vhost_alloc_copy_ind_table(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint64_t desc_addr, uint64_t desc_len); int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq); +uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint64_t log_addr); void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq); static __rte_always_inline uint64_t vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint64_t iova, uint64_t size, uint8_t perm) + uint64_t iova, uint64_t *len, uint8_t perm) { if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) - return rte_vhost_gpa_to_vva(dev->mem, iova); + return rte_vhost_va_from_guest_pa(dev->mem, iova, len); - return __vhost_iova_to_vva(dev, vq, iova, size, perm); + return __vhost_iova_to_vva(dev, vq, iova, len, perm); +} + +#define vhost_avail_event(vr) \ + (*(volatile uint16_t*)&(vr)->used->ring[(vr)->size]) +#define vhost_used_event(vr) \ + (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size]) + +/* + * The following is used with VIRTIO_RING_F_EVENT_IDX. + * Assuming a given event_idx value from the other size, if we have + * just incremented index from old to new_idx, should we trigger an + * event? + */ +static __rte_always_inline int +vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old) +{ + return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old); +} + +static __rte_always_inline void +vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + /* Flush used->idx update before we read avail->flags. */ + rte_smp_mb(); + + /* Don't kick guest if we don't reach index specified by guest. 
*/ + if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) { + uint16_t old = vq->signalled_used; + uint16_t new = vq->async_pkts_inflight_n ? + vq->used->idx:vq->last_used_idx; + bool signalled_used_valid = vq->signalled_used_valid; + + vq->signalled_used = new; + vq->signalled_used_valid = true; + + VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n", + __func__, + vhost_used_event(vq), + old, new); + + if ((vhost_need_event(vhost_used_event(vq), new, old) && + (vq->callfd >= 0)) || + unlikely(!signalled_used_valid)) { + eventfd_write(vq->callfd, (eventfd_t) 1); + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); + } + } else { + /* Kick the guest if necessary. */ + if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) + && (vq->callfd >= 0)) { + eventfd_write(vq->callfd, (eventfd_t)1); + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); + } + } +} + +static __rte_always_inline void +vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + uint16_t old, new, off, off_wrap; + bool signalled_used_valid, kick = false; + + /* Flush used desc update. */ + rte_smp_mb(); + + if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) { + if (vq->driver_event->flags != + VRING_EVENT_F_DISABLE) + kick = true; + goto kick; + } + + old = vq->signalled_used; + new = vq->last_used_idx; + vq->signalled_used = new; + signalled_used_valid = vq->signalled_used_valid; + vq->signalled_used_valid = true; + + if (vq->driver_event->flags != VRING_EVENT_F_DESC) { + if (vq->driver_event->flags != VRING_EVENT_F_DISABLE) + kick = true; + goto kick; + } + + if (unlikely(!signalled_used_valid)) { + kick = true; + goto kick; + } + + rte_smp_rmb(); + + off_wrap = vq->driver_event->off_wrap; + off = off_wrap & ~(1 << 15); + + if (new <= old) + old -= vq->size; + + if (vq->used_wrap_counter != off_wrap >> 15) + off -= vq->size; + + if (vhost_need_event(off, new, old)) + kick = true; +kick: + if (kick) { + eventfd_write(vq->callfd, (eventfd_t)1); + if (dev->notify_ops->guest_notified) + dev->notify_ops->guest_notified(dev->vid); + } +} + +static __rte_always_inline void +free_ind_table(void *idesc) +{ + rte_free(idesc); +} + +static __rte_always_inline void +restore_mbuf(struct rte_mbuf *m) +{ + uint32_t mbuf_size, priv_size; + + while (m) { + priv_size = rte_pktmbuf_priv_size(m->pool); + mbuf_size = sizeof(struct rte_mbuf) + priv_size; + /* start of buffer is after mbuf structure and priv data */ + + m->buf_addr = (char *)m + mbuf_size; + m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size; + m = m->next; + } +} + +static __rte_always_inline bool +mbuf_is_consumed(struct rte_mbuf *m) +{ + while (m) { + if (rte_mbuf_refcnt_read(m) > 1) + return false; + m = m->next; + } + + return true; +} + +static __rte_always_inline void +put_zmbuf(struct zcopy_mbuf *zmbuf) +{ + zmbuf->in_use = 0; } #endif /* _VHOST_NET_CDEV_H_ */
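
A minimal standalone sketch of the arithmetic behind two inline helpers this patch adds: vhost_need_event() (the split-ring VIRTIO_RING_F_EVENT_IDX suppression test used by vhost_vring_call_split()/_packed()) and desc_is_avail() (the packed-ring availability check driven by the wrap counters). The expressions and the VRING_DESC_F_AVAIL/VRING_DESC_F_USED values are copied from the diff above; the wrapper names, the sample index values and main() are illustrative assumptions only, not part of DPDK or of this patch.

/*
 * Illustration only: compile with any C compiler and run to see how the
 * 16-bit event-index window and the packed-ring wrap counters behave.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Same bit values as declared in vhost.h above. */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

/* Same expression as vhost_need_event(): should the guest be signalled
 * after the used index moved from 'old' to 'new_idx', given it asked to
 * be notified once 'event_idx' has been consumed? */
static int
need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

/* Same test as desc_is_avail(): a packed descriptor is available to the
 * device when its AVAIL bit matches the driver's wrap counter and its
 * USED bit does not. */
static bool
is_avail(uint16_t flags, bool wrap_counter)
{
	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
	       wrap_counter != !!(flags & VRING_DESC_F_USED);
}

int main(void)
{
	/* Event index 10 falls inside [old 8, new 12): signal the guest. */
	printf("need_event(10, 12, 8)       = %d\n", need_event(10, 12, 8));
	/* Event index 20 not reached yet: suppress the notification. */
	printf("need_event(20, 12, 8)       = %d\n", need_event(20, 12, 8));
	/* Indices are free-running uint16_t counters; the window
	 * [65530, 2) wraps past zero and still contains 65534: signal. */
	printf("need_event(65534, 2, 65530) = %d\n",
	       need_event(65534, 2, 65530));

	/* First lap (avail wrap counter 1): driver sets AVAIL, USED clear. */
	printf("is_avail(AVAIL, 1)          = %d\n",
	       is_avail(VRING_DESC_F_AVAIL, true));
	/* Device wrote it back used (both bits match the counter):
	 * no longer available. */
	printf("is_avail(AVAIL|USED, 1)     = %d\n",
	       is_avail(VRING_DESC_F_AVAIL | VRING_DESC_F_USED, true));
	/* Second lap (wrap counter flipped to 0): an available descriptor
	 * now has AVAIL clear and USED set. */
	printf("is_avail(USED, 0)           = %d\n",
	       is_avail(VRING_DESC_F_USED, false));

	return 0;
}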