X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=lib%2Flibrte_vhost%2Fvhost.h;h=fc31796bfe0de6e72eaa1d907df80e19a688ccd6;hb=7c7b7562252742a6c298de64df486873336fd058;hp=d81da06742ac1407e9248dd1201f38b3c22a2fe2;hpb=9eefef3b5970f1e2f13d9a72e1fcd9990ad19c8a;p=dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index d81da06742..fc31796bfe 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -18,6 +18,7 @@
 #include <rte_log.h>
 #include <rte_ether.h>
 #include <rte_rwlock.h>
+#include <rte_malloc.h>
 
 #include "rte_vhost.h"
 #include "rte_vdpa.h"
@@ -219,13 +220,6 @@ struct vhost_msg {
 
 #define VIRTIO_F_RING_PACKED 34
 
-#define VRING_DESC_F_NEXT	1
-#define VRING_DESC_F_WRITE	2
-#define VRING_DESC_F_INDIRECT	4
-
-#define VRING_DESC_F_AVAIL	(1ULL << 7)
-#define VRING_DESC_F_USED	(1ULL << 15)
-
 struct vring_packed_desc {
 	uint64_t addr;
 	uint32_t len;
@@ -233,16 +227,23 @@ struct vring_packed_desc {
 	uint16_t flags;
 };
 
-#define VRING_EVENT_F_ENABLE 0x0
-#define VRING_EVENT_F_DISABLE 0x1
-#define VRING_EVENT_F_DESC 0x2
-
 struct vring_packed_desc_event {
 	uint16_t off_wrap;
 	uint16_t flags;
 };
 #endif
 
+/*
+ * Declare below packed ring defines unconditionally
+ * as Kernel header might use different names.
+ */
+#define VRING_DESC_F_AVAIL	(1ULL << 7)
+#define VRING_DESC_F_USED	(1ULL << 15)
+
+#define VRING_EVENT_F_ENABLE 0x0
+#define VRING_EVENT_F_DISABLE 0x1
+#define VRING_EVENT_F_DESC 0x2
+
 /*
  * Available and used descs are in same order
  */
@@ -275,7 +276,8 @@ struct vring_packed_desc_event {
 				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
 				(1ULL << VIRTIO_NET_F_MTU) | \
 				(1ULL << VIRTIO_F_IN_ORDER) | \
-				(1ULL << VIRTIO_F_IOMMU_PLATFORM))
+				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+				(1ULL << VIRTIO_F_RING_PACKED))
 
 
 struct guest_page {
@@ -284,56 +286,6 @@ struct guest_page {
 	uint64_t size;
 };
 
-/* The possible results of a message handling function */
-enum vh_result {
-	/* Message handling failed */
-	VH_RESULT_ERR = -1,
-	/* Message handling successful */
-	VH_RESULT_OK = 0,
-	/* Message handling successful and reply prepared */
-	VH_RESULT_REPLY = 1,
-};
-
-/**
- * function prototype for the vhost backend to handler specific vhost user
- * messages prior to the master message handling
- *
- * @param vid
- *  vhost device id
- * @param msg
- *  Message pointer.
- * @param skip_master
- *  If the handler requires skipping the master message handling, this variable
- *  shall be written 1, otherwise 0.
- * @return
- *  VH_RESULT_OK on success, VH_RESULT_REPLY on success with reply,
- *  VH_RESULT_ERR on failure
- */
-typedef enum vh_result (*vhost_msg_pre_handle)(int vid, void *msg,
-		uint32_t *skip_master);
-
-/**
- * function prototype for the vhost backend to handler specific vhost user
- * messages after the master message handling is done
- *
- * @param vid
- *  vhost device id
- * @param msg
- *  Message pointer.
- * @return
- *  VH_RESULT_OK on success, VH_RESULT_REPLY on success with reply,
- *  VH_RESULT_ERR on failure
- */
-typedef enum vh_result (*vhost_msg_post_handle)(int vid, void *msg);
-
-/**
- * pre and post vhost user message handlers
- */
-struct vhost_user_extern_ops {
-	vhost_msg_pre_handle pre_msg_handle;
-	vhost_msg_post_handle post_msg_handle;
-};
-
 /**
  * Device structure contains all configuration information relating
  * to the device.
@@ -369,6 +321,7 @@ struct virtio_net {
 	rte_spinlock_t	slave_req_lock;
 
 	int			postcopy_ufd;
+	int			postcopy_listening;
 
 	/*
	 * Device id to identify a specific backend device.
@@ -376,10 +329,10 @@ struct virtio_net {
 	 */
 	int			vdpa_dev_id;
 
-	/* private data for virtio device */
+	/* context data for the external message handlers */
 	void			*extern_data;
 	/* pre and post vhost user message handlers for the device */
-	struct vhost_user_extern_ops extern_ops;
+	struct rte_vhost_user_extern_ops extern_ops;
 } __rte_cache_aligned;
 
 static __rte_always_inline bool
@@ -391,8 +344,10 @@ vq_is_packed(struct virtio_net *dev)
 static inline bool
 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
 {
-	return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
-		wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
+	uint16_t flags = *((volatile uint16_t *) &desc->flags);
+
+	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
+		wrap_counter != !!(flags & VRING_DESC_F_USED);
 }
 
 #define VHOST_LOG_PAGE	4096
@@ -452,12 +407,9 @@ vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
 			!dev->log_base))
 		return;
 
-	log_base = (unsigned long *)(uintptr_t)dev->log_base;
+	rte_smp_wmb();
 
-	/*
-	 * It is expected a write memory barrier has been issued
-	 * before this function is called.
-	 */
+	log_base = (unsigned long *)(uintptr_t)dev->log_base;
 
 	for (i = 0; i < vq->log_cache_nb_elem; i++) {
 		struct log_cache_entry *elem = vq->log_cache + i;
@@ -625,7 +577,6 @@ void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
 void vhost_attach_vdpa_device(int vid, int did);
-void vhost_detach_vdpa_device(int vid);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_enable_dequeue_zero_copy(int vid);
@@ -751,4 +702,43 @@ kick:
 	eventfd_write(vq->callfd, (eventfd_t)1);
 }
 
+static __rte_always_inline void *
+alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+		uint64_t desc_addr, uint64_t desc_len)
+{
+	void *idesc;
+	uint64_t src, dst;
+	uint64_t len, remain = desc_len;
+
+	idesc = rte_malloc(__func__, desc_len, 0);
+	if (unlikely(!idesc))
+		return 0;
+
+	dst = (uint64_t)(uintptr_t)idesc;
+
+	while (remain) {
+		len = remain;
+		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+				VHOST_ACCESS_RO);
+		if (unlikely(!src || !len)) {
+			rte_free(idesc);
+			return 0;
+		}
+
+		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+		remain -= len;
+		dst += len;
+		desc_addr += len;
+	}
+
+	return idesc;
+}
+
+static __rte_always_inline void
+free_ind_table(void *idesc)
+{
+	rte_free(idesc);
+}
+
 #endif /* _VHOST_NET_CDEV_H_ */
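
Usage note (not part of the patch above): the sketch below shows one way the alloc_copy_ind_table()/free_ind_table() helpers moved into vhost.h could be consumed by a datapath routine when an indirect descriptor table does not translate to a single contiguous host-virtual range. The read_indirect_descs() wrapper and its control flow are hypothetical; only alloc_copy_ind_table(), free_ind_table(), vhost_iova_to_vva(), VHOST_ACCESS_RO and struct vring_packed_desc come from the header itself.

/*
 * Hypothetical caller of the helpers above: map an indirect descriptor
 * table, falling back to a private copy when the guest buffer spans
 * more than one host-virtually contiguous region.
 */
static int
read_indirect_descs(struct virtio_net *dev, struct vhost_virtqueue *vq,
		struct vring_packed_desc *desc)
{
	struct vring_packed_desc *descs, *idescs = NULL;
	uint64_t dlen = desc->len;
	uint32_t nr_descs = desc->len / sizeof(struct vring_packed_desc);
	uint32_t i;

	/* Try to translate the whole table in one go. */
	descs = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO);
	if (unlikely(!descs))
		return -1;

	if (unlikely(dlen < desc->len)) {
		/* Table is not contiguous in host VA: take a copy. */
		idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len);
		if (unlikely(!idescs))
			return -1;
		descs = idescs;
	}

	for (i = 0; i < nr_descs; i++) {
		/* ... consume descs[i].addr / descs[i].len here ... */
	}

	if (unlikely(idescs))
		free_ind_table(idescs);

	return 0;
}

The fallback keeps the common case (a fully contiguous mapping) copy-free and only pays the rte_malloc() plus per-region rte_memcpy() cost when vhost_iova_to_vva() returns a shorter length than requested.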