X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Frte_vhost.h;h=7fb172912a7514687b3f5c5337ffee5f16651cfd;hb=2b5fa25708cfe6594ba755faf864eef4efce4aad;hp=fe0338d00ac49e9ed5e1a85b7e15137f46037cae;hpb=07718b4f87aa7b89ce6a950364b958fbb586ff25;p=dpdk.git

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index fe0338d00a..7fb172912a 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -23,11 +23,13 @@ extern "C" {
 /* These are not C++-aware. */
 #include <linux/vhost.h>
 #include <linux/virtio_ring.h>
+#include <linux/virtio_net.h>
 
 #define RTE_VHOST_USER_CLIENT (1ULL << 0)
 #define RTE_VHOST_USER_NO_RECONNECT (1ULL << 1)
 #define RTE_VHOST_USER_DEQUEUE_ZERO_COPY (1ULL << 2)
 #define RTE_VHOST_USER_IOMMU_SUPPORT (1ULL << 3)
+#define RTE_VHOST_USER_POSTCOPY_SUPPORT (1ULL << 4)
 
 /** Protocol features. */
 #ifndef VHOST_USER_PROTOCOL_F_MQ
@@ -54,6 +56,22 @@ extern "C" {
 #define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
 #endif
 
+#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
+#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
+#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
+#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
+#endif
+
+#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
+#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
+#endif
+
 /** Indicate whether protocol features negotiation is supported. */
 #ifndef VHOST_USER_F_PROTOCOL_FEATURES
 #define VHOST_USER_F_PROTOCOL_FEATURES 30
@@ -94,6 +112,46 @@ struct rte_vhost_vring {
 	uint16_t size;
 };
 
+/**
+ * Possible results of the vhost user message handling callbacks
+ */
+enum rte_vhost_msg_result {
+	/* Message handling failed */
+	RTE_VHOST_MSG_RESULT_ERR = -1,
+	/* Message handling successful */
+	RTE_VHOST_MSG_RESULT_OK = 0,
+	/* Message handling successful and reply prepared */
+	RTE_VHOST_MSG_RESULT_REPLY = 1,
+	/* Message not handled */
+	RTE_VHOST_MSG_RESULT_NOT_HANDLED,
+};
+
+/**
+ * Function prototype for the vhost backend to handle specific vhost user
+ * messages.
+ *
+ * @param vid
+ *  vhost device id
+ * @param msg
+ *  Message pointer.
+ * @return
+ *  RTE_VHOST_MSG_RESULT_OK on success,
+ *  RTE_VHOST_MSG_RESULT_REPLY on success with reply,
+ *  RTE_VHOST_MSG_RESULT_ERR on failure,
+ *  RTE_VHOST_MSG_RESULT_NOT_HANDLED if message was not handled.
+ */
+typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);
+
+/**
+ * Optional vhost user message handlers.
+ */
+struct rte_vhost_user_extern_ops {
+	/* Called prior to the master message handling. */
+	rte_vhost_msg_handle pre_msg_handle;
+	/* Called after the master message handling. */
+	rte_vhost_msg_handle post_msg_handle;
+};
+
 /**
  * Device and vring operations.
  */
@@ -120,6 +178,11 @@ struct vhost_device_ops {
 /**
  * Convert guest physical address to host virtual address
  *
+ * This function is deprecated because it is unsafe.
+ * New rte_vhost_va_from_guest_pa() should be used instead to ensure
+ * guest physical ranges are fully and contiguously mapped into
+ * process virtual address space.
+ *
  * @param mem
  *  the guest memory regions
  * @param gpa
@@ -127,6 +190,7 @@ struct vhost_device_ops {
  * @return
  *  the host virtual address on success, 0 on failure
  */
+__rte_deprecated
 static __rte_always_inline uint64_t
 rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
 {
@@ -145,6 +209,46 @@ rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
 	return 0;
 }
 
+/**
+ * Convert guest physical address to host virtual address safely
+ *
+ * This variant of rte_vhost_gpa_to_vva() ensures that all of the
+ * requested length is mapped and contiguous in the process address
+ * space.
+ *
+ * @param mem
+ *  the guest memory regions
+ * @param gpa
+ *  the guest physical address for querying
+ * @param len
+ *  the size of the requested area to map, updated with actual size mapped
+ * @return
+ *  the host virtual address on success, 0 on failure
+ */
+static __rte_always_inline uint64_t
+rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
+			   uint64_t gpa, uint64_t *len)
+{
+	struct rte_vhost_mem_region *r;
+	uint32_t i;
+
+	for (i = 0; i < mem->nregions; i++) {
+		r = &mem->regions[i];
+		if (gpa >= r->guest_phys_addr &&
+		    gpa < r->guest_phys_addr + r->size) {
+
+			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
+				*len = r->guest_phys_addr + r->size - gpa;
+
+			return gpa - r->guest_phys_addr +
+			       r->host_user_addr;
+		}
+	}
+	*len = 0;
+
+	return 0;
+}
+
 #define RTE_VHOST_NEED_LOG(features) ((features) & (1ULL << VHOST_F_LOG_ALL))
 
 /**
@@ -209,7 +313,8 @@ int rte_vhost_driver_unregister(const char *path);
  * @return
  *  0 on success, -1 on failure
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_driver_attach_vdpa_device(const char *path, int did);
 
 /**
@@ -220,7 +325,8 @@ rte_vhost_driver_attach_vdpa_device(const char *path, int did);
  * @return
  *  0 on success, -1 on failure
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_driver_detach_vdpa_device(const char *path);
 
 /**
@@ -231,7 +337,8 @@ rte_vhost_driver_detach_vdpa_device(const char *path);
  * @return
  *  Device id, -1 on failure
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_driver_get_vdpa_device_id(const char *path);
 
 /**
@@ -289,6 +396,21 @@ int rte_vhost_driver_disable_features(const char *path, uint64_t features);
  */
 int rte_vhost_driver_get_features(const char *path, uint64_t *features);
 
+/**
+ * Set the protocol feature bits before feature negotiation.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param protocol_features
+ *  Supported protocol features
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_driver_set_protocol_features(const char *path,
+		uint64_t protocol_features);
+
 /**
  * Get the protocol feature bits before feature negotiation.
  *
@@ -299,7 +421,8 @@ int rte_vhost_driver_get_features(const char *path, uint64_t *features);
  * @return
  *  0 on success, -1 on failure
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_driver_get_protocol_features(const char *path,
 		uint64_t *protocol_features);
 
@@ -313,7 +436,8 @@ rte_vhost_driver_get_protocol_features(const char *path,
  * @return
  *  0 on success, -1 on failure
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
 
 /**
@@ -425,7 +549,7 @@ int rte_vhost_get_ifname(int vid, char *buf, size_t len);
  *  virtio queue index
  *
  * @return
- *  num of avail entires left
+ *  num of avail entries left
  */
 uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
 
@@ -473,7 +597,7 @@ uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 /**
  * Get guest mem table: a list of memory regions.
  *
- * An rte_vhost_vhost_memory object will be allocated internaly, to hold the
+ * An rte_vhost_vhost_memory object will be allocated internally, to hold the
  *  guest memory regions. Application should free it at destroy_device()
  *  callback.
  *
@@ -526,6 +650,77 @@ int rte_vhost_vring_call(int vid, uint16_t vring_idx);
  */
 uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
 
+/**
+ * Get log base and log size of the vhost device
+ *
+ * @param vid
+ *  vhost device ID
+ * @param log_base
+ *  vhost log base
+ * @param log_size
+ *  vhost log size
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);
+
+/**
+ * Get last_avail/used_idx of the vhost virtqueue
+ *
+ * @param vid
+ *  vhost device ID
+ * @param queue_id
+ *  vhost queue index
+ * @param last_avail_idx
+ *  vhost last_avail_idx to get
+ * @param last_used_idx
+ *  vhost last_used_idx to get
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_get_vring_base(int vid, uint16_t queue_id,
+		uint16_t *last_avail_idx, uint16_t *last_used_idx);
+
+/**
+ * Set last_avail/used_idx of the vhost virtqueue
+ *
+ * @param vid
+ *  vhost device ID
+ * @param queue_id
+ *  vhost queue index
+ * @param last_avail_idx
+ *  last_avail_idx to set
+ * @param last_used_idx
+ *  last_used_idx to set
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_set_vring_base(int vid, uint16_t queue_id,
+		uint16_t last_avail_idx, uint16_t last_used_idx);
+
+/**
+ * Register external message handling callbacks
+ *
+ * @param vid
+ *  vhost device ID
+ * @param ops
+ *  virtio external callbacks to register
+ * @param ctx
+ *  additional context passed to the callbacks
+ * @return
+ *  0 on success, -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_extern_callback_register(int vid,
+		struct rte_vhost_user_extern_ops const * const ops, void *ctx);
+
 /**
  * Get vdpa device id for vhost device.
  *
@@ -534,7 +729,8 @@ uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
  * @return
  *  device id
  */
-int __rte_experimental
+__rte_experimental
+int
 rte_vhost_get_vdpa_device_id(int vid);
 
 #ifdef __cplusplus
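
The deprecation of rte_vhost_gpa_to_vva() in this diff also changes the calling convention for address translation: the caller now passes the length it needs in *len and must check the value written back, since only that many bytes are guaranteed to be contiguous in the process address space. A minimal sketch, assuming mem comes from rte_vhost_get_mem_table(); map_desc(), desc_gpa and desc_len are made-up names, not part of the patch:

#include <stdint.h>
#include <rte_vhost.h>

static void *
map_desc(struct rte_vhost_memory *mem, uint64_t desc_gpa, uint64_t desc_len)
{
	uint64_t len = desc_len;	/* how much we need mapped */
	uint64_t vva;

	vva = rte_vhost_va_from_guest_pa(mem, desc_gpa, &len);
	if (vva == 0 || len < desc_len) {
		/*
		 * Address not backed by any region, or the range spans a
		 * region boundary: only "len" bytes are contiguous here.
		 * A real backend would fail or translate the remainder
		 * in further chunks.
		 */
		return NULL;
	}

	return (void *)(uintptr_t)vva;
}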
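The external message hooks added above are easiest to see in use. Below is a minimal sketch, not part of the patch: pre_msg_cb(), post_msg_cb() and attach_extern_ops() are illustrative names, the vid is assumed to come from an existing vhost_device_ops callback once the device exists, and the msg pointer is treated as opaque because its layout is internal to the vhost library.

#include <stddef.h>
#include <rte_vhost.h>

/* Declines every message, per the NOT_HANDLED result documented above. */
static enum rte_vhost_msg_result
pre_msg_cb(int vid, void *msg)
{
	(void)vid;
	(void)msg;
	return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}

/* Runs after the master message handling; also a no-op here. */
static enum rte_vhost_msg_result
post_msg_cb(int vid, void *msg)
{
	(void)vid;
	(void)msg;
	return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
}

static const struct rte_vhost_user_extern_ops extern_ops = {
	.pre_msg_handle = pre_msg_cb,
	.post_msg_handle = post_msg_cb,
};

/* Hypothetical helper, called once a vhost device id (vid) exists. */
static int
attach_extern_ops(int vid)
{
	/* ctx: additional context for the callbacks (unused in this sketch). */
	return rte_vhost_extern_callback_register(vid, &extern_ops, NULL);
}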