/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#ifndef _RTE_VHOST_H_
#define _RTE_VHOST_H_

/**
 * @file
 * Interface to vhost-user
 */

#include <stdbool.h>
#include <stdint.h>
#include <sys/eventfd.h>

#include <rte_memory.h>
#include <rte_mempool.h>

#ifdef __cplusplus
extern "C" {
#endif

/* These are not C++-aware. */
#include <linux/vhost.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_net.h>
#define RTE_VHOST_USER_CLIENT		(1ULL << 0)
#define RTE_VHOST_USER_NO_RECONNECT	(1ULL << 1)
#define RTE_VHOST_USER_RESERVED_1	(1ULL << 2)
#define RTE_VHOST_USER_IOMMU_SUPPORT	(1ULL << 3)
#define RTE_VHOST_USER_POSTCOPY_SUPPORT	(1ULL << 4)
/* support mbuf with external buffer attached */
#define RTE_VHOST_USER_EXTBUF_SUPPORT	(1ULL << 5)
/* support only linear buffers (no chained mbufs) */
#define RTE_VHOST_USER_LINEARBUF_SUPPORT	(1ULL << 6)
#define RTE_VHOST_USER_ASYNC_COPY	(1ULL << 7)
#define RTE_VHOST_USER_NET_COMPLIANT_OL_FLAGS	(1ULL << 8)
/* Features. */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif
/** Protocol features. */
#ifndef VHOST_USER_PROTOCOL_F_MQ
#define VHOST_USER_PROTOCOL_F_MQ 0
#endif

#ifndef VHOST_USER_PROTOCOL_F_LOG_SHMFD
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#endif

#ifndef VHOST_USER_PROTOCOL_F_RARP
#define VHOST_USER_PROTOCOL_F_RARP 2
#endif

#ifndef VHOST_USER_PROTOCOL_F_REPLY_ACK
#define VHOST_USER_PROTOCOL_F_REPLY_ACK 3
#endif

#ifndef VHOST_USER_PROTOCOL_F_NET_MTU
#define VHOST_USER_PROTOCOL_F_NET_MTU 4
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_REQ
#define VHOST_USER_PROTOCOL_F_SLAVE_REQ 5
#endif

#ifndef VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
#define VHOST_USER_PROTOCOL_F_CRYPTO_SESSION 7
#endif

#ifndef VHOST_USER_PROTOCOL_F_PAGEFAULT
#define VHOST_USER_PROTOCOL_F_PAGEFAULT 8
#endif

#ifndef VHOST_USER_PROTOCOL_F_CONFIG
#define VHOST_USER_PROTOCOL_F_CONFIG 9
#endif

#ifndef VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD
#define VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD 10
#endif

#ifndef VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
#define VHOST_USER_PROTOCOL_F_HOST_NOTIFIER 11
#endif

#ifndef VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
#define VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD 12
#endif

#ifndef VHOST_USER_PROTOCOL_F_STATUS
#define VHOST_USER_PROTOCOL_F_STATUS 16
#endif
/** Indicate whether protocol features negotiation is supported. */
#ifndef VHOST_USER_F_PROTOCOL_FEATURES
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif
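
/*
 * Example: a minimal sketch (illustrative only, not part of this API) of
 * building a feature mask from the bit positions above, e.g. for
 * rte_vhost_driver_set_features() declared below; the socket path is an
 * assumption:
 *
 *	uint64_t features = (1ULL << VIRTIO_NET_F_MQ) |
 *			    (1ULL << VIRTIO_F_ANY_LAYOUT) |
 *			    (1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
 *
 *	rte_vhost_driver_set_features("/tmp/vhost.sock", features);
 */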
struct rte_vdpa_device;

/**
 * Information relating to memory regions including offsets to
 * addresses in QEMU's memory file.
 */
struct rte_vhost_mem_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void	 *mmap_addr;
	uint64_t mmap_size;
	int fd;
};
/**
 * Memory structure includes region and mapping information.
 */
struct rte_vhost_memory {
	uint32_t nregions;
	struct rte_vhost_mem_region regions[];
};
struct rte_vhost_inflight_desc_split {
	uint8_t inflight;
	uint8_t padding[5];
	uint16_t next;
	uint64_t counter;
};
struct rte_vhost_inflight_info_split {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t last_inflight_io;
	uint16_t used_idx;
	struct rte_vhost_inflight_desc_split desc[0];
};
struct rte_vhost_inflight_desc_packed {
	uint8_t inflight;
	uint8_t padding;
	uint16_t next;
	uint16_t last;
	uint16_t num;
	uint64_t counter;
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint64_t addr;
};
struct rte_vhost_inflight_info_packed {
	uint64_t features;
	uint16_t version;
	uint16_t desc_num;
	uint16_t free_head;
	uint16_t old_free_head;
	uint16_t used_idx;
	uint16_t old_used_idx;
	uint8_t used_wrap_counter;
	uint8_t old_used_wrap_counter;
	uint8_t padding[7];
	struct rte_vhost_inflight_desc_packed desc[0];
};
struct rte_vhost_resubmit_desc {
	uint16_t index;
	uint64_t counter;
};
struct rte_vhost_resubmit_info {
	struct rte_vhost_resubmit_desc *resubmit_list;
	uint16_t resubmit_num;
};
struct rte_vhost_ring_inflight {
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};

	struct rte_vhost_resubmit_info *resubmit_inflight;
};
struct rte_vhost_vring {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint64_t log_guest_addr;

	/** Deprecated, use rte_vhost_vring_call() instead. */
	int callfd;

	int kickfd;
	uint16_t size;
};
/**
 * Possible results of the vhost user message handling callbacks
 */
enum rte_vhost_msg_result {
	/* Message handling failed */
	RTE_VHOST_MSG_RESULT_ERR = -1,
	/* Message handling successful */
	RTE_VHOST_MSG_RESULT_OK = 0,
	/* Message handling successful and reply prepared */
	RTE_VHOST_MSG_RESULT_REPLY = 1,
	/* Message not handled */
	RTE_VHOST_MSG_RESULT_NOT_HANDLED,
};
/**
 * Function prototype for the vhost backend to handle specific vhost user
 * messages.
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @return
 *  RTE_VHOST_MSG_RESULT_OK on success,
 *  RTE_VHOST_MSG_RESULT_REPLY on success with reply,
 *  RTE_VHOST_MSG_RESULT_ERR on failure,
 *  RTE_VHOST_MSG_RESULT_NOT_HANDLED if message was not handled.
 */
typedef enum rte_vhost_msg_result (*rte_vhost_msg_handle)(int vid, void *msg);
/**
 * Optional vhost user message handlers.
 */
struct rte_vhost_user_extern_ops {
	/* Called prior to the master message handling. */
	rte_vhost_msg_handle pre_msg_handle;
	/* Called after the master message handling. */
	rte_vhost_msg_handle post_msg_handle;
};
/**
 * Device and vring operations.
 */
struct vhost_device_ops {
	int (*new_device)(int vid);		/**< Add device. */
	void (*destroy_device)(int vid);	/**< Remove device. */

	int (*vring_state_changed)(int vid, uint16_t queue_id, int enable);	/**< triggered when a vring is enabled or disabled */

	/**
	 * Features could be changed after the feature negotiation.
	 * For example, VHOST_F_LOG_ALL will be set/cleared at the
	 * start/end of live migration, respectively. This callback
	 * is used to inform the application on such change.
	 */
	int (*features_changed)(int vid, uint64_t features);

	int (*new_connection)(int vid);
	void (*destroy_connection)(int vid);

	/**
	 * This callback gets called each time a guest gets notified
	 * about waiting packets. This is the interrupt handling through
	 * the eventfd_write(callfd), which can be used for counting these
	 * "slow" syscalls.
	 */
	void (*guest_notified)(int vid);

	void *reserved[1]; /**< Reserved for future extension */
};
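
/*
 * Example: a minimal sketch (illustrative only) of providing device
 * callbacks; the handler names are hypothetical, not part of this API:
 *
 *	static int on_new_device(int vid) { return 0; }
 *	static void on_destroy_device(int vid) { }
 *
 *	static const struct vhost_device_ops ops = {
 *		.new_device = on_new_device,
 *		.destroy_device = on_destroy_device,
 *	};
 *
 * The structure is then passed to rte_vhost_driver_callback_register(),
 * declared below.
 */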
/**
 * Convert guest physical address to host virtual address
 *
 * This function is deprecated because it is unsafe.
 * New rte_vhost_va_from_guest_pa() should be used instead to ensure
 * guest physical ranges are fully and contiguously mapped into
 * process virtual address space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @return
 *  the host virtual address on success, 0 on failure
 */
__rte_deprecated
static __rte_always_inline uint64_t
rte_vhost_gpa_to_vva(struct rte_vhost_memory *mem, uint64_t gpa)
{
	struct rte_vhost_mem_region *reg;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		reg = &mem->regions[i];
		if (gpa >= reg->guest_phys_addr &&
		    gpa <  reg->guest_phys_addr + reg->size) {
			return gpa - reg->guest_phys_addr +
			       reg->host_user_addr;
		}
	}

	return 0;
}
/**
 * Convert guest physical address to host virtual address safely
 *
 * This variant of rte_vhost_gpa_to_vva() ensures that all of the
 * requested length is mapped and contiguous in the process address
 * space.
 *
 * @param mem
 *  the guest memory regions
 * @param gpa
 *  the guest physical address for querying
 * @param len
 *  the size of the requested area to map, updated with actual size mapped
 * @return
 *  the host virtual address on success, 0 on failure
 */
static __rte_always_inline uint64_t
rte_vhost_va_from_guest_pa(struct rte_vhost_memory *mem,
			   uint64_t gpa, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	for (i = 0; i < mem->nregions; i++) {
		r = &mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa <  r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size -
				     gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
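
/*
 * Example: a minimal sketch (illustrative only) of translating a guest
 * buffer safely; "mem" comes from rte_vhost_get_mem_table() declared below,
 * and desc_addr/desc_len are hypothetical descriptor fields:
 *
 *	uint64_t len = desc_len;
 *	uint64_t vva = rte_vhost_va_from_guest_pa(mem, desc_addr, &len);
 *
 *	if (vva == 0 || len < desc_len)
 *		return -1;
 *
 * A zero return, or a len shortened below the requested size, means the
 * range is unmapped or not contiguous and must not be accessed.
 */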
#define RTE_VHOST_NEED_LOG(features)	((features) & (1ULL << VHOST_F_LOG_ALL))
/**
 * Log the memory write start with given address.
 *
 * This function only needs to be invoked when the live migration starts.
 * Therefore, we won't need to call it most of the time. To keep the
 * performance impact to a minimum, it's suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_write(vid, addr, len);
 *
 * @param vid
 *  vhost device ID
 * @param addr
 *  the starting address for write (in guest physical address space)
 * @param len
 *  the length to write
 */
void rte_vhost_log_write(int vid, uint64_t addr, uint64_t len);
/**
 * Log the used ring update start at given offset.
 *
 * Same as rte_vhost_log_write, it's suggested to do a check before
 * calling it:
 *
 *        if (unlikely(RTE_VHOST_NEED_LOG(features)))
 *                rte_vhost_log_used_vring(vid, vring_idx, offset, len);
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  the vring index
 * @param offset
 *  the offset inside the used ring
 * @param len
 *  the length to write
 */
void rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			      uint64_t offset, uint64_t len);
/**
 * Enable or disable guest notifications for the given queue.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index
 * @param enable
 *  1 to enable, 0 to disable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable);
/**
 * Register vhost driver. path could be different for multiple
 * instance support.
 */
int rte_vhost_driver_register(const char *path, uint64_t flags);

/* Unregister vhost driver. This is only meaningful to vhost user. */
int rte_vhost_driver_unregister(const char *path);
/**
 * Set the vDPA device id, enforcing a single connection per socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @param dev
 *  vDPA device pointer
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_attach_vdpa_device(const char *path,
		struct rte_vdpa_device *dev);
/**
 * Unset the vDPA device id.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_detach_vdpa_device(const char *path);
/**
 * Get the vDPA device attached to the vhost-user socket.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  vDPA device pointer, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);
/**
 * Set the feature bits the vhost-user driver supports.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Supported features
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_set_features(const char *path, uint64_t features);
/**
 * Enable vhost-user driver features.
 *
 * Note that
 * - the param features should be a subset of the feature bits provided
 *   by rte_vhost_driver_set_features().
 * - it must be invoked before vhost-user negotiation starts.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to enable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_enable_features(const char *path, uint64_t features);
/**
 * Disable vhost-user driver features.
 *
 * The two notes at rte_vhost_driver_enable_features() also apply here.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  Features to disable
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_disable_features(const char *path, uint64_t features);
/**
 * Get the feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_get_features(const char *path, uint64_t *features);
/**
 * Set the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  Supported protocol features
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_set_protocol_features(const char *path,
		uint64_t protocol_features);
/**
 * Get the protocol feature bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_protocol_features(const char *path,
		uint64_t *protocol_features);
/**
 * Get the queue number bits before feature negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @param queue_num
 *  A pointer to store the queried queue number bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num);
/**
 * Get the feature bits after negotiation
 *
 * @param vid
 *  Vhost device ID
 * @param features
 *  A pointer to store the queried feature bits
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_negotiated_features(int vid, uint64_t *features);
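
/*
 * Example: a minimal sketch (illustrative only) of testing a negotiated
 * feature bit; rte_vhost_get_mtu() is declared below:
 *
 *	uint64_t features;
 *
 *	if (rte_vhost_get_negotiated_features(vid, &features) == 0 &&
 *	    (features & (1ULL << VIRTIO_NET_F_MTU))) {
 *		uint16_t mtu;
 *		rte_vhost_get_mtu(vid, &mtu);
 *	}
 */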
/**
 * Get the protocol feature bits after negotiation
 *
 * @param vid
 *  Vhost device ID
 * @param protocol_features
 *  A pointer to store the queried protocol feature bits
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_negotiated_protocol_features(int vid,
		uint64_t *protocol_features);
/* Register callbacks. */
int rte_vhost_driver_callback_register(const char *path,
	struct vhost_device_ops const * const ops);
/**
 * Start the vhost-user driver.
 *
 * This function triggers the vhost-user negotiation.
 *
 * @param path
 *  The vhost-user socket file path
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_driver_start(const char *path);
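
/*
 * Example: a minimal sketch (illustrative only) of bringing up a vhost-user
 * port; the socket path and the "ops" structure from the earlier example
 * are assumptions:
 *
 *	const char *path = "/tmp/vhost.sock";
 *
 *	if (rte_vhost_driver_register(path, RTE_VHOST_USER_CLIENT) != 0)
 *		return -1;
 *	if (rte_vhost_driver_callback_register(path, &ops) != 0 ||
 *	    rte_vhost_driver_start(path) != 0) {
 *		rte_vhost_driver_unregister(path);
 *		return -1;
 *	}
 */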
/**
 * Get the MTU value of the device if set in QEMU.
 *
 * @param vid
 *  virtio-net device ID
 * @param mtu
 *  The variable to store the MTU value
 * @return
 *  0: success
 *  -EAGAIN: device not yet started
 *  -ENOTSUP: device does not support MTU feature
 */
int rte_vhost_get_mtu(int vid, uint16_t *mtu);
/**
 * Get the numa node from which the virtio net device's memory
 * was allocated.
 *
 * @param vid
 *  vhost device ID
 * @return
 *  The numa node, -1 on failure
 */
int rte_vhost_get_numa_node(int vid);
/**
 * @deprecated
 * Get the number of queues the device supports.
 *
 * Note this function is deprecated, as it returns a queue pair number,
 * which is vhost specific. Instead, rte_vhost_get_vring_num should
 * be used.
 *
 * @param vid
 *  vhost device ID
 * @return
 *  The number of queues, 0 on failure
 */
__rte_deprecated
uint32_t rte_vhost_get_queue_num(int vid);
/**
 * Get the number of vrings the device supports.
 *
 * @param vid
 *  vhost device ID
 * @return
 *  The number of vrings, 0 on failure
 */
uint16_t rte_vhost_get_vring_num(int vid);
/**
 * Get the virtio net device's ifname, which is the vhost-user socket
 * file path.
 *
 * @param vid
 *  vhost device ID
 * @param buf
 *  The buffer to store the queried ifname
 * @param len
 *  The length of the buffer
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_ifname(int vid, char *buf, size_t len);
/**
 * Get how many avail entries are left in the queue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index
 * @return
 *  num of avail entries left
 */
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
/**
 * This function adds buffers to the virtio device's RX virtqueue. Buffers can
 * be received from the physical port or from another virtual device. A packet
 * count is returned to indicate the number of packets that were successfully
 * added to the RX queue.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param pkts
 *  array to contain packets to be enqueued
 * @param count
 *  packets num to be enqueued
 * @return
 *  num of packets enqueued
 */
uint16_t rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
	struct rte_mbuf **pkts, uint16_t count);
/**
 * This function gets guest buffers from the virtio device TX virtqueue,
 * constructs host mbufs, copies guest buffer content to host mbufs and
 * stores them in pkts to be processed.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  virtio queue index in mq case
 * @param mbuf_pool
 *  mbuf_pool where host mbuf is allocated.
 * @param pkts
 *  array to contain packets to be dequeued
 * @param count
 *  packets num to be dequeued
 * @return
 *  num of packets dequeued
 */
uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count);
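
/*
 * Example: a minimal sketch (illustrative only) of a polling loop that
 * echoes guest TX traffic back to the guest RX ring; BURST_SZ and "pool"
 * are assumptions, and rte_pktmbuf_free() comes from rte_mbuf.h:
 *
 *	struct rte_mbuf *pkts[BURST_SZ];
 *	uint16_t n, sent, i;
 *
 *	n = rte_vhost_dequeue_burst(vid, 1, pool, pkts, BURST_SZ);
 *	sent = rte_vhost_enqueue_burst(vid, 0, pkts, n);
 *	for (i = sent; i < n; i++)
 *		rte_pktmbuf_free(pkts[i]);
 *
 * For a single-queue device, index 1 is the guest TX ring and index 0 the
 * guest RX ring; mbufs not accepted by the RX ring must be freed.
 */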
/**
 * Get guest mem table: a list of memory regions.
 *
 * An rte_vhost_memory object will be allocated internally, to hold the
 * guest memory regions. Application should free it at destroy_device()
 * callback.
 *
 * @param vid
 *  vhost device ID
 * @param mem
 *  To store the returned mem regions
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem);
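
/*
 * Example: a minimal sketch (illustrative only) of fetching and releasing
 * the mem table; the plain free() matches how DPDK sample applications
 * release it:
 *
 *	struct rte_vhost_memory *mem;
 *
 *	if (rte_vhost_get_mem_table(vid, &mem) != 0)
 *		return -1;
 *	... translate addresses with rte_vhost_va_from_guest_pa(mem, ...) ...
 *	free(mem);
 */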
/**
 * Get guest vring info, including the vring address, vring size, etc.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested vring info
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			      struct rte_vhost_vring *vring);
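
/*
 * Example: a minimal sketch (illustrative only) of inspecting every vring
 * of a device, e.g. from the new_device() callback:
 *
 *	uint16_t i, nr = rte_vhost_get_vring_num(vid);
 *	struct rte_vhost_vring vring;
 *
 *	for (i = 0; i < nr; i++) {
 *		if (rte_vhost_get_vhost_vring(vid, i, &vring) != 0)
 *			continue;
 *		... vring.size, vring.kickfd and vring.desc are now usable ...
 *	}
 */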
/**
 * Get guest inflight vring info, including inflight ring and resubmit list.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param vring
 *  the structure to hold the requested inflight vring info
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
	struct rte_vhost_ring_inflight *vring);
/**
 * Set split inflight descriptor.
 *
 * This function saves descriptors that have been consumed from the
 * available ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t idx);
/**
 * Set packed inflight descriptor and get corresponding inflight entry.
 *
 * This function saves descriptors that have been consumed from the
 * descriptor ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @param last
 *  last of descriptors
 * @param inflight_entry
 *  corresponding inflight entry
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head, uint16_t last, uint16_t *inflight_entry);
/**
 * Save the head of the descriptor list for the last batch of used
 * descriptors.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param idx
 *  descriptor entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_split(int vid,
	uint16_t vring_idx, uint16_t idx);
/**
 * Update the inflight free_head, used_idx and used_wrap_counter.
 *
 * This function will update status first before updating descriptors
 * to the used ring.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  head of descriptors
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_last_inflight_io_packed(int vid,
	uint16_t vring_idx, uint16_t head);
/**
 * Clear the split inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param last_used_idx
 *  last used idx of used ring
 * @param idx
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
	uint16_t last_used_idx, uint16_t idx);
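
/*
 * Example: a minimal sketch (illustrative only) of the split-ring inflight
 * lifecycle for one descriptor; "avail_ring", "avail_idx", "used_idx" and
 * "size" stand for the application's own view of the ring and are
 * assumptions, not part of this API:
 *
 *	uint16_t desc_idx = avail_ring[avail_idx & (size - 1)];
 *
 *	rte_vhost_set_inflight_desc_split(vid, vring_idx, desc_idx);
 *	... process the I/O request ...
 *	rte_vhost_set_last_inflight_io_split(vid, vring_idx, desc_idx);
 *	... fill the used ring entry ...
 *	rte_vhost_clr_inflight_desc_split(vid, vring_idx, used_idx, desc_idx);
 */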
/**
 * Clear the packed inflight status.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @param head
 *  inflight entry index
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
	uint16_t head);
/**
 * Notify the guest that used descriptors have been added to the vring. This
 * function acts as a memory barrier.
 *
 * @param vid
 *  vhost device ID
 * @param vring_idx
 *  vring index
 * @return
 *  0 on success, -1 on failure
 */
int rte_vhost_vring_call(int vid, uint16_t vring_idx);
/**
 * Get vhost RX queue avail count.
 *
 * @param vid
 *  vhost device ID
 * @param qid
 *  virtio queue index in mq case
 * @return
 *  num of desc available
 */
uint32_t rte_vhost_rx_queue_count(int vid, uint16_t qid);
/**
 * Get log base and log size of the vhost device
 *
 * @param vid
 *  vhost device ID
 * @param log_base
 *  vhost log base
 * @param log_size
 *  vhost log size
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_log_base(int vid, uint64_t *log_base, uint64_t *log_size);
/**
 * Get last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx);
/**
 * Get last_avail/last_used of the vhost virtqueue
 *
 * This function is designed for the reconnection and it's specific for
 * the packed ring as we can get the two parameters from the inflight
 * information.
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  vhost last_avail_idx to get
 * @param last_used_idx
 *  vhost last_used_idx to get
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_get_vring_base_from_inflight(int vid,
	uint16_t queue_id, uint16_t *last_avail_idx, uint16_t *last_used_idx);
/**
 * Set last_avail/used_idx of the vhost virtqueue
 *
 * @param vid
 *  vhost device ID
 * @param queue_id
 *  vhost queue index
 * @param last_avail_idx
 *  last_avail_idx to set
 * @param last_used_idx
 *  last_used_idx to set
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx);
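
/*
 * Example: a minimal sketch (illustrative only) of saving and restoring
 * ring positions across a reconnection:
 *
 *	uint16_t avail, used;
 *
 *	if (rte_vhost_get_vring_base(vid, queue_id, &avail, &used) != 0)
 *		return -1;
 *	... reconnect / restart the device ...
 *	rte_vhost_set_vring_base(vid, queue_id, avail, used);
 */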
/**
 * Register external message handling callbacks
 *
 * @param vid
 *  vhost device ID
 * @param ops
 *  virtio external callbacks to register
 * @param ctx
 *  additional context passed to the callbacks
 * @return
 *  0 on success, -1 on failure
 */
int
rte_vhost_extern_callback_register(int vid,
	struct rte_vhost_user_extern_ops const * const ops, void *ctx);
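
/*
 * Example: a minimal sketch (illustrative only) of an external message
 * handler that inspects messages without consuming them; "on_pre_msg" is a
 * hypothetical name:
 *
 *	static enum rte_vhost_msg_result
 *	on_pre_msg(int vid, void *msg)
 *	{
 *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
 *	}
 *
 *	static const struct rte_vhost_user_extern_ops ext_ops = {
 *		.pre_msg_handle = on_pre_msg,
 *	};
 *
 *	rte_vhost_extern_callback_register(vid, &ext_ops, NULL);
 *
 * Returning RTE_VHOST_MSG_RESULT_NOT_HANDLED lets the built-in message
 * handling proceed as usual.
 */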
/**
 * Get vDPA device for the vhost device.
 *
 * @param vid
 *  vhost device id
 * @return
 *  vDPA device pointer on success, NULL on failure
 */
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid);
/**
 * Notify the guest that it should get the virtio configuration space from
 * the backend.
 *
 * @param vid
 *  vhost device ID
 * @param need_reply
 *  wait for the master to respond with the status of this operation
 * @return
 *  0 on success, < 0 on failure
 */
int
rte_vhost_slave_config_change(int vid, bool need_reply);
#ifdef __cplusplus
}
#endif

#endif /* _RTE_VHOST_H_ */