/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>

#include "rte_vhost.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t desc_count;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint32_t len;
	uint32_t count;
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc	*desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail	*avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used	*used;
		struct vring_packed_desc_event *device_event;
	};
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index we notify to front end. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if device should be started/stopped */
	int			backend;
	int			enabled;
	int			access_ok;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of used ring, for logging */
	uint64_t		log_guest_addr;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	union {
		struct vring_used_elem  *shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t		shadow_used_idx;
	struct vhost_vring_addr	ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
	uint16_t		log_cache_nb_elem;

	rte_rwlock_t		iotlb_lock;
	rte_rwlock_t		iotlb_pending_lock;
	struct rte_mempool	*iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	int			iotlb_cache_nr;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare below packed ring defines unconditionally
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in the same order.
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))

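/*
 * Illustrative note (an assumption, not code from this file): the bits above
 * are what this built-in backend offers; the subset the front-end accepts
 * ends up in dev->features and gates data-path behaviour. For example, the
 * virtio-net header layout depends on the negotiated set roughly like this:
 *
 *	if (dev->features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
 *				(1ULL << VIRTIO_F_VERSION_1)))
 *		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
 *	else
 *		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
 */
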
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/* The possible results of a message handling function */
enum vh_result {
	/* Message handling failed */
	VH_RESULT_ERR   = -1,
	/* Message handling successful */
	VH_RESULT_OK    =  0,
	/* Message handling successful and reply prepared */
	VH_RESULT_REPLY =  1,
};

/**
 * function prototype for the vhost backend to handle specific vhost user
 * messages prior to the master message handling
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @param skip_master
 *  If the handler requires skipping the master message handling, this variable
 *  shall be written 1, otherwise 0.
 * @return
 *  VH_RESULT_OK on success, VH_RESULT_REPLY on success with reply,
 *  VH_RESULT_ERR on failure
 */
typedef enum vh_result (*vhost_msg_pre_handle)(int vid, void *msg,
		uint32_t *skip_master);

/**
 * function prototype for the vhost backend to handle specific vhost user
 * messages after the master message handling is done
 *
 * @param vid
 *  vhost device id
 * @param msg
 *  Message pointer.
 * @return
 *  VH_RESULT_OK on success, VH_RESULT_REPLY on success with reply,
 *  VH_RESULT_ERR on failure
 */
typedef enum vh_result (*vhost_msg_post_handle)(int vid, void *msg);

/**
 * pre and post vhost user message handlers
 */
struct vhost_user_extern_ops {
	vhost_msg_pre_handle pre_msg_handle;
	vhost_msg_post_handle post_msg_handle;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* to tell if we need broadcast rarp packet */
	rte_atomic16_t		broadcast_rarp;
	uint32_t		nr_vring;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct ether_addr	mac;
	uint16_t		mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
	rte_spinlock_t		slave_req_lock;

	int			postcopy_ufd;
	int			postcopy_listening;

	/*
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 */
	int			vdpa_dev_id;

	/* private data for virtio device */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = *((volatile uint16_t *) &desc->flags);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

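/*
 * Worked example (illustrative): on the driver's first lap around the
 * packed ring its avail wrap counter is 1, so a freshly posted descriptor
 * has F_AVAIL=1 and F_USED=0, and desc_is_avail(desc, true) returns true.
 * Once the device marks the descriptor used (F_AVAIL=1, F_USED=1) the same
 * call returns false until the driver wraps and flips its counter.
 */
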
#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * __sync_ built-ins are deprecated, but __atomic_ ones
	 * are sub-optimized in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_smp_wmb();

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}

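/*
 * Worked example (illustrative): for a guest write at physical address
 * 0x12345 with len = 1, page = 0x12345 / VHOST_LOG_PAGE = 18, so
 * vhost_log_page() sets bit (18 % 8) = 2 of log_base[18 / 8] = log_base[2],
 * marking dirty page 18 for the live-migration code to copy.
 */
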
static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base))
		return;

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	/*
	 * It is expected a write memory barrier has been issued
	 * before this function is called.
	 */

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * '__sync' builtins are deprecated, but '__atomic' ones
		 * are sub-optimized in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_smp_wmb();

	vq->log_cache_nb_elem = 0;
}

static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_smp_wmb();
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}

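/*
 * Illustrative usage sketch, not part of the upstream API (the function
 * name is hypothetical): data-path code typically logs each used-ring
 * update through the per-queue cache and flushes once per burst.
 */
static __rte_always_inline void
vhost_example_log_used_burst(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t used_offset, uint64_t used_len)
{
	/* Accumulate the dirty bits in vq->log_cache instead of touching
	 * the shared log bitmap for every single descriptor.
	 */
	vhost_log_cache_used_vring(dev, vq, used_offset, used_len);

	/* Make the used-ring update visible before publishing dirty bits. */
	rte_smp_wmb();

	/* Flush all cached bits to the dirty log in one pass. */
	vhost_log_cache_sync(dev, vq);
}
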
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
	RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, int did);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

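/*
 * Illustrative usage sketch (hypothetical helper, not upstream API):
 * callers request a length in *len and must check how much was actually
 * mapped; a short mapping means the buffer crosses an unmapped or
 * not-yet-translated region and has to be handled piece by piece.
 */
static __rte_always_inline void *
vhost_example_map_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len)
{
	uint64_t mapped_len = desc_len;
	uint64_t vva;

	vva = vhost_iova_to_vva(dev, vq, desc_addr, &mapped_len,
			VHOST_ACCESS_RO);
	if (!vva || mapped_len != desc_len)
		return NULL; /* not (fully) mapped, caller must cope */

	return (void *)(uintptr_t)vva;
}
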
#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

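/*
 * Worked example (illustrative): with event_idx = 5, old = 3 and
 * new_idx = 8, (uint16_t)(8 - 5 - 1) = 2 is less than (uint16_t)(8 - 3) = 5,
 * so an event is needed: the used index just crossed the point the other
 * side asked to be notified at. The unsigned arithmetic keeps the test
 * correct across 16-bit index wrap-around.
 */
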
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;

		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);
		if (vhost_need_event(vhost_used_event(vq), new, old)
			&& (vq->callfd >= 0)) {
			vq->signalled_used = vq->last_used_idx;
			eventfd_write(vq->callfd, (eventfd_t) 1);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
			eventfd_write(vq->callfd, (eventfd_t)1);
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick)
		eventfd_write(vq->callfd, (eventfd_t)1);
}

#endif /* _VHOST_NET_CDEV_H_ */