/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			   sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
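/*
 * Note: struct vring_packed_desc (declared below for older kernels) is
 * 16 bytes, so with the usual 64-byte cache line PACKED_BATCH_SIZE
 * evaluates to 4 and PACKED_BATCH_MASK to 3; a batch therefore covers
 * exactly one cache line of descriptors.
 */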
#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif
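/*
 * Illustrative use (the callee name below is a placeholder, not part of
 * this header):
 *
 *	uint16_t i;
 *
 *	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *		process_desc(&vq->desc_packed[vq->last_avail_idx + i]);
 *	}
 *
 * The loop is unrolled by four when a compiler-specific pragma is available
 * and degrades to a plain loop otherwise.
 */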
/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};
/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc *desc;
	struct vring_packed_desc *desc_packed;
	struct vring_avail *avail;
	struct vring_packed_desc_event *driver_event;
	struct vring_used *used;
	struct vring_packed_desc_event *device_event;
	uint32_t size;

	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	/* Last used index we notify to front end. */
	uint16_t signalled_used;
	bool signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)

	/* Backend value to determine if device should be started/stopped */
	int backend;
	rte_spinlock_t access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int callfd;
	/* Currently unused as polling mode is enabled */
	int kickfd;

	/* Physical address of used ring, for logging */
	uint64_t log_guest_addr;

	/* Inflight shared memory info */
	struct rte_vhost_inflight_info_split *inflight_split;
	struct rte_vhost_inflight_info_packed *inflight_packed;
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t global_counter;

	uint16_t last_zmbuf_idx;
	struct zcopy_mbuf *zmbufs;
	struct zcopy_mbuf_list zmbuf_list;

	struct vring_used_elem *shadow_used_split;
	struct vring_used_elem_packed *shadow_used_packed;
	uint16_t shadow_used_idx;
	/* Record the latest cache-aligned desc index for packed ring enqueue */
	uint16_t shadow_aligned_idx;
	struct vhost_vring_addr ring_addrs;

	struct batch_copy_elem *batch_copy_elems;
	uint16_t batch_copy_nb_elems;
	bool used_wrap_counter;
	bool avail_wrap_counter;

	struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
	uint16_t log_cache_nb_elem;

	rte_rwlock_t iotlb_lock;
	rte_rwlock_t iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;
/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif
/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif
/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare below packed ring defines unconditionally
 * as Kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL (1ULL << 7)
#define VRING_DESC_F_USED (1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif
/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int fd;
	void *addr;
	uint64_t size;
};
/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t features;
	uint64_t protocol_features;
	int vid;
	/* to tell if we need broadcast rarp packet */
	rte_atomic16_t broadcast_rarp;
	int dequeue_zero_copy;
	struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char ifname[IF_NAME_SZ];
	struct rte_ether_addr mac;

	struct vhost_device_ops const *notify_ops;

	uint32_t nr_guest_pages;
	uint32_t max_guest_pages;
	struct guest_page *guest_pages;

	rte_spinlock_t slave_req_lock;

	int postcopy_listening;

	/*
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 */
	int vdpa_dev_id;

	/* context data for the external message handlers */
	void *extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}
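/*
 * Example: with vq->size == 256, last_used_idx == 255 and
 * used_wrap_counter == true, consuming two descriptors leaves
 * last_used_idx == 1 and flips used_wrap_counter to false. desc_is_avail()
 * then treats a descriptor as available only when its AVAIL flag equals the
 * current wrap counter and its USED flag differs from it.
 */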
void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
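/*
 * Typical call site (sketch, assuming a used ring element update during
 * live migration):
 *
 *	vhost_log_cache_used_vring(dev, vq,
 *		offsetof(struct vring_used, ring[idx]),
 *		sizeof(vq->used->ring[idx]));
 *
 * The cached variant batches dirty bits in vq->log_cache and relies on a
 * later vhost_log_cache_sync() to flush them to the dirty log shared with
 * the front end.
 */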
static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
	RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
		VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len < r->host_user_addr + r->size)
			return r->guest_phys_addr + vva - r->host_user_addr;
	}

	return 0;
}
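/*
 * Both translations above are best effort: they return 0 when the requested
 * range is not fully covered by a single guest page or memory region, so
 * callers must treat 0 as "no mapping" rather than a valid address.
 */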
static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}
int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, int did);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
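/*
 * Note: when VIRTIO_F_IOMMU_PLATFORM has not been negotiated, the "iova"
 * carried in descriptors is a guest physical address and can be translated
 * directly from the memory table; otherwise the per-virtqueue IOTLB
 * (populated via IOTLB miss/update messages) must be consulted, which is
 * what __vhost_iova_to_vva() does.
 */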
#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have just
 * incremented index from old to new_idx, should we trigger an event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
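/*
 * Worked example: with old == 10, new_idx == 15 and event_idx == 12,
 * (uint16_t)(15 - 12 - 1) == 2 is less than (uint16_t)(15 - 10) == 5, so the
 * update crossed the index the guest asked to be notified at and an event
 * is needed. The uint16_t casts keep the comparison correct across index
 * wrap-around.
 */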
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__, vhost_used_event(vq), old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}
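/*
 * With VIRTIO_RING_F_EVENT_IDX negotiated, the guest publishes the used
 * index it wants to be interrupted at (read via vhost_used_event()), so the
 * eventfd is only written when that index has just been crossed; without the
 * feature, suppression falls back to VRING_AVAIL_F_NO_INTERRUPT.
 */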
static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}
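/*
 * driver_event->off_wrap packs the descriptor offset the driver wants an
 * event at into bits 0-14 and its expected wrap counter into bit 15, hence
 * the "& ~(1 << 15)" mask and the comparison of bit 15 against
 * used_wrap_counter above.
 */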
static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}
static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;

		/* start of buffer is after mbuf structure and priv data */
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}
static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}
static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

#endif /* _VHOST_NET_CDEV_H_ */