/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
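
/*
 * Note: the VIRTIO_DEV_* values above (except VIRTIO_DEV_STOPPED) are bit
 * flags combined in virtio_net->flags; e.g. a fully started device has both
 * VIRTIO_DEV_READY and VIRTIO_DEV_RUNNING set. VIRTIO_DEV_STOPPED is a
 * sentinel compared against vhost_virtqueue->backend.
 */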

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

/**
 * Structure that contains the buffer address, length and descriptor index
 * from the vring, used for scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in the zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t desc_count;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);

/*
 * Structure that contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint32_t len;
	uint32_t count;
};

/**
 * Structure that contains the variables relevant to a RX/TX virtqueue.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc	*desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail	*avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used	*used;
		struct vring_packed_desc_event *device_event;
	};
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index we notified the front end about. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value used to determine whether the device should be started/stopped */
	int			backend;
	int			enabled;
	int			access_ok;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of the used ring, for logging */
	uint64_t		log_guest_addr;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	union {
		struct vring_used_elem  *shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t		shadow_used_idx;
	struct vhost_vring_addr ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
	uint16_t		log_cache_nb_elem;

	rte_rwlock_t	iotlb_lock;
	rte_rwlock_t	iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	int		iotlb_cache_nr;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2
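
/*
 * Note: VRING_EVENT_F_ENABLE and VRING_EVENT_F_DISABLE turn device-to-driver
 * notifications on or off wholesale, while VRING_EVENT_F_DESC requests a
 * notification only once the descriptor encoded in off_wrap has been used;
 * vhost_vring_call_packed() below acts on these values.
 */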

/*
 * Available and used descriptors are processed in the same order.
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))
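
/*
 * Illustrative only: the negotiated feature set ends up being the
 * intersection of what the frontend offers and this mask, conceptually
 *   dev->features = frontend_features & VIRTIO_NET_SUPPORTED_FEATURES;
 * (the real negotiation happens in the vhost-user message handlers and can
 * be further restricted via rte_vhost_driver_disable_features()).
 */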

struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/**
 * Device structure that contains all the configuration information
 * related to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* Tells whether we need to broadcast a RARP packet */
	rte_atomic16_t		broadcast_rarp;
	uint32_t		nr_vring;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct rte_ether_addr	mac;
	uint16_t		mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
	rte_spinlock_t		slave_req_lock;

	int			postcopy_ufd;
	int			postcopy_listening;

	/*
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 */
	int			vdpa_did;

	/* context data for the external message handlers */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}
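
/*
 * In the packed ring, each descriptor carries an AVAIL and a USED flag whose
 * meaning flips every time the ring wraps. A descriptor is available to the
 * device when AVAIL matches the current wrap counter and USED does not:
 * e.g. with wrap_counter == 1, AVAIL=1/USED=0 means "available", while
 * AVAIL=1/USED=1 means "already used".
 */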
static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = *((volatile uint16_t *) &desc->flags);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t offset, uint64_t len)
{
	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}
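
/*
 * Typical data path usage (a sketch, not taken verbatim from the sources):
 * after updating used->idx, only that field needs to be marked dirty, e.g.
 *   vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
 *                        sizeof(vq->used->idx));
 */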

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
	RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
		VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}
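
/*
 * A return value of 0 from gpa_to_hpa() means no single guest page covers
 * [gpa, gpa + size); callers treat that as a translation failure.
 */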

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, int did);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have only one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
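
/*
 * Note: *len is an in/out parameter: on return it holds the length that is
 * actually mapped contiguously, which may be smaller than requested, so
 * callers must be prepared to translate the remainder separately.
 */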

#define vhost_avail_event(vr) \
	(*(volatile uint16_t *)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t *)&(vr)->avail->ring[(vr)->size])
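
/*
 * With VIRTIO_RING_F_EVENT_IDX, the spare slot after each ring is repurposed:
 * the driver publishes used_event past the end of the avail ring and the
 * device publishes avail_event past the end of the used ring, which is what
 * the two macros above read.
 */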

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
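
/*
 * Worked example: with old = 10, new_idx = 15 and event_idx = 12,
 * (uint16_t)(15 - 12 - 1) = 2 < (uint16_t)(15 - 10) = 5, so event_idx was
 * passed in this batch and an event is needed. With event_idx = 20 the left
 * side wraps to 65530, which is not below 5, so no event is needed. The
 * unsigned arithmetic makes the comparison robust across 16-bit wraparound.
 */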

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__, vhost_used_event(vq), old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid))
			eventfd_write(vq->callfd, (eventfd_t) 1);
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
			eventfd_write(vq->callfd, (eventfd_t)1);
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick)
		eventfd_write(vq->callfd, (eventfd_t)1);
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}
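
/*
 * Used by dequeue zero copy: the mbuf's buf_addr/buf_iova were pointed at
 * guest memory, so they must be restored to the mbuf's own buffer before
 * the mbuf is returned to its mempool.
 */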
static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;

		/* start of buffer is after mbuf structure and priv data */
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

#endif /* _VHOST_NET_CDEV_H_ */