/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>

#include "rte_vhost.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);

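/*
 * Note (added for clarity; lifecycle inferred from the zero-copy dequeue
 * path): a transmitted mbuf points directly into a guest buffer, so its
 * zcopy_mbuf entry stays on zmbuf_list while the mbuf is still
 * referenced; only once the mbuf is freed can the matching desc_idx be
 * returned to the guest through the used ring.
 */
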
/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc *desc;
	struct vring_avail *avail;
	struct vring_used *used;
	uint32_t size;

	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	/* Last used index we notify to front end. */
	uint16_t signalled_used;
#define VIRTIO_INVALID_EVENTFD (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD (-2)

	/* Backend value to determine if device should be started/stopped */
	int backend;

	int enabled;
	int access_ok;
	rte_spinlock_t access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int callfd;
	/* Currently unused as polling mode is enabled */
	int kickfd;

	/* Physical address of used ring, for logging */
	uint64_t log_guest_addr;

	uint16_t nr_zmbuf;
	uint16_t zmbuf_size;
	uint16_t last_zmbuf_idx;
	struct zcopy_mbuf *zmbufs;
	struct zcopy_mbuf_list zmbuf_list;

	struct vring_used_elem *shadow_used_ring;
	uint16_t shadow_used_idx;
	struct vhost_vring_addr ring_addrs;

	struct batch_copy_elem *batch_copy_elems;
	uint16_t batch_copy_nb_elems;

	rte_rwlock_t iotlb_lock;
	rte_rwlock_t iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	int iotlb_cache_nr;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING 0x100
#define VHOST_MAX_QUEUE_PAIRS 0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO 0x1
#define VHOST_ACCESS_WO 0x2
#define VHOST_ACCESS_RW 0x3
	__u8 perm;
#define VHOST_IOTLB_MISS 1
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM))

struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t features;
	uint64_t protocol_features;
	int vid;
	uint32_t flags;
	uint16_t vhost_hlen;
	/* to tell if we need broadcast rarp packet */
	rte_atomic16_t broadcast_rarp;
	uint32_t nr_vring;
	int dequeue_zero_copy;
	struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char ifname[IF_NAME_SZ];
	uint64_t log_size;
	uint64_t log_base;
	uint64_t log_addr;
	struct ether_addr mac;
	uint16_t mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t nr_guest_pages;
	uint32_t max_guest_pages;
	struct guest_page *guest_pages;

	int slave_req_fd;

	/*
	 * Device id to identify a specific backend device.
	 * It's set to -1 for the default software implementation.
	 */
	int vdpa_dev_id;
} __rte_cache_aligned;

#define VHOST_LOG_PAGE 4096

/*
 * Mark all pages belonging to the same dirty log bitmap byte
 * as dirty. The goal is to avoid concurrency between different
 * threads doing atomic read-modify-writes on the same byte.
 */
static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	log_base[page / 8] = 0xff;
}

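/*
 * Worked example (added for illustration): each bitmap byte covers
 * 8 pages, i.e. 32KB of guest memory with VHOST_LOG_PAGE of 4096.
 * A write to guest page 34 maps to log_base[34 / 8] = log_base[4];
 * storing 0xff marks pages 32-39 dirty at once, which is coarser
 * than strictly needed but lets concurrent writers use a plain
 * store instead of an atomic or-into-byte.
 */
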
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_smp_wmb();

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}

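/*
 * Typical usage (sketch, assuming the data-path callers in
 * virtio_net.c): after updating a used ring entry, log just that
 * entry, e.g.
 *
 *	vhost_log_used_vring(dev, vq,
 *		offsetof(struct vring_used, ring[used_idx]),
 *		sizeof(vq->used->ring[used_idx]));
 *
 * log_guest_addr holds the guest physical address of the used ring,
 * so offset is relative to the start of struct vring_used.
 */
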
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define VHOST_LOG_DEBUG(log_type, fmt, args...) \
	RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define VHOST_LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}

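/*
 * Note on gpa_to_hpa (added for clarity): the lookup is a linear scan
 * over the guest page array, and a return value of 0 means no single
 * page fully covers [gpa, gpa + size), so callers must treat 0 as a
 * translation failure.
 */
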
static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void free_vq(struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, int did);
void vhost_detach_vdpa_device(int vid);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we only have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t size, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t size, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_gpa_to_vva(dev->mem, iova);

	return __vhost_iova_to_vva(dev, vq, iova, size, perm);
}

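/*
 * Note (added for clarity): when VIRTIO_F_IOMMU_PLATFORM has not been
 * negotiated, ring addresses are guest physical addresses and can be
 * translated directly through the memory region table. Otherwise they
 * are I/O virtual addresses and must go through the per-virtqueue
 * IOTLB cache, which may in turn ask the frontend to service a miss.
 */
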
#define vhost_used_event(vr) \
	(*(volatile uint16_t *)&(vr)->avail->ring[(vr)->size])

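/*
 * Note (added for clarity): with VIRTIO_RING_F_EVENT_IDX, the virtio
 * spec places used_event in the two bytes immediately after the last
 * available ring entry, which is why it is addressed above as
 * avail->ring[size].
 */
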
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

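/*
 * Worked example (added for illustration): the comparison tests, in
 * modulo-2^16 arithmetic, whether event_idx falls in [old, new_idx - 1].
 * With old = 10, new_idx = 15: event_idx = 12 gives 2 < 5, so kick;
 * event_idx = 20 gives 65530 < 5, which is false, so no kick. It also
 * survives wraparound: old = 65534, new_idx = 3, event_idx = 0 gives
 * 2 < 5, so kick.
 */
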
static __rte_always_inline void
vhost_vring_call(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_mb();

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;

		VHOST_LOG_DEBUG(VHOST_DATA, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);
		if (vhost_need_event(vhost_used_event(vq), new, old)
			&& (vq->callfd >= 0)) {
			vq->signalled_used = vq->last_used_idx;
			eventfd_write(vq->callfd, (eventfd_t) 1);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0))
			eventfd_write(vq->callfd, (eventfd_t)1);
	}
}

#endif /* _VHOST_NET_CDEV_H_ */