1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2018 Intel Corporation
5 #ifndef _VHOST_NET_CDEV_H_
6 #define _VHOST_NET_CDEV_H_
10 #include <sys/types.h>
11 #include <sys/queue.h>
13 #include <linux/vhost.h>
14 #include <linux/virtio_net.h>
15 #include <sys/socket.h>
19 #include <rte_ether.h>
20 #include <rte_malloc.h>
21 #include <rte_dmadev.h>
23 #include "rte_vhost.h"
24 #include "vdpa_driver.h"
26 #include "rte_vhost_async.h"
28 /* Used to indicate that the device is running on a data core */
29 #define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
30 /* Used to indicate that the device is ready to operate */
31 #define VIRTIO_DEV_READY ((uint32_t)1 << 1)
32 /* Used to indicate that the built-in vhost net device backend is enabled */
33 #define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
35 /* Used to indicate that the device has its own data path and is configured */
35 #define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
36 /* Used to indicate that the feature negotiation failed */
37 #define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
38 /* Used to indicate that the virtio_net tx code should fill TX ol_flags */
39 #define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)
40 /* Used to indicate the application has requested statistics collection */
41 #define VIRTIO_DEV_STATS_ENABLED ((uint32_t)1 << 6)
43 /* Backend value set by guest. */
44 #define VIRTIO_DEV_STOPPED -1
46 #define BUF_VECTOR_MAX 256
48 #define VHOST_LOG_CACHE_NR 32
50 #define MAX_PKT_BURST 32
52 #define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST)
53 #define VHOST_MAX_ASYNC_VEC 2048
54 #define VIRTIO_MAX_RX_PKTLEN 9728U
55 #define VHOST_DMA_MAX_COPY_COMPLETE ((VIRTIO_MAX_RX_PKTLEN / RTE_MBUF_DEFAULT_DATAROOM) \
58 #define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
59 ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
61 #define PACKED_DESC_DEQUEUE_USED_FLAG(w) \
62 ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
63 #define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
64 VRING_DESC_F_INDIRECT)
66 #define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
67 sizeof(struct vring_packed_desc))
68 #define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
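/*
 * Editor's note (illustrative): struct vring_packed_desc is 16 bytes, so
 * with a 64-byte cache line PACKED_BATCH_SIZE evaluates to 4 and
 * PACKED_BATCH_MASK to 0x3. A hypothetical alignment check before taking
 * a batched path could look like:
 *
 *     if (!(vq->last_avail_idx & PACKED_BATCH_MASK))
 *         // the next PACKED_BATCH_SIZE descriptors share a cache line,
 *         // so an unrolled batch can be attempted
 *
 * This is a sketch of how the mask is meant to be used, not the exact
 * data-path code.
 */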
70 #ifdef VHOST_GCC_UNROLL_PRAGMA
71 #define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
72 for (iter = val; iter < size; iter++)
75 #ifdef VHOST_CLANG_UNROLL_PRAGMA
76 #define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
77 for (iter = val; iter < size; iter++)
80 #ifdef VHOST_ICC_UNROLL_PRAGMA
81 #define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
82 for (iter = val; iter < size; iter++)
85 #ifndef vhost_for_each_try_unroll
86 #define vhost_for_each_try_unroll(iter, val, num) \
87 for (iter = val; iter < num; iter++)
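/*
 * Editor's note: vhost_for_each_try_unroll() is used like an ordinary for
 * loop; the pragma variants above only ask the compiler to unroll it by 4.
 * Hypothetical usage sketch (the lens/avail_idx names are invented for
 * illustration):
 *
 *     uint16_t i;
 *     vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *         lens[i] = vq->desc_packed[avail_idx + i].len;
 *     }
 */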
91 * Structure containing the buffer address, length and descriptor index
92 * from the vring, used for scatter RX.
102 * Structure containing the info for each batched memory copy.
104 struct batch_copy_elem {
112 * Structure that contains the info for batched dirty logging.
114 struct log_cache_entry {
119 struct vring_used_elem_packed {
127 * Virtqueue statistics
129 struct virtqueue_stats {
135 /* Packet size bins as in RFC 2819: undersized [0], 64 bytes [1], etc. */
135 uint64_t size_bins[8];
136 uint64_t guest_notifications;
138 uint64_t iotlb_misses;
139 uint64_t inflight_submitted;
140 uint64_t inflight_completed;
155 struct vhost_iov_iter {
156 /** pointer to the iovec array */
157 struct vhost_iovec *iov;
158 /** number of iovec in this iterator */
159 unsigned long nr_segs;
162 struct async_dma_vchan_info {
163 /* circular array to track if packet copy completes */
164 bool **pkts_cmpl_flag_addr;
166 /* max elements in 'pkts_cmpl_flag_addr' */
168 /* ring index mask for 'pkts_cmpl_flag_addr' */
172 * DMA virtual channel lock. Although DMA virtual channels can be
173 * bound to data plane threads, the vhost control plane
174 * thread may call data plane functions too, causing
175 * DMA device contention.
177 * For example, on VM exit the vhost control plane thread needs
178 * to clear in-flight packets before disabling the vring, while
179 * another data plane thread may still be enqueuing packets to the same
180 * vring with the same DMA virtual channel. As dmadev PMD functions
181 * are lock-free, the control plane and data plane threads could
182 * operate on the same DMA virtual channel at the same time.
184 rte_spinlock_t dma_lock;
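/*
 * Editor's note: a minimal sketch of how dma_lock is meant to be taken
 * around the lock-free dmadev calls (dma_id, vchan_id and the addresses
 * are placeholders for illustration):
 *
 *     struct async_dma_vchan_info *vchan =
 *             &dma_copy_track[dma_id].vchans[vchan_id];
 *
 *     rte_spinlock_lock(&vchan->dma_lock);
 *     if (rte_dma_copy(dma_id, vchan_id, src_iova, dst_iova, len, 0) >= 0)
 *         rte_dma_submit(dma_id, vchan_id);
 *     rte_spinlock_unlock(&vchan->dma_lock);
 *
 * Both the data plane and the control plane paths are expected to follow
 * this pattern so they never touch the same virtual channel concurrently.
 */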
187 struct async_dma_info {
188 struct async_dma_vchan_info *vchans;
189 /* number of registered virtual channels */
193 extern struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
196 * inflight async packet information
198 struct async_inflight_info {
199 struct rte_mbuf *mbuf;
200 uint16_t descs; /* num of descs inflight */
201 uint16_t nr_buffers; /* num of buffers inflight for packed ring */
202 struct virtio_net_hdr nethdr;
206 struct vhost_iov_iter iov_iter[VHOST_MAX_ASYNC_IT];
207 struct vhost_iovec iovec[VHOST_MAX_ASYNC_VEC];
211 /* data transfer status */
212 struct async_inflight_info *pkts_info;
214 * Packet reorder array. "true" indicates that the DMA device has
215 * completed all copies for the packet.
217 * Note that this array could be written by multiple threads
218 * simultaneously. For example, suppose thread0 and
219 * thread1 receive packets from the NIC and then enqueue them to
220 * vring0 and vring1 with their own DMA devices DMA0 and DMA1; it's
221 * possible for thread0 to get completed copies belonging to
222 * vring1 from DMA0, while thread0 is calling rte_vhost_poll
223 * _enqueue_completed() for vring0 and thread1 is calling
224 * rte_vhost_submit_enqueue_burst() for vring1. In this case,
225 * vq->access_lock cannot protect pkts_cmpl_flag of vring1.
227 * However, since offloading is done on a per-packet basis, each packet's
228 * flag will only be written by one thread, and a single-byte
229 * write is atomic, so no lock for pkts_cmpl_flag is needed.
231 bool *pkts_cmpl_flag;
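/*
 * Editor's note: illustrative sketch of the per-packet completion protocol
 * described above (ring_idx, from, n and max are placeholders; index math
 * simplified):
 *
 *     // DMA completion side: each finished copy flips one packet's flag
 *     *vchan->pkts_cmpl_flag_addr[ring_idx] = true;
 *
 *     // vring side: harvest completed packets in order
 *     while (n < max && async->pkts_cmpl_flag[from]) {
 *         async->pkts_cmpl_flag[from] = false;
 *         from = (from + 1) % vq->size;
 *         n++;
 *     }
 *
 * Because each packet's flag is written by a single thread and a one-byte
 * store is atomic, no extra locking is required.
 */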
233 uint16_t pkts_inflight_n;
235 struct vring_used_elem *descs_split;
236 struct vring_used_elem_packed *buffers_packed;
239 uint16_t desc_idx_split;
240 uint16_t buffer_idx_packed;
243 uint16_t last_desc_idx_split;
244 uint16_t last_buffer_idx_packed;
249 * Structure containing variables relevant to RX/TX virtqueues.
251 struct vhost_virtqueue {
253 struct vring_desc *desc;
254 struct vring_packed_desc *desc_packed;
257 struct vring_avail *avail;
258 struct vring_packed_desc_event *driver_event;
261 struct vring_used *used;
262 struct vring_packed_desc_event *device_event;
266 uint16_t last_avail_idx;
267 uint16_t last_used_idx;
268 /* Last used index we notified to the front end. */
269 uint16_t signalled_used;
270 bool signalled_used_valid;
271 #define VIRTIO_INVALID_EVENTFD (-1)
272 #define VIRTIO_UNINITIALIZED_EVENTFD (-2)
278 rte_spinlock_t access_lock;
282 struct vring_used_elem *shadow_used_split;
283 struct vring_used_elem_packed *shadow_used_packed;
285 uint16_t shadow_used_idx;
286 /* Latest cache-aligned descriptor index recorded for packed ring enqueue */
287 uint16_t shadow_aligned_idx;
288 /* First descriptor index recorded for packed ring dequeue */
289 uint16_t shadow_last_used_idx;
291 uint16_t batch_copy_nb_elems;
292 struct batch_copy_elem *batch_copy_elems;
294 bool used_wrap_counter;
295 bool avail_wrap_counter;
297 /* Physical address of used ring, for logging */
298 uint16_t log_cache_nb_elem;
299 uint64_t log_guest_addr;
300 struct log_cache_entry *log_cache;
302 rte_rwlock_t iotlb_lock;
303 rte_rwlock_t iotlb_pending_lock;
304 struct rte_mempool *iotlb_pool;
305 TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
306 TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
309 /* Used to notify the guest (trigger interrupt) */
311 /* Currently unused as polling mode is enabled */
314 /* inflight shared memory info */
316 struct rte_vhost_inflight_info_split *inflight_split;
317 struct rte_vhost_inflight_info_packed *inflight_packed;
319 struct rte_vhost_resubmit_info *resubmit_inflight;
320 uint64_t global_counter;
322 struct vhost_async *async;
325 #define VIRTIO_UNINITIALIZED_NOTIF (-1)
327 struct vhost_vring_addr ring_addrs;
328 struct virtqueue_stats stats;
329 } __rte_cache_aligned;
331 /* Virtio device status as per Virtio specification */
332 #define VIRTIO_DEVICE_STATUS_RESET 0x00
333 #define VIRTIO_DEVICE_STATUS_ACK 0x01
334 #define VIRTIO_DEVICE_STATUS_DRIVER 0x02
335 #define VIRTIO_DEVICE_STATUS_DRIVER_OK 0x04
336 #define VIRTIO_DEVICE_STATUS_FEATURES_OK 0x08
337 #define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET 0x40
338 #define VIRTIO_DEVICE_STATUS_FAILED 0x80
340 #define VHOST_MAX_VRING 0x100
341 #define VHOST_MAX_QUEUE_PAIRS 0x80
343 /* Declare IOMMU related bits for older kernels */
344 #ifndef VIRTIO_F_IOMMU_PLATFORM
346 #define VIRTIO_F_IOMMU_PLATFORM 33
348 struct vhost_iotlb_msg {
352 #define VHOST_ACCESS_RO 0x1
353 #define VHOST_ACCESS_WO 0x2
354 #define VHOST_ACCESS_RW 0x3
356 #define VHOST_IOTLB_MISS 1
357 #define VHOST_IOTLB_UPDATE 2
358 #define VHOST_IOTLB_INVALIDATE 3
359 #define VHOST_IOTLB_ACCESS_FAIL 4
363 #define VHOST_IOTLB_MSG 0x1
368 struct vhost_iotlb_msg iotlb;
375 * Define the virtio 1.0 feature bit for older kernels
377 #ifndef VIRTIO_F_VERSION_1
378 #define VIRTIO_F_VERSION_1 32
381 /* Declare packed ring related bits for older kernels */
382 #ifndef VIRTIO_F_RING_PACKED
384 #define VIRTIO_F_RING_PACKED 34
386 struct vring_packed_desc {
393 struct vring_packed_desc_event {
400 * Declare the packed ring defines below unconditionally,
401 * as the kernel header might use different names.
403 #define VRING_DESC_F_AVAIL (1ULL << 7)
404 #define VRING_DESC_F_USED (1ULL << 15)
406 #define VRING_EVENT_F_ENABLE 0x0
407 #define VRING_EVENT_F_DISABLE 0x1
408 #define VRING_EVENT_F_DESC 0x2
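/*
 * Editor's note (illustrative): with VRING_EVENT_F_DESC the driver event
 * area packs a 15-bit descriptor offset and the expected wrap counter in
 * bit 15 of off_wrap, e.g.:
 *
 *     uint16_t off_wrap = vq->driver_event->off_wrap;
 *     uint16_t off  = off_wrap & ~(1 << 15);   // descriptor offset
 *     bool     wrap = off_wrap >> 15;          // expected wrap counter
 *
 * vhost_vring_call_packed() below decodes off_wrap exactly this way before
 * deciding whether to signal the guest.
 */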
411 * Available and used descriptors are in the same order
413 #ifndef VIRTIO_F_IN_ORDER
414 #define VIRTIO_F_IN_ORDER 35
417 /* Features supported by this builtin vhost-user net driver. */
418 #define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
419 (1ULL << VIRTIO_F_ANY_LAYOUT) | \
420 (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
421 (1ULL << VIRTIO_NET_F_CTRL_RX) | \
422 (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
423 (1ULL << VIRTIO_NET_F_MQ) | \
424 (1ULL << VIRTIO_F_VERSION_1) | \
425 (1ULL << VHOST_F_LOG_ALL) | \
426 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
427 (1ULL << VIRTIO_NET_F_GSO) | \
428 (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
429 (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
430 (1ULL << VIRTIO_NET_F_HOST_UFO) | \
431 (1ULL << VIRTIO_NET_F_HOST_ECN) | \
432 (1ULL << VIRTIO_NET_F_CSUM) | \
433 (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
434 (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
435 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
436 (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
437 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
438 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
439 (1ULL << VIRTIO_RING_F_EVENT_IDX) | \
440 (1ULL << VIRTIO_NET_F_MTU) | \
441 (1ULL << VIRTIO_F_IN_ORDER) | \
442 (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
443 (1ULL << VIRTIO_F_RING_PACKED))
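/*
 * Editor's note: a sketch of how this mask relates to feature negotiation.
 * Conceptually, the negotiated set is the intersection of this mask and
 * the features acknowledged by the frontend (frontend_features is a
 * placeholder name, not a field of this header):
 *
 *     uint64_t negotiated = VIRTIO_NET_SUPPORTED_FEATURES & frontend_features;
 *     dev->features = negotiated;
 *
 * Only bits present in both masks may end up set in dev->features, which is
 * what the feature tests below (e.g. vq_is_packed()) rely on.
 */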
447 uint64_t guest_phys_addr;
449 uint64_t host_user_addr;
453 struct inflight_mem_info {
460 * Device structure containing all configuration information relating to the device.
464 /* Frontend (QEMU) memory and memory region information */
465 struct rte_vhost_memory *mem;
467 uint64_t protocol_features;
471 /* to tell if we need to broadcast a RARP packet */
472 int16_t broadcast_rarp;
478 struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
479 struct inflight_mem_info *inflight_info;
480 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
481 char ifname[IF_NAME_SZ];
485 struct rte_ether_addr mac;
489 struct rte_vhost_device_ops const *notify_ops;
491 uint32_t nr_guest_pages;
492 uint32_t max_guest_pages;
493 struct guest_page *guest_pages;
496 rte_spinlock_t slave_req_lock;
499 int postcopy_listening;
501 struct rte_vdpa_device *vdpa_dev;
503 /* context data for the external message handlers */
505 /* pre and post vhost-user message handlers for the device */
506 struct rte_vhost_user_extern_ops extern_ops;
507 } __rte_cache_aligned;
509 static __rte_always_inline bool
510 vq_is_packed(struct virtio_net *dev)
512 return dev->features & (1ull << VIRTIO_F_RING_PACKED);
516 desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
518 uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
520 return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
521 wrap_counter != !!(flags & VRING_DESC_F_USED);
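/*
 * Editor's note: usage sketch. A packed-ring descriptor is available when
 * its AVAIL bit matches the driver's wrap counter and its USED bit does
 * not; the acquire load inside desc_is_avail() orders the flags check
 * before reading the rest of the descriptor:
 *
 *     if (desc_is_avail(&vq->desc_packed[vq->last_avail_idx],
 *                       vq->avail_wrap_counter)) {
 *         // safe to read addr/len/id of this descriptor
 *     }
 */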
525 vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
527 vq->last_used_idx += num;
528 if (vq->last_used_idx >= vq->size) {
529 vq->used_wrap_counter ^= 1;
530 vq->last_used_idx -= vq->size;
535 vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
537 vq->last_avail_idx += num;
538 if (vq->last_avail_idx >= vq->size) {
539 vq->avail_wrap_counter ^= 1;
540 vq->last_avail_idx -= vq->size;
544 void __vhost_log_cache_write(struct virtio_net *dev,
545 struct vhost_virtqueue *vq,
546 uint64_t addr, uint64_t len);
547 void __vhost_log_cache_write_iova(struct virtio_net *dev,
548 struct vhost_virtqueue *vq,
549 uint64_t iova, uint64_t len);
550 void __vhost_log_cache_sync(struct virtio_net *dev,
551 struct vhost_virtqueue *vq);
552 void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
553 void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
554 uint64_t iova, uint64_t len);
556 static __rte_always_inline void
557 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
559 if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
560 __vhost_log_write(dev, addr, len);
563 static __rte_always_inline void
564 vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
566 if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
567 __vhost_log_cache_sync(dev, vq);
570 static __rte_always_inline void
571 vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
572 uint64_t addr, uint64_t len)
574 if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
575 __vhost_log_cache_write(dev, vq, addr, len);
578 static __rte_always_inline void
579 vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
580 uint64_t offset, uint64_t len)
582 if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
583 if (unlikely(vq->log_guest_addr == 0))
585 __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
590 static __rte_always_inline void
591 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
592 uint64_t offset, uint64_t len)
594 if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
595 if (unlikely(vq->log_guest_addr == 0))
597 __vhost_log_write(dev, vq->log_guest_addr + offset, len);
601 static __rte_always_inline void
602 vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
603 uint64_t iova, uint64_t len)
605 if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
608 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
609 __vhost_log_cache_write_iova(dev, vq, iova, len);
611 __vhost_log_cache_write(dev, vq, iova, len);
614 static __rte_always_inline void
615 vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
616 uint64_t iova, uint64_t len)
618 if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
621 if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
622 __vhost_log_write_iova(dev, vq, iova, len);
624 __vhost_log_write(dev, iova, len);
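/*
 * Editor's note: typical dirty-logging call pattern when live migration is
 * enabled (sketch; used_idx stands for whichever used-ring slot was just
 * written):
 *
 *     vhost_log_cache_used_vring(dev, vq,
 *             offsetof(struct vring_used, ring[used_idx]),
 *             sizeof(struct vring_used_elem));
 *     ...
 *     vhost_log_cache_sync(dev, vq);  // flush the per-queue log cache
 *
 * The *_iova variants above must be used instead when the addresses come
 * from the guest IOVA space (VIRTIO_F_IOMMU_PLATFORM negotiated).
 */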
627 extern int vhost_config_log_level;
628 extern int vhost_data_log_level;
630 #define VHOST_LOG_CONFIG(level, fmt, args...) \
631 rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
632 "VHOST_CONFIG: " fmt, ##args)
634 #define VHOST_LOG_DATA(level, fmt, args...) \
635 (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
636 rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
637 "VHOST_DATA : " fmt, ##args) : \
640 #ifdef RTE_LIBRTE_VHOST_DEBUG
641 #define VHOST_MAX_PRINT_BUFF 6072
642 #define PRINT_PACKET(device, addr, size, header) do { \
643 char *pkt_addr = (char *)(addr); \
644 unsigned int index; \
645 char packet[VHOST_MAX_PRINT_BUFF]; \
648 snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
650 snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
651 for (index = 0; index < (size); index++) { \
652 snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
653 "%02hhx ", pkt_addr[index]); \
655 snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
657 VHOST_LOG_DATA(DEBUG, "%s", packet); \
660 #define PRINT_PACKET(device, addr, size, header) do {} while (0)
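/*
 * Editor's note: usage sketch for the logging helpers above.
 * VHOST_LOG_DATA() is additionally compiled out when its level is above
 * RTE_LOG_DP_LEVEL, so it is safe on the hot path; PRINT_PACKET() only
 * produces output in RTE_LIBRTE_VHOST_DEBUG builds:
 *
 *     VHOST_LOG_CONFIG(INFO, "(%d) virtio is now ready for processing.\n",
 *             dev->vid);
 *     VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
 *             dev->vid, count);
 */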
663 extern struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
665 #define VHOST_BINARY_SEARCH_THRESH 256
667 static __rte_always_inline int guest_page_addrcmp(const void *p1,
670 const struct guest_page *page1 = (const struct guest_page *)p1;
671 const struct guest_page *page2 = (const struct guest_page *)p2;
673 if (page1->guest_phys_addr > page2->guest_phys_addr)
675 if (page1->guest_phys_addr < page2->guest_phys_addr)
681 static __rte_always_inline int guest_page_rangecmp(const void *p1, const void *p2)
683 const struct guest_page *page1 = (const struct guest_page *)p1;
684 const struct guest_page *page2 = (const struct guest_page *)p2;
686 if (page1->guest_phys_addr >= page2->guest_phys_addr) {
687 if (page1->guest_phys_addr < page2->guest_phys_addr + page2->size)
695 static __rte_always_inline rte_iova_t
696 gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
697 uint64_t gpa_size, uint64_t *hpa_size)
700 struct guest_page *page;
701 struct guest_page key;
703 *hpa_size = gpa_size;
704 if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
705 key.guest_phys_addr = gpa;
706 page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
707 sizeof(struct guest_page), guest_page_rangecmp);
709 if (gpa + gpa_size <=
710 page->guest_phys_addr + page->size) {
711 return gpa - page->guest_phys_addr +
713 } else if (gpa < page->guest_phys_addr +
715 *hpa_size = page->guest_phys_addr +
717 return gpa - page->guest_phys_addr +
722 for (i = 0; i < dev->nr_guest_pages; i++) {
723 page = &dev->guest_pages[i];
725 if (gpa >= page->guest_phys_addr) {
726 if (gpa + gpa_size <=
727 page->guest_phys_addr + page->size) {
728 return gpa - page->guest_phys_addr +
730 } else if (gpa < page->guest_phys_addr +
732 *hpa_size = page->guest_phys_addr +
734 return gpa - page->guest_phys_addr +
745 /* Convert guest physical address to host physical address */
746 static __rte_always_inline rte_iova_t
747 gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
752 hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
753 return hpa_size == size ? hpa : 0;
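/*
 * Editor's note: usage sketch. gpa_to_hpa() only succeeds when the whole
 * [gpa, gpa + size) range falls inside one contiguous host region;
 * callers that can handle split ranges use gpa_to_first_hpa() directly:
 *
 *     uint64_t hpa_size;
 *     rte_iova_t hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
 *     // hpa_size <= size: only the first hpa_size bytes are guaranteed
 *     // physically contiguous starting at hpa; the remainder must be
 *     // looked up again at gpa + hpa_size
 */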
756 static __rte_always_inline uint64_t
757 hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
759 struct rte_vhost_mem_region *r;
762 if (unlikely(!dev || !dev->mem))
765 for (i = 0; i < dev->mem->nregions; i++) {
766 r = &dev->mem->regions[i];
768 if (vva >= r->host_user_addr &&
769 vva + len < r->host_user_addr + r->size) {
770 return r->guest_phys_addr + vva - r->host_user_addr;
776 static __rte_always_inline struct virtio_net *
779 struct virtio_net *dev = vhost_devices[vid];
781 if (unlikely(!dev)) {
782 VHOST_LOG_CONFIG(ERR,
783 "(%d) device not found.\n", vid);
789 int vhost_new_device(void);
790 void cleanup_device(struct virtio_net *dev, int destroy);
791 void reset_device(struct virtio_net *dev);
792 void vhost_destroy_device(int);
793 void vhost_destroy_device_notify(struct virtio_net *dev);
795 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
796 void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
797 void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
799 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
801 void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
803 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
804 void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags, bool stats_enabled);
805 void vhost_enable_extbuf(int vid);
806 void vhost_enable_linearbuf(int vid);
807 int vhost_enable_guest_notification(struct virtio_net *dev,
808 struct vhost_virtqueue *vq, int enable);
810 struct rte_vhost_device_ops const *vhost_driver_callback_get(const char *path);
813 * Backend-specific cleanup.
815 * TODO: fix it; we only have one backend now
817 void vhost_backend_cleanup(struct virtio_net *dev);
819 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
820 uint64_t iova, uint64_t *len, uint8_t perm);
821 void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
822 struct vhost_virtqueue *vq,
823 uint64_t desc_addr, uint64_t desc_len);
824 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
825 uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
827 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
829 static __rte_always_inline uint64_t
830 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
831 uint64_t iova, uint64_t *len, uint8_t perm)
833 if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
834 return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
836 return __vhost_iova_to_vva(dev, vq, iova, len, perm);
839 #define vhost_avail_event(vr) \
840 (*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
841 #define vhost_used_event(vr) \
842 (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
845 * The following is used with VIRTIO_RING_F_EVENT_IDX.
846 * Assuming a given event_idx value from the other side, if we have
847 * just incremented the index from old to new_idx, should we trigger an event?
850 static __rte_always_inline int
851 vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
853 return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
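/*
 * Editor's note: worked example of the unsigned-wraparound comparison
 * above, with old = 0xfff0, new_idx = 0x0010 and event_idx = 0xfffa:
 *
 *     (uint16_t)(new_idx - event_idx - 1) == 0x0015
 *     (uint16_t)(new_idx - old)           == 0x0020
 *
 * 0x0015 < 0x0020, so event_idx was crossed while moving from old to
 * new_idx and the guest must be notified.
 */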
856 static __rte_always_inline void
857 vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
859 /* Flush used->idx update before we read avail->flags. */
860 rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
862 /* Don't kick the guest if we haven't reached the index specified by the guest. */
863 if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
864 uint16_t old = vq->signalled_used;
865 uint16_t new = vq->last_used_idx;
866 bool signalled_used_valid = vq->signalled_used_valid;
868 vq->signalled_used = new;
869 vq->signalled_used_valid = true;
871 VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
873 vhost_used_event(vq),
876 if ((vhost_need_event(vhost_used_event(vq), new, old) &&
877 (vq->callfd >= 0)) ||
878 unlikely(!signalled_used_valid)) {
879 eventfd_write(vq->callfd, (eventfd_t) 1);
880 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
881 vq->stats.guest_notifications++;
882 if (dev->notify_ops->guest_notified)
883 dev->notify_ops->guest_notified(dev->vid);
886 /* Kick the guest if necessary. */
887 if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
888 && (vq->callfd >= 0)) {
889 eventfd_write(vq->callfd, (eventfd_t)1);
890 if (dev->flags & VIRTIO_DEV_STATS_ENABLED)
891 vq->stats.guest_notifications++;
892 if (dev->notify_ops->guest_notified)
893 dev->notify_ops->guest_notified(dev->vid);
898 static __rte_always_inline void
899 vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
901 uint16_t old, new, off, off_wrap;
902 bool signalled_used_valid, kick = false;
904 /* Flush used desc update. */
905 rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
907 if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
908 if (vq->driver_event->flags !=
909 VRING_EVENT_F_DISABLE)
914 old = vq->signalled_used;
915 new = vq->last_used_idx;
916 vq->signalled_used = new;
917 signalled_used_valid = vq->signalled_used_valid;
918 vq->signalled_used_valid = true;
920 if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
921 if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
926 if (unlikely(!signalled_used_valid)) {
931 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
933 off_wrap = vq->driver_event->off_wrap;
934 off = off_wrap & ~(1 << 15);
939 if (vq->used_wrap_counter != off_wrap >> 15)
942 if (vhost_need_event(off, new, old))
946 eventfd_write(vq->callfd, (eventfd_t)1);
947 if (dev->notify_ops->guest_notified)
948 dev->notify_ops->guest_notified(dev->vid);
952 static __rte_always_inline void
953 free_ind_table(void *idesc)
958 static __rte_always_inline void
959 restore_mbuf(struct rte_mbuf *m)
961 uint32_t mbuf_size, priv_size;
964 priv_size = rte_pktmbuf_priv_size(m->pool);
965 mbuf_size = sizeof(struct rte_mbuf) + priv_size;
966 /* start of buffer is after mbuf structure and priv data */
968 m->buf_addr = (char *)m + mbuf_size;
969 m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
974 static __rte_always_inline bool
975 mbuf_is_consumed(struct rte_mbuf *m)
978 if (rte_mbuf_refcnt_read(m) > 1)
985 #endif /* _VHOST_NET_CDEV_H_ */