/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa_dev.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET 4
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED 8

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1
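
/*
 * Usage sketch (illustrative only): the VIRTIO_DEV_* values above are bit
 * flags kept in the per-device flags field, e.g. the data path bails out
 * early when the device is not yet running:
 *
 *	if (unlikely(!(dev->flags & VIRTIO_DEV_RUNNING)))
 *		return 0;
 */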
#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)	\
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			   sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
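
/*
 * Worked example, assuming the common 64-byte cache line: struct
 * vring_packed_desc is 16 bytes (8-byte addr, 4-byte len, 2-byte id,
 * 2-byte flags), so PACKED_BATCH_SIZE evaluates to 64 / 16 = 4 and
 * PACKED_BATCH_MASK to 3. A batch-aligned index can then be detected with:
 *
 *	if (!(vq->last_avail_idx & PACKED_BATCH_MASK)) {
 *		// index starts a full cache line of descriptors
 *	}
 */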
#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif
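
/*
 * Usage sketch (illustrative only): the helper is meant for short,
 * fixed-size loops over a descriptor batch, e.g.
 *
 *	uint16_t i;
 *
 *	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
 *		// handle descriptor i of the batch
 *	}
 *
 * On compilers defining one of the *_UNROLL_PRAGMA macros the loop carries
 * an unroll-by-4 hint; otherwise it degrades to a plain loop.
 */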
/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in the zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t desc_count;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};
/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc	*desc;
	struct vring_packed_desc *desc_packed;
	struct vring_avail	*avail;
	struct vring_packed_desc_event *driver_event;
	struct vring_used	*used;
	struct vring_packed_desc_event *device_event;
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
	/* Last used index we have notified to the frontend. */
	uint16_t		signalled_used;
	bool			signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if the device should be started/stopped */
	int			backend;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of the used ring, for logging */
	uint64_t		log_guest_addr;

	/* inflight shared memory info */
	struct rte_vhost_inflight_info_split *inflight_split;
	struct rte_vhost_inflight_info_packed *inflight_packed;
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t		global_counter;

	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	struct vring_used_elem	*shadow_used_split;
	struct vring_used_elem_packed *shadow_used_packed;
	uint16_t		shadow_used_idx;
	/* Record the latest cache-aligned descriptor index for packed ring enqueue */
	uint16_t		shadow_aligned_idx;
	/* Record the first descriptor index of a packed ring dequeue */
	uint16_t		shadow_last_used_idx;
	struct vhost_vring_addr	ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;
	bool			used_wrap_counter;
	bool			avail_wrap_counter;

	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
	uint16_t		log_cache_nb_elem;

	rte_rwlock_t		iotlb_lock;
	rte_rwlock_t		iotlb_pending_lock;
	struct rte_mempool	*iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;
/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_VRING		0x100
#define VHOST_MAX_QUEUE_PAIRS	0x80

#ifndef VIRTIO_NET_F_MTU
#define VIRTIO_NET_F_MTU 3
#endif

#ifndef VIRTIO_F_ANY_LAYOUT
#define VIRTIO_F_ANY_LAYOUT 27
#endif
/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif
/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif
/*
 * Declare below packed ring defines unconditionally
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descs are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER 35
#endif
/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))
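
/*
 * Illustrative example (not part of the API): the negotiated feature set is
 * a plain 64-bit mask, so run-time checks are simple bit tests, e.g.
 *
 *	if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) {
 *		// mergeable RX buffers were negotiated
 *	}
 */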
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int		fd;
	void		*addr;
	uint64_t	size;
};
/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	/* to tell if we need to broadcast a RARP packet */
	int16_t			broadcast_rarp;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	struct rte_ether_addr	mac;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	rte_spinlock_t		slave_req_lock;

	int			postcopy_listening;

	struct rte_vdpa_device	*vdpa_dev;

	/* context data for the external message handlers */
	void			*extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;
static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}
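
/*
 * Worked example of the packed-ring convention: the driver marks a
 * descriptor available by setting F_AVAIL equal to its current wrap counter
 * and F_USED to the inverse. With wrap_counter == 1, desc_is_avail()
 * therefore returns true when F_AVAIL is set and F_USED is clear; once the
 * ring wraps (wrap_counter == 0), that same flag combination means
 * "not yet available".
 */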
static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}
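
/*
 * Numeric sketch: with vq->size == 256, last_avail_idx == 250 and an
 * 8-descriptor batch, vq_inc_last_avail_packed(vq, 8) leaves last_avail_idx
 * at 2 and toggles avail_wrap_counter, mirroring how the driver side of the
 * packed ring wraps around.
 */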
void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
		      uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			   uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		     uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}
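
/*
 * Usage sketch (illustrative, following the split-ring data path): after
 * writing a used-ring entry and bumping used->idx, the touched guest pages
 * are marked dirty for live migration; used_idx below is a placeholder for
 * the slot just written:
 *
 *	vhost_log_cache_used_vring(dev, vq,
 *			offsetof(struct vring_used, ring[used_idx]),
 *			sizeof(struct vring_used_elem));
 *	vhost_log_cache_sync(dev, vq);
 *	vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
 *			sizeof(vq->used->idx));
 *
 * All of these collapse to no-ops unless VHOST_F_LOG_ALL was negotiated.
 */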
extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)			\
	rte_log(RTE_LOG_ ## level, vhost_config_log_level,	\
		"VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...)			\
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?	\
	 rte_log(RTE_LOG_ ## level, vhost_data_log_level,	\
		"VHOST_DATA : " fmt, ##args) :			\
	 0)
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
		VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256
static __rte_always_inline int guest_page_addrcmp(const void *p1,
		const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}
/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa;
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			       sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + size < page->guest_phys_addr + page->size)
				return gpa - page->guest_phys_addr +
				       page->host_phys_addr;
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr &&
			    gpa + size < page->guest_phys_addr +
					 page->size)
				return gpa - page->guest_phys_addr +
				       page->host_phys_addr;
		}
	}

	return 0;
}
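
/*
 * Usage sketch (illustrative): a zero return means no guest page covers the
 * whole [gpa, gpa + size) range, so callers must check the result before
 * using it for DMA; desc_gpa and copy_len are placeholder names:
 *
 *	rte_iova_t hpa = gpa_to_hpa(dev, desc_gpa, copy_len);
 *	if (unlikely(hpa == 0)) {
 *		// fall back to a CPU copy or drop the request
 *	}
 */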
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len < r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}

	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}
int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
void vhost_set_builtin_virtio_net(int vid, bool enable);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have only one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
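
/*
 * Typical usage sketch (illustrative): translate a descriptor address before
 * touching guest memory and re-check the returned length, since the
 * translation may be truncated at an IOTLB or memory-region boundary:
 *
 *	uint64_t len = desc->len;
 *	void *va = (void *)(uintptr_t)vhost_iova_to_vva(dev, vq,
 *				desc->addr, &len, VHOST_ACCESS_RO);
 *	if (!va || len != desc->len) {
 *		// translation failed or is partial; handle it
 *	}
 */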
#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])
/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
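
/*
 * Worked example: with old = 10, new_idx = 15 and event_idx = 12, the guest
 * asked to be notified once index 12 was used; (15 - 12 - 1) = 2 is smaller
 * than (15 - 10) = 5, so an event is needed. With event_idx = 20 the first
 * term becomes 65530 (mod 2^16), which is not smaller than 5, so no event is
 * sent. The uint16_t casts keep the comparison correct across index
 * wrap-around.
 */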
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_smp_mb();

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_smp_mb();

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_smp_rmb();

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}
static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;

		/* start of buffer is after mbuf structure and priv data */
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
	zmbuf->in_use = 0;
}

#endif /* _VHOST_NET_CDEV_H_ */