/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"

#include "rte_vhost_async.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY ((uint32_t)1 << 1)
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
/* Used to indicate that the feature negotiation failed */
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define MAX_PKT_BURST 32

#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST)
#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 2)

#define PACKED_DESC_ENQUEUE_USED_FLAG(w) \
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
		VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w) \
	((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
					 VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
			   sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)

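/*
 * Worked example: with a 64-byte cache line and a 16-byte
 * struct vring_packed_desc, PACKED_BATCH_SIZE is 64 / 16 = 4
 * descriptors per batch and PACKED_BATCH_MASK is 0x3.
 */
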
#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
	for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
	for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
	for (iter = val; iter < num; iter++)
#endif

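/*
 * Usage sketch (illustrative): iterate over one packed-ring batch,
 * letting the compiler unroll the loop where a pragma is available.
 * process_desc() is a hypothetical per-descriptor handler, not an
 * API defined in this header.
 *
 *	uint16_t i;
 *
 *	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
 *		process_desc(vq, avail_idx + i);
 */
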
/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_iova;
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

struct vring_used_elem_packed {
	uint16_t id;
	uint16_t flags;
	uint32_t len;
	uint32_t count;
};

/**
 * inflight async packet information
 */
struct async_inflight_info {
	struct rte_mbuf *mbuf;
	uint16_t descs; /* num of descs inflight */
	uint16_t nr_buffers; /* num of buffers inflight for packed ring */
};

struct vhost_async {
	/* operation callbacks for DMA */
	struct rte_vhost_async_channel_ops ops;

	struct rte_vhost_iov_iter src_iov_iter[VHOST_MAX_ASYNC_IT];
	struct rte_vhost_iov_iter dst_iov_iter[VHOST_MAX_ASYNC_IT];
	struct iovec src_iovec[VHOST_MAX_ASYNC_VEC];
	struct iovec dst_iovec[VHOST_MAX_ASYNC_VEC];

	/* data transfer status */
	struct async_inflight_info *pkts_info;
	uint16_t pkts_idx;
	uint16_t pkts_inflight_n;
	uint16_t last_pkts_n;
	union {
		struct vring_used_elem *descs_split;
		struct vring_used_elem_packed *buffers_packed;
	};
	union {
		uint16_t desc_idx_split;
		uint16_t buffer_idx_packed;
	};
	union {
		uint16_t last_desc_idx_split;
		uint16_t last_buffer_idx_packed;
	};
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	union {
		struct vring_desc *desc;
		struct vring_packed_desc *desc_packed;
	};
	union {
		struct vring_avail *avail;
		struct vring_packed_desc_event *driver_event;
	};
	union {
		struct vring_used *used;
		struct vring_packed_desc_event *device_event;
	};
	uint16_t size;

	uint16_t last_avail_idx;
	uint16_t last_used_idx;
	/* Last used index we notify to front end. */
	uint16_t signalled_used;
	bool signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	bool enabled;
	bool access_ok;
	bool ready;

	rte_spinlock_t access_lock;

	union {
		struct vring_used_elem *shadow_used_split;
		struct vring_used_elem_packed *shadow_used_packed;
	};
	uint16_t shadow_used_idx;
	/* Record packed ring enqueue latest desc cache aligned index */
	uint16_t shadow_aligned_idx;
	/* Record packed ring first dequeue desc index */
	uint16_t shadow_last_used_idx;

	uint16_t batch_copy_nb_elems;
	struct batch_copy_elem *batch_copy_elems;

	bool used_wrap_counter;
	bool avail_wrap_counter;

	uint16_t log_cache_nb_elem;
	/* Physical address of used ring, for logging */
	uint64_t log_guest_addr;
	struct log_cache_entry *log_cache;

	rte_rwlock_t iotlb_lock;
	rte_rwlock_t iotlb_pending_lock;
	struct rte_mempool *iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;

	/* Used to notify the guest (trigger interrupt) */
	int callfd;
	/* Currently unused as polling mode is enabled */
	int kickfd;

	/* inflight share memory info */
	union {
		struct rte_vhost_inflight_info_split *inflight_split;
		struct rte_vhost_inflight_info_packed *inflight_packed;
	};
	struct rte_vhost_resubmit_info *resubmit_inflight;
	uint64_t global_counter;

	struct vhost_async *async;

	int notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF	(-1)

	struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;

/* Virtio device status as per Virtio specification */
#define VIRTIO_DEVICE_STATUS_RESET		0x00
#define VIRTIO_DEVICE_STATUS_ACK		0x01
#define VIRTIO_DEVICE_STATUS_DRIVER		0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK		0x04
#define VIRTIO_DEVICE_STATUS_FEATURES_OK	0x08
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET	0x40
#define VIRTIO_DEVICE_STATUS_FAILED		0x80

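/*
 * Example (per the Virtio specification): a successful handshake
 * typically walks the status through
 *	0x00 (RESET) -> 0x01 (ACK) -> 0x03 (ACK|DRIVER)
 *	-> 0x0b (ACK|DRIVER|FEATURES_OK)
 *	-> 0x0f (ACK|DRIVER|FEATURES_OK|DRIVER_OK)
 * while DEV_NEED_RESET (0x40) and FAILED (0x80) mark error paths.
 */
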
#define VHOST_MAX_VRING			0x100
#define VHOST_MAX_QUEUE_PAIRS		0x80

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

struct vring_packed_desc_event {
	uint16_t off_wrap;
	uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as kernel headers might use different names.
 */
#define VRING_DESC_F_AVAIL	(1ULL << 7)
#define VRING_DESC_F_USED	(1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descriptors are processed in the same order.
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_F_ANY_LAYOUT) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_GSO) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_HOST_UFO) | \
				(1ULL << VIRTIO_NET_F_HOST_ECN) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_NET_F_GUEST_UFO) | \
				(1ULL << VIRTIO_NET_F_GUEST_ECN) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_RING_F_EVENT_IDX) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IN_ORDER) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
				(1ULL << VIRTIO_F_RING_PACKED))

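/*
 * Usage sketch (illustrative): negotiated features are tested as bit
 * masks against dev->features, e.g.
 *
 *	if (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
 *		...mergeable Rx buffers were negotiated...
 */
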
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

struct inflight_mem_info {
	int fd;
	void *addr;
	uint64_t size;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory *mem;
	uint64_t features;
	uint64_t protocol_features;
	int vid;
	uint32_t flags;
	uint16_t vhost_hlen;
	/* to tell if we need broadcast rarp packet */
	int16_t broadcast_rarp;
	uint32_t nr_vring;
	int async_copy;
	int extbuf;
	int linearbuf;
	struct vhost_virtqueue *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
	struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char ifname[IF_NAME_SZ];
	uint64_t log_size;
	uint64_t log_base;
	uint64_t log_addr;
	struct rte_ether_addr mac;
	uint16_t mtu;
	uint8_t status;

	struct vhost_device_ops const *notify_ops;

	uint32_t nr_guest_pages;
	uint32_t max_guest_pages;
	struct guest_page *guest_pages;

	int slave_req_fd;
	rte_spinlock_t slave_req_lock;

	int postcopy_ufd;
	int postcopy_listening;

	struct rte_vdpa_device *vdpa_dev;

	/* context data for the external message handlers */
	void *extern_data;
	/* pre and post vhost user message handlers for the device */
	struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
	return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

	return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
		wrap_counter != !!(flags & VRING_DESC_F_USED);
}

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_used_idx += num;
	if (vq->last_used_idx >= vq->size) {
		vq->used_wrap_counter ^= 1;
		vq->last_used_idx -= vq->size;
	}
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
	vq->last_avail_idx += num;
	if (vq->last_avail_idx >= vq->size) {
		vq->avail_wrap_counter ^= 1;
		vq->last_avail_idx -= vq->size;
	}
}

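/*
 * Worked example (illustrative): on a ring of size 256 with
 * last_used_idx == 250, vq_inc_last_used_packed(vq, 10) wraps the
 * index to 4 and toggles used_wrap_counter; desc_is_avail() then
 * matches descriptors whose AVAIL flag equals the new wrap counter
 * and whose USED flag differs from it.
 */
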
void __vhost_log_cache_write(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
		struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
		__vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
					len);
	}
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
		if (unlikely(vq->log_guest_addr == 0))
			return;
		__vhost_log_write(dev, vq->log_guest_addr + offset, len);
	}
}

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_cache_write_iova(dev, vq, iova, len);
	else
		__vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t len)
{
	if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
		return;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		__vhost_log_write_iova(dev, vq, iova, len);
	else
		__vhost_log_write(dev, iova, len);
}

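/*
 * Usage sketch (illustrative): after updating used->ring[i], the data
 * path marks the touched bytes dirty for live migration; the call is
 * a no-op unless VHOST_F_LOG_ALL was negotiated.
 *
 *	vhost_log_used_vring(dev, vq,
 *		offsetof(struct vring_used, ring[i]),
 *		sizeof(vq->used->ring[i]));
 */
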
extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, vhost_config_log_level, \
		"VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...) \
	(void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ? \
	 rte_log(RTE_LOG_ ## level, vhost_data_log_level, \
		"VHOST_DATA : " fmt, ##args) : \
	 0)

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
						const void *p2)
{
	const struct guest_page *page1 = (const struct guest_page *)p1;
	const struct guest_page *page2 = (const struct guest_page *)p2;

	if (page1->guest_phys_addr > page2->guest_phys_addr)
		return 1;
	if (page1->guest_phys_addr < page2->guest_phys_addr)
		return -1;

	return 0;
}

static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
	uint64_t gpa_size, uint64_t *hpa_size)
{
	uint32_t i;
	struct guest_page *page;
	struct guest_page key;

	*hpa_size = gpa_size;
	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
		page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
			sizeof(struct guest_page), guest_page_addrcmp);
		if (page) {
			if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			} else if (gpa < page->guest_phys_addr +
						page->size) {
				*hpa_size = page->guest_phys_addr +
					page->size - gpa;
				return gpa - page->guest_phys_addr +
					page->host_phys_addr;
			}
		}
	} else {
		for (i = 0; i < dev->nr_guest_pages; i++) {
			page = &dev->guest_pages[i];

			if (gpa >= page->guest_phys_addr) {
				if (gpa + gpa_size <=
					page->guest_phys_addr + page->size) {
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				} else if (gpa < page->guest_phys_addr +
							page->size) {
					*hpa_size = page->guest_phys_addr +
						page->size - gpa;
					return gpa - page->guest_phys_addr +
						page->host_phys_addr;
				}
			}
		}
	}

	*hpa_size = 0;
	return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	rte_iova_t hpa;
	uint64_t hpa_size;

	hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
	return hpa_size == size ? hpa : 0;
}

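/*
 * Usage sketch (illustrative): resolve the host-physical address of a
 * guest buffer before programming a DMA engine. buf_iova and buf_len
 * stand for a guest-physical buffer address and length; a return of 0
 * means the range is not physically contiguous on the host, so the
 * caller must split it or fall back to a CPU copy.
 *
 *	rte_iova_t hpa = gpa_to_hpa(dev, buf_iova, buf_len);
 *	if (unlikely(hpa == 0))
 *		...fall back...
 */
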
static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		return 0;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (vva >= r->host_user_addr &&
		    vva + len < r->host_user_addr + r->size) {
			return r->guest_phys_addr + vva - r->host_user_addr;
		}
	}
	return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
	struct virtio_net *dev = vhost_devices[vid];

	if (unlikely(!dev)) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) device not found.\n", vid);
	}

	return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we only have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
		struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}

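/*
 * Usage sketch (illustrative): translate a descriptor address before
 * dereferencing it. *len may be shrunk when the mapping is smaller
 * than requested, so the caller must check it and, if needed, chain
 * further translations.
 *
 *	uint64_t dlen = desc->len;
 *	uint64_t vva = vhost_iova_to_vva(dev, vq, desc->addr,
 *					 &dlen, VHOST_ACCESS_RO);
 *	if (unlikely(!vva || dlen != desc->len))
 *		...handle failed or partial mapping...
 */
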
#define vhost_avail_event(vr) \
	(*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
	(*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
	return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}

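/*
 * Worked example (illustrative): with old == 10 and new_idx == 15,
 * event_idx == 12 gives (15 - 12 - 1) == 2 < (15 - 10) == 5, so the
 * guest is signalled; event_idx == 20 gives 65530 < 5 in uint16_t
 * arithmetic, which is false, so no interrupt is sent. The unsigned
 * wrap-around keeps the test correct across index overflow.
 */
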
static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	/* Flush used->idx update before we read avail->flags. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	/* Don't kick guest if we don't reach index specified by guest. */
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		uint16_t old = vq->signalled_used;
		uint16_t new = vq->last_used_idx;
		bool signalled_used_valid = vq->signalled_used_valid;

		vq->signalled_used = new;
		vq->signalled_used_valid = true;

		VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
			__func__,
			vhost_used_event(vq),
			old, new);

		if ((vhost_need_event(vhost_used_event(vq), new, old) &&
					(vq->callfd >= 0)) ||
				unlikely(!signalled_used_valid)) {
			eventfd_write(vq->callfd, (eventfd_t) 1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	} else {
		/* Kick the guest if necessary. */
		if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
				&& (vq->callfd >= 0)) {
			eventfd_write(vq->callfd, (eventfd_t)1);
			if (dev->notify_ops->guest_notified)
				dev->notify_ops->guest_notified(dev->vid);
		}
	}
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint16_t old, new, off, off_wrap;
	bool signalled_used_valid, kick = false;

	/* Flush used desc update. */
	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (vq->driver_event->flags !=
				VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	old = vq->signalled_used;
	new = vq->last_used_idx;
	vq->signalled_used = new;
	signalled_used_valid = vq->signalled_used_valid;
	vq->signalled_used_valid = true;

	if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
		if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
			kick = true;
		goto kick;
	}

	if (unlikely(!signalled_used_valid)) {
		kick = true;
		goto kick;
	}

	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	off_wrap = vq->driver_event->off_wrap;
	off = off_wrap & ~(1 << 15);

	if (new <= old)
		old -= vq->size;

	if (vq->used_wrap_counter != off_wrap >> 15)
		off -= vq->size;

	if (vhost_need_event(off, new, old))
		kick = true;
kick:
	if (kick) {
		eventfd_write(vq->callfd, (eventfd_t)1);
		if (dev->notify_ops->guest_notified)
			dev->notify_ops->guest_notified(dev->vid);
	}
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
	rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		/* start of buffer is after mbuf structure and priv data */
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}

	return true;
}

#endif /* _VHOST_NET_CDEV_H_ */