/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
struct vhost_vq_stats_name_off {
	char name[RTE_VHOST_STATS_NAME_SIZE];
	unsigned int offset;
};

static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
	{"good_packets", offsetof(struct vhost_virtqueue, stats.packets)},
	{"good_bytes", offsetof(struct vhost_virtqueue, stats.bytes)},
	{"multicast_packets", offsetof(struct vhost_virtqueue, stats.multicast)},
	{"broadcast_packets", offsetof(struct vhost_virtqueue, stats.broadcast)},
	{"undersize_packets", offsetof(struct vhost_virtqueue, stats.size_bins[0])},
	{"size_64_packets", offsetof(struct vhost_virtqueue, stats.size_bins[1])},
	{"size_65_127_packets", offsetof(struct vhost_virtqueue, stats.size_bins[2])},
	{"size_128_255_packets", offsetof(struct vhost_virtqueue, stats.size_bins[3])},
	{"size_256_511_packets", offsetof(struct vhost_virtqueue, stats.size_bins[4])},
	{"size_512_1023_packets", offsetof(struct vhost_virtqueue, stats.size_bins[5])},
	{"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
	{"size_1519_max_packets", offsetof(struct vhost_virtqueue, stats.size_bins[7])},
};

#define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)
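/*
 * The table above maps human-readable counter names to the byte offset of
 * the matching uint64_t counter inside struct vhost_virtqueue. A minimal
 * sketch of how one counter can be fetched through the table (this is the
 * same pattern rte_vhost_vring_stats_get() uses further down, shown here
 * only for illustration):
 *
 *	const struct vhost_vq_stats_name_off *s = &vhost_vq_stat_strings[0];
 *	uint64_t packets = *(uint64_t *)((char *)vq + s->offset);
 */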
/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t *size, uint8_t perm)
{
	uint64_t vva, tmp_size;

	if (unlikely(!*size))
		return 0;

	tmp_size = *size;

	vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
	if (tmp_size == *size)
		return vva;

	iova += tmp_size;

	if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
		/*
		 * iotlb_lock is read-locked for a full burst,
		 * but it only protects the iotlb cache.
		 * In case of IOTLB miss, we might block on the socket,
		 * which could cause a deadlock with QEMU if an IOTLB update
		 * is being handled. We can safely unlock here to avoid it.
		 */
		vhost_user_iotlb_rd_unlock(vq);

		vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
		if (vhost_user_iotlb_miss(dev, iova, perm)) {
			VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
				dev->ifname, iova);
			vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
		}

		vhost_user_iotlb_rd_lock(vq);
	}

	return 0;
}
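/*
 * Note on the miss path above: once the IOTLB miss request has been sent,
 * the function returns 0. Callers must treat a zero VVA as "translation not
 * available yet" and retry after the vhost-user front-end (typically QEMU)
 * has pushed the IOTLB update. Temporarily dropping iotlb_lock is what lets
 * that update be processed while a burst is still in flight.
 */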
#define VHOST_LOG_PAGE	4096

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * __sync_ built-ins are deprecated, but __atomic_ ones
	 * are sub-optimized in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	vhost_set_bit(page % 8, &log_base[page / 8]);
}
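/*
 * The dirty log is a bitmap with one bit per VHOST_LOG_PAGE (4 KiB) page of
 * guest memory: page N maps to bit (N % 8) of byte (N / 8) of the log. For
 * example, a write touching guest physical address 0x3000 dirties page 3,
 * i.e. bit 3 of log_base[0].
 */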
void
__vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}
void
__vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(ERR,
			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			dev->ifname, iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_write(dev, gpa, len);
}
void
__vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (unlikely(!dev->log_base))
		return;

	/* No cache, nothing to sync */
	if (unlikely(!vq->log_cache))
		return;

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * '__sync' builtins are deprecated, but '__atomic' ones
		 * are sub-optimized in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->log_cache_nb_elem = 0;
}
static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	if (unlikely(!vq->log_cache)) {
		/* No logging cache allocated, write dirty log map directly */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);

		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}
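/*
 * Design note: the per-virtqueue log cache accumulates dirty bits for whole
 * unsigned-long words of the bitmap using plain stores, and only
 * __vhost_log_cache_sync() publishes them to the shared log with atomic ORs.
 * This keeps the number of atomic operations on the datapath proportional to
 * the number of distinct bitmap words touched per burst rather than to the
 * number of dirtied pages.
 */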
void
__vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (unlikely(!dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}
void
__vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t iova, uint64_t len)
{
	uint64_t hva, gpa, map_len;
	map_len = len;

	hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
	if (map_len != len) {
		VHOST_LOG_DATA(ERR,
			"(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
			dev->ifname, iova);
		return;
	}

	gpa = hva_to_gpa(dev, hva, len);
	if (gpa)
		__vhost_log_cache_write(dev, vq, gpa, len);
}
void *
vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t desc_addr, uint64_t desc_len)
{
	void *idesc;
	uint64_t src, dst;
	uint64_t len, remain = desc_len;

	idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
	if (unlikely(!idesc))
		return NULL;

	dst = (uint64_t)(uintptr_t)idesc;

	while (remain) {
		len = remain;
		src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
				VHOST_ACCESS_RO);
		if (unlikely(!src || !len)) {
			rte_free(idesc);
			return NULL;
		}

		rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);

		remain -= len;
		dst += len;
		desc_addr += len;
	}

	return idesc;
}
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

void
cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
		return;

	if (vq_is_packed(dev)) {
		if (vq->inflight_packed)
			vq->inflight_packed = NULL;
	} else {
		if (vq->inflight_split)
			vq->inflight_split = NULL;
	}

	if (vq->resubmit_inflight) {
		if (vq->resubmit_inflight->resubmit_list) {
			rte_free(vq->resubmit_inflight->resubmit_list);
			vq->resubmit_inflight->resubmit_list = NULL;
		}
		rte_free(vq->resubmit_inflight);
		vq->resubmit_inflight = NULL;
	}
}
/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	vhost_backend_cleanup(dev);

	for (i = 0; i < dev->nr_vring; i++) {
		cleanup_vq(dev->virtqueue[i], destroy);
		cleanup_vq_inflight(dev, dev->virtqueue[i]);
	}
}
static void
vhost_free_async_mem(struct vhost_virtqueue *vq)
{
	if (!vq->async)
		return;

	rte_free(vq->async->pkts_info);
	rte_free(vq->async->pkts_cmpl_flag);

	rte_free(vq->async->buffers_packed);
	vq->async->buffers_packed = NULL;
	rte_free(vq->async->descs_split);
	vq->async->descs_split = NULL;

	rte_free(vq->async);
	vq->async = NULL;
}
void
free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (vq_is_packed(dev))
		rte_free(vq->shadow_used_packed);
	else
		rte_free(vq->shadow_used_split);

	vhost_free_async_mem(vq);
	rte_free(vq->batch_copy_elems);
	rte_mempool_free(vq->iotlb_pool);
	rte_free(vq->log_cache);
	rte_free(vq);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
	uint32_t i;

	for (i = 0; i < dev->nr_vring; i++)
		free_vq(dev, dev->virtqueue[i]);

	rte_free(dev);
}
static __rte_always_inline int
log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
		return 0;

	vq->log_guest_addr = translate_log_addr(dev, vq,
						vq->ring_addrs.log_guest_addr);
	if (vq->log_guest_addr == 0)
		return -1;

	return 0;
}
/*
 * Converts vring log address to GPA
 * If IOMMU is enabled, the log address is IOVA
 * If IOMMU not enabled, the log address is already GPA
 *
 * Caller should have iotlb_lock read-locked
 */
uint64_t
translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t log_addr)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		const uint64_t exp_size = sizeof(uint64_t);
		uint64_t hva, gpa;
		uint64_t size = exp_size;

		hva = vhost_iova_to_vva(dev, vq, log_addr,
					&size, VHOST_ACCESS_RW);

		if (size != exp_size)
			return 0;

		gpa = hva_to_gpa(dev, hva, exp_size);
		if (!gpa) {
			VHOST_LOG_DATA(ERR,
				"(%s) failed to find GPA for log_addr: 0x%"
				PRIx64 " hva: 0x%" PRIx64 "\n",
				dev->ifname, log_addr, hva);
			return 0;
		}
		return gpa;

	} else
		return log_addr;
}
/* Caller should have iotlb_lock read-locked */
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_desc) * vq->size;
	size = req_size;
	vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.desc_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->desc || size != req_size)
		return -1;

	req_size = sizeof(struct vring_avail);
	req_size += sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.avail_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->avail || size != req_size)
		return -1;

	req_size = sizeof(struct vring_used);
	req_size += sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		req_size += sizeof(uint16_t);
	size = req_size;
	vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
						vq->ring_addrs.used_user_addr,
						&size, VHOST_ACCESS_RW);
	if (!vq->used || size != req_size)
		return -1;

	return 0;
}
/* Caller should have iotlb_lock read-locked */
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t req_size, size;

	req_size = sizeof(struct vring_packed_desc) * vq->size;
	size = req_size;
	vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->desc_packed || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->driver_event || size != req_size)
		return -1;

	req_size = sizeof(struct vring_packed_desc_event);
	size = req_size;
	vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
		vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
				&size, VHOST_ACCESS_RW);
	if (!vq->device_event || size != req_size)
		return -1;

	return 0;
}
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		goto out;

	if (vq_is_packed(dev)) {
		if (vring_translate_packed(dev, vq) < 0)
			return -1;
	} else {
		if (vring_translate_split(dev, vq) < 0)
			return -1;
	}
out:
	if (log_translate(dev, vq) < 0)
		return -1;

	vq->access_ok = true;

	return 0;
}
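/*
 * When VIRTIO_F_IOMMU_PLATFORM was not negotiated, the ring addresses have
 * already been turned into host virtual addresses when the front-end sent
 * them (see the address translation in vhost_user.c), so only the log
 * address may still need translating; with an IOMMU, every ring address is
 * an IOVA that must go through the IOTLB first.
 */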
void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_lock(vq);

	vq->access_ok = false;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->log_guest_addr = 0;

	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		vhost_user_iotlb_wr_unlock(vq);
}
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int numa_node = SOCKET_ID_ANY;

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
				dev->ifname, vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
		VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
				dev->ifname, vring_idx);
		return;
	}

	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
	vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;

#ifdef RTE_LIBRTE_VHOST_NUMA
	if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
			dev->ifname, rte_strerror(errno));
		numa_node = SOCKET_ID_ANY;
	}
#endif
	vq->numa_node = numa_node;

	vhost_user_iotlb_init(dev, vring_idx);
}
static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	int callfd;

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(ERR,
				"(%s) failed to reset vring, out of bound (%d)\n",
				dev->ifname, vring_idx);
		return;
	}

	vq = dev->virtqueue[vring_idx];
	if (!vq) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
				dev->ifname, vring_idx);
		return;
	}

	callfd = vq->callfd;
	init_vring_queue(dev, vring_idx);
	vq->callfd = callfd;
}
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
	struct vhost_virtqueue *vq;
	uint32_t i;

	/* Also allocate holes, if any, up to requested vring index. */
	for (i = 0; i <= vring_idx; i++) {
		if (dev->virtqueue[i])
			continue;

		vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
		if (vq == NULL) {
			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
					dev->ifname, i);
			return -1;
		}

		dev->virtqueue[i] = vq;
		init_vring_queue(dev, i);
		rte_spinlock_init(&vq->access_lock);
		vq->avail_wrap_counter = 1;
		vq->used_wrap_counter = 1;
		vq->signalled_used_valid = false;
	}

	dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);

	return 0;
}
/*
 * Reset some variables in device structure, while keeping a few
 * others untouched, such as vid, ifname, nr_vring: they
 * should stay the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->features = 0;
	dev->protocol_features = 0;
	dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

	for (i = 0; i < dev->nr_vring; i++)
		reset_vring_queue(dev, i);
}
/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
	struct virtio_net *dev;
	int i;

	pthread_mutex_lock(&vhost_dev_lock);
	for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
		if (vhost_devices[i] == NULL)
			break;
	}

	if (i == RTE_MAX_VHOST_DEVICE) {
		VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
	if (dev == NULL) {
		VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
		pthread_mutex_unlock(&vhost_dev_lock);
		return -1;
	}

	vhost_devices[i] = dev;
	pthread_mutex_unlock(&vhost_dev_lock);

	dev->vid = i;
	dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	dev->slave_req_fd = -1;
	dev->postcopy_ufd = -1;
	rte_spinlock_init(&dev->slave_req_lock);

	return i;
}
void
vhost_destroy_device_notify(struct virtio_net *dev)
{
	struct rte_vdpa_device *vdpa_dev;

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		vdpa_dev = dev->vdpa_dev;
		if (vdpa_dev)
			vdpa_dev->ops->dev_close(dev->vid);
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		dev->notify_ops->destroy_device(dev->vid);
	}
}
/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 1);
	free_device(dev);

	vhost_devices[vid] = NULL;
}
void
vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->vdpa_dev = vdpa_dev;
}
void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}
void
vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	if (enable)
		dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	else
		dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
	if (!compliant_ol_flags)
		dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
	else
		dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
	if (stats_enabled)
		dev->flags |= VIRTIO_DEV_STATS_ENABLED;
	else
		dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
}
void
vhost_enable_extbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->extbuf = 1;
}

void
vhost_enable_linearbuf(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	dev->linearbuf = 1;
}
int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || mtu == NULL)
		return -ENODEV;

	if (!(dev->flags & VIRTIO_DEV_READY))
		return -EAGAIN;

	if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
		return -ENOTSUP;

	*mtu = dev->mtu;

	return 0;
}
int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
	struct virtio_net *dev = get_device(vid);
	int numa_node;
	int ret;

	if (dev == NULL || numa_available() != 0)
		return -1;

	ret = get_mempolicy(&numa_node, NULL, 0, dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
			dev->ifname, rte_strerror(errno));
		return -1;
	}

	return numa_node;
#else
	RTE_SET_USED(vid);
	return -1;
#endif
}
uint16_t
rte_vhost_get_queue_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return 0;

	return dev->nr_vring;
}
int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || buf == NULL)
		return -1;

	len = RTE_MIN(len, sizeof(dev->ifname));

	strncpy(buf, dev->ifname, len);
	buf[len - 1] = '\0';

	return 0;
}
int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || features == NULL)
		return -1;

	*features = dev->features;
	return 0;
}

int
rte_vhost_get_negotiated_protocol_features(int vid,
		uint64_t *protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || protocol_features == NULL)
		return -1;

	*protocol_features = dev->protocol_features;
	return 0;
}
int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
	struct virtio_net *dev;
	struct rte_vhost_memory *m;
	size_t size;

	dev = get_device(vid);
	if (dev == NULL || mem == NULL)
		return -1;

	size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
	m = malloc(sizeof(struct rte_vhost_memory) + size);
	if (!m)
		return -1;

	m->nregions = dev->mem->nregions;
	memcpy(m->regions, dev->mem->regions, size);
	*mem = m;

	return 0;
}
int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
			  struct rte_vhost_vring *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL || vring == NULL)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vring->desc_packed = vq->desc_packed;
		vring->driver_event = vq->driver_event;
		vring->device_event = vq->device_event;
	} else {
		vring->desc = vq->desc;
		vring->avail = vq->avail;
		vring->used = vq->used;
	}
	vring->log_guest_addr = vq->log_guest_addr;

	vring->callfd = vq->callfd;
	vring->kickfd = vq->kickfd;
	vring->size = vq->size;

	return 0;
}
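/*
 * Illustrative usage sketch for the API above (hypothetical "vid", error
 * handling elided): fetch the vring of virtqueue 0 and interrupt the guest
 * through its call file descriptor, which is what vhost_vring_call() does
 * internally when event suppression allows it.
 *
 *	struct rte_vhost_vring vring;
 *
 *	if (rte_vhost_get_vhost_vring(vid, 0, &vring) == 0)
 *		eventfd_write(vring.callfd, (eventfd_t)1);
 */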
int
rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
				  struct rte_vhost_ring_inflight *vring)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (vq_is_packed(dev)) {
		if (unlikely(!vq->inflight_packed))
			return -1;

		vring->inflight_packed = vq->inflight_packed;
	} else {
		if (unlikely(!vq->inflight_split))
			return -1;

		vring->inflight_split = vq->inflight_split;
	}

	vring->resubmit_inflight = vq->resubmit_inflight;

	return 0;
}
int
rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
				  uint16_t idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->desc[idx].counter = vq->global_counter++;
	vq->inflight_split->desc[idx].inflight = 1;
	return 0;
}
int
rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
				   uint16_t head, uint16_t last,
				   uint16_t *inflight_entry)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	struct vring_packed_desc *desc;
	uint16_t old_free_head, free_head;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	desc = vq->desc_packed;
	old_free_head = inflight_info->old_free_head;
	if (unlikely(old_free_head >= vq->size))
		return -1;

	free_head = old_free_head;

	/* init header descriptor */
	inflight_info->desc[old_free_head].num = 0;
	inflight_info->desc[old_free_head].counter = vq->global_counter++;
	inflight_info->desc[old_free_head].inflight = 1;

	/* save desc entry in flight entry */
	while (head != ((last + 1) % vq->size)) {
		inflight_info->desc[old_free_head].num++;
		inflight_info->desc[free_head].addr = desc[head].addr;
		inflight_info->desc[free_head].len = desc[head].len;
		inflight_info->desc[free_head].flags = desc[head].flags;
		inflight_info->desc[free_head].id = desc[head].id;

		inflight_info->desc[old_free_head].last = free_head;
		free_head = inflight_info->desc[free_head].next;
		inflight_info->free_head = free_head;
		head = (head + 1) % vq->size;
	}

	inflight_info->old_free_head = free_head;
	*inflight_entry = old_free_head;

	return 0;
}
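/*
 * The inflight table mirrors a packed descriptor chain (head..last) so that,
 * after a backend crash and reconnection, the new backend can spot
 * descriptors that were fetched but never marked used and resubmit them.
 * One plausible call order in a backend, sketched here for illustration
 * (see the vhost-user inflight I/O specification for the normative flow):
 *
 *	uint16_t entry;
 *
 *	rte_vhost_set_inflight_desc_packed(vid, vring_idx, head, last, &entry);
 *	// ... process the chain, write it back to the used ring ...
 *	rte_vhost_set_last_inflight_io_packed(vid, vring_idx, entry);
 *	rte_vhost_clr_inflight_desc_packed(vid, vring_idx, entry);
 */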
int
rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
				  uint16_t last_used_idx, uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->desc[idx].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	vq->inflight_split->used_idx = last_used_idx;
	return 0;
}
int
rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
				   uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->desc[head].inflight = 0;

	rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

	inflight_info->old_free_head = inflight_info->free_head;
	inflight_info->old_used_idx = inflight_info->used_idx;
	inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;

	return 0;
}
int
rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
				     uint16_t idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	if (unlikely(!vq->inflight_split))
		return -1;

	if (unlikely(idx >= vq->size))
		return -1;

	vq->inflight_split->last_inflight_io = idx;
	return 0;
}
int
rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
				      uint16_t head)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t last;

	dev = get_device(vid);
	if (unlikely(!dev))
		return -1;

	if (unlikely(!(dev->protocol_features &
	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
		return 0;

	if (unlikely(!vq_is_packed(dev)))
		return -1;

	if (unlikely(vring_idx >= VHOST_MAX_VRING))
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (unlikely(!vq))
		return -1;

	inflight_info = vq->inflight_packed;
	if (unlikely(!inflight_info))
		return -1;

	if (unlikely(head >= vq->size))
		return -1;

	last = inflight_info->desc[head].last;
	if (unlikely(last >= vq->size))
		return -1;

	inflight_info->desc[last].next = inflight_info->free_head;
	inflight_info->free_head = head;
	inflight_info->used_idx += inflight_info->desc[head].num;
	if (inflight_info->used_idx >= inflight_info->desc_num) {
		inflight_info->used_idx -= inflight_info->desc_num;
		inflight_info->used_wrap_counter =
			!inflight_info->used_wrap_counter;
	}

	return 0;
}
int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (vring_idx >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	if (vq_is_packed(dev))
		vhost_vring_call_packed(dev, vq);
	else
		vhost_vring_call_split(dev, vq);

	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}
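/*
 * Typical use of the API above: a backend that has just placed completed
 * buffers on the used ring calls it to interrupt the guest, e.g.
 * rte_vhost_vring_call(vid, 0). The helpers it dispatches to honor
 * VIRTIO_RING_F_EVENT_IDX and packed-ring event suppression, so the guest
 * is only signalled when it asked to be.
 */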
uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint16_t ret = 0;

	dev = get_device(vid);
	if (!dev)
		return 0;

	if (queue_id >= VHOST_MAX_VRING)
		return 0;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}
static inline int
vhost_enable_notify_split(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	if (vq->used == NULL)
		return -1;

	if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
		if (enable)
			vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
		else
			vq->used->flags |= VRING_USED_F_NO_NOTIFY;
	} else {
		if (enable)
			vhost_avail_event(vq) = vq->last_avail_idx;
	}
	return 0;
}
static inline int
vhost_enable_notify_packed(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	uint16_t flags;

	if (vq->device_event == NULL)
		return -1;

	if (!enable) {
		vq->device_event->flags = VRING_EVENT_F_DISABLE;
		return 0;
	}

	flags = VRING_EVENT_F_ENABLE;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
		flags = VRING_EVENT_F_DESC;
		vq->device_event->off_wrap = vq->last_avail_idx |
			vq->avail_wrap_counter << 15;
	}

	rte_atomic_thread_fence(__ATOMIC_RELEASE);

	vq->device_event->flags = flags;
	return 0;
}
int
vhost_enable_guest_notification(struct virtio_net *dev,
		struct vhost_virtqueue *vq, int enable)
{
	/*
	 * If the virtqueue is not ready yet, the change will be
	 * applied when it becomes ready.
	 */
	if (!vq->ready)
		return 0;

	if (vq_is_packed(dev))
		return vhost_enable_notify_packed(dev, vq, enable);
	else
		return vhost_enable_notify_split(dev, vq, enable);
}
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	int ret;

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	rte_spinlock_lock(&vq->access_lock);

	vq->notif_enable = enable;
	ret = vhost_enable_guest_notification(dev, vq, enable);

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}
void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return;

	vhost_log_write(dev, addr, len);
}
void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
			 uint64_t offset, uint64_t len)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(vid);
	if (dev == NULL)
		return;

	if (vring_idx >= VHOST_MAX_VRING)
		return;
	vq = dev->virtqueue[vring_idx];
	if (!vq)
		return;

	vhost_log_used_vring(dev, vq, offset, len);
}
uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint32_t ret = 0;

	dev = get_device(vid);
	if (dev == NULL)
		return 0;

	if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
		VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
			dev->ifname, __func__, qid);
		return 0;
	}

	vq = dev->virtqueue[qid];
	if (vq == NULL)
		return 0;

	rte_spinlock_lock(&vq->access_lock);

	if (unlikely(!vq->enabled || vq->avail == NULL))
		goto out;

	ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;

out:
	rte_spinlock_unlock(&vq->access_lock);
	return ret;
}
struct rte_vdpa_device *
rte_vhost_get_vdpa_device(int vid)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return NULL;

	return dev->vdpa_dev;
}
int
rte_vhost_get_log_base(int vid, uint64_t *log_base,
		uint64_t *log_size)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || log_base == NULL || log_size == NULL)
		return -1;

	*log_base = dev->log_base;
	*log_size = dev->log_size;

	return 0;
}
int
rte_vhost_get_vring_base(int vid, uint16_t queue_id,
		uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		*last_avail_idx = (vq->avail_wrap_counter << 15) |
				  vq->last_avail_idx;
		*last_used_idx = (vq->used_wrap_counter << 15) |
				 vq->last_used_idx;
	} else {
		*last_avail_idx = vq->last_avail_idx;
		*last_used_idx = vq->last_used_idx;
	}

	return 0;
}
int
rte_vhost_set_vring_base(int vid, uint16_t queue_id,
		uint16_t last_avail_idx, uint16_t last_used_idx)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (vq_is_packed(dev)) {
		vq->last_avail_idx = last_avail_idx & 0x7fff;
		vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
		vq->last_used_idx = last_used_idx & 0x7fff;
		vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
	} else {
		vq->last_avail_idx = last_avail_idx;
		vq->last_used_idx = last_used_idx;
	}

	return 0;
}
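/*
 * For packed rings the 16-bit value encodes both the ring index (bits 0-14)
 * and the wrap counter (bit 15), matching the decoding above. For example,
 * last_avail_idx = 0x8005 means index 5 with avail_wrap_counter set.
 */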
int
rte_vhost_get_vring_base_from_inflight(int vid,
				       uint16_t queue_id,
				       uint16_t *last_avail_idx,
				       uint16_t *last_used_idx)
{
	struct rte_vhost_inflight_info_packed *inflight_info;
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (!vq)
		return -1;

	if (!vq_is_packed(dev))
		return -1;

	inflight_info = vq->inflight_packed;
	if (!inflight_info)
		return -1;

	*last_avail_idx = (inflight_info->old_used_wrap_counter << 15) |
			  inflight_info->old_used_idx;
	*last_used_idx = *last_avail_idx;

	return 0;
}
int
rte_vhost_extern_callback_register(int vid,
		struct rte_vhost_user_extern_ops const * const ops, void *ctx)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL || ops == NULL)
		return -1;

	dev->extern_ops = *ops;
	dev->extern_data = ctx;
	return 0;
}
static __rte_always_inline int
async_channel_register(int vid, uint16_t queue_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
	struct vhost_async *async;
	int node = vq->numa_node;

	if (unlikely(vq->async)) {
		VHOST_LOG_CONFIG(ERR,
				"(%s) async register failed: already registered (qid: %d)\n",
				dev->ifname, queue_id);
		return -1;
	}

	async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
	if (!async) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
				dev->ifname, queue_id);
		return -1;
	}

	async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_info) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
				dev->ifname, queue_id);
		goto out_free_async;
	}

	async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
			RTE_CACHE_LINE_SIZE, node);
	if (!async->pkts_cmpl_flag) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
				dev->ifname, queue_id);
		goto out_free_inflight;
	}

	if (vq_is_packed(dev)) {
		async->buffers_packed = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->buffers_packed) {
			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
					dev->ifname, queue_id);
			goto out_free_inflight;
		}
	} else {
		async->descs_split = rte_malloc_socket(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE, node);
		if (!async->descs_split) {
			VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
					dev->ifname, queue_id);
			goto out_free_inflight;
		}
	}

	vq->async = async;

	return 0;
out_free_inflight:
	/* rte_free(NULL) is a no-op, so this is safe on every error path */
	rte_free(async->pkts_cmpl_flag);
	rte_free(async->pkts_info);
out_free_async:
	rte_free(async);

	return -1;
}
int
rte_vhost_async_channel_register(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret;

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	rte_spinlock_lock(&vq->access_lock);
	ret = async_channel_register(vid, queue_id);
	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}

int
rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (unlikely(vq == NULL || !dev->async_copy))
		return -1;

	return async_channel_register(vid, queue_id);
}
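/*
 * Hedged usage sketch for async registration (assumes the socket was created
 * with RTE_VHOST_USER_ASYNC_COPY and a DMA vchannel was attached beforehand
 * via rte_vhost_async_dma_configure(); queue id 0 is arbitrary):
 *
 *	if (rte_vhost_async_channel_register(vid, 0) != 0)
 *		rte_exit(EXIT_FAILURE, "async register failed\n");
 *
 * The *_thread_unsafe variant skips vq->access_lock and is intended for
 * callers that already hold it, such as vring state change callbacks.
 */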
int
rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
				dev->ifname);
		return ret;
	}

	if (!vq->async) {
		ret = 0;
	} else if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
			dev->ifname);
	} else {
		vhost_free_async_mem(vq);
		ret = 0;
	}

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}
int
rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return -1;

	if (!vq->async)
		return 0;

	if (vq->async->pkts_inflight_n) {
		VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
		VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
			dev->ifname);
		return -1;
	}

	vhost_free_async_mem(vq);

	return 0;
}
int
rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
{
	struct rte_dma_info info;
	void *pkts_cmpl_flag_addr;
	uint16_t max_desc;

	if (!rte_dma_is_valid(dma_id)) {
		VHOST_LOG_CONFIG(ERR, "DMA %d is not found.\n", dma_id);
		return -1;
	}

	rte_dma_info_get(dma_id, &info);
	if (vchan_id >= info.max_vchans) {
		VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
		return -1;
	}

	if (!dma_copy_track[dma_id].vchans) {
		struct async_dma_vchan_info *vchans;

		vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
				RTE_CACHE_LINE_SIZE);
		if (vchans == NULL) {
			VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
					dma_id, vchan_id);
			return -1;
		}

		dma_copy_track[dma_id].vchans = vchans;
	}

	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
				vchan_id);
		return 0;
	}

	max_desc = info.max_desc;
	if (!rte_is_power_of_2(max_desc))
		max_desc = rte_align32pow2(max_desc);

	pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
	if (!pkts_cmpl_flag_addr) {
		VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d "
				"vChannel %u.\n", dma_id, vchan_id);

		if (dma_copy_track[dma_id].nr_vchans == 0) {
			rte_free(dma_copy_track[dma_id].vchans);
			dma_copy_track[dma_id].vchans = NULL;
		}
		return -1;
	}

	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
	dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
	dma_copy_track[dma_id].nr_vchans++;

	return 0;
}
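/*
 * A DMA channel is expected to be fully set up through the dmadev API before
 * being handed to vhost. A minimal sketch, assuming a valid "dma_id" and one
 * mem-to-mem vchannel (illustrative only):
 *
 *	struct rte_dma_conf dev_conf = { .nb_vchans = 1 };
 *	struct rte_dma_vchan_conf qconf = {
 *		.direction = RTE_DMA_DIR_MEM_TO_MEM,
 *		.nb_desc = 1024,
 *	};
 *
 *	rte_dma_configure(dma_id, &dev_conf);
 *	rte_dma_vchan_setup(dma_id, 0, &qconf);
 *	rte_dma_start(dma_id);
 *	rte_vhost_async_dma_configure(dma_id, 0);
 */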
int
rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!rte_spinlock_trylock(&vq->access_lock)) {
		VHOST_LOG_CONFIG(DEBUG,
			"(%s) failed to check in-flight packets. virtqueue busy.\n",
			dev->ifname);
		return ret;
	}

	if (vq->async)
		ret = vq->async->pkts_inflight_n;

	rte_spinlock_unlock(&vq->access_lock);

	return ret;
}
int
rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id)
{
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = get_device(vid);
	int ret = -1;

	if (dev == NULL)
		return ret;

	if (queue_id >= VHOST_MAX_VRING)
		return ret;

	vq = dev->virtqueue[queue_id];

	if (vq == NULL)
		return ret;

	if (!vq->async)
		return ret;

	ret = vq->async->pkts_inflight_n;

	return ret;
}
int
rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
		struct rte_vhost_power_monitor_cond *pmc)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (dev == NULL)
		return -1;
	if (queue_id >= VHOST_MAX_VRING)
		return -1;

	vq = dev->virtqueue[queue_id];
	if (vq == NULL)
		return -1;

	if (vq_is_packed(dev)) {
		struct vring_packed_desc *desc;
		desc = vq->desc_packed;
		pmc->addr = &desc[vq->last_avail_idx].flags;
		if (vq->avail_wrap_counter)
			pmc->val = VRING_DESC_F_AVAIL;
		else
			pmc->val = VRING_DESC_F_USED;
		pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
		pmc->size = sizeof(desc[vq->last_avail_idx].flags);
		pmc->match = 1;
	} else {
		pmc->addr = &vq->avail->idx;
		pmc->val = vq->last_avail_idx & (vq->size - 1);
		pmc->mask = vq->size - 1;
		pmc->size = sizeof(vq->avail->idx);
		pmc->match = 0;
	}

	return 0;
}
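/*
 * The condition filled in above feeds power-aware polling (e.g. a PMD's
 * monitor callback): the core monitors pmc->addr and sleeps until the masked
 * value changes relative to pmc->val, with pmc->match selecting the polarity
 * of the comparison. For a split ring this amounts to sleeping until the
 * guest bumps avail->idx past last_avail_idx; for a packed ring, until the
 * AVAIL/USED flag bits of the next descriptor flip to the expected wrap
 * state.
 */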
int
rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
		struct rte_vhost_stat_name *name, unsigned int size)
{
	struct virtio_net *dev = get_device(vid);
	unsigned int i;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	if (name == NULL || size < VHOST_NB_VQ_STATS)
		return VHOST_NB_VQ_STATS;

	for (i = 0; i < VHOST_NB_VQ_STATS; i++)
		snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s",
			 (queue_id & 1) ? "rx" : "tx",
			 queue_id / 2, vhost_vq_stat_strings[i].name);

	return VHOST_NB_VQ_STATS;
}
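/*
 * Hedged usage sketch, following the usual xstats two-call pattern (query
 * the count first, then fetch; "vid"/"qid" hypothetical, malloc checks
 * elided):
 *
 *	int n = rte_vhost_vring_stats_get_names(vid, qid, NULL, 0);
 *	struct rte_vhost_stat_name *names = malloc(n * sizeof(*names));
 *	struct rte_vhost_stat *stats = malloc(n * sizeof(*stats));
 *
 *	rte_vhost_vring_stats_get_names(vid, qid, names, n);
 *	rte_vhost_vring_stats_get(vid, qid, stats, n);
 *	for (int i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, stats[i].value);
 *
 * Per-virtqueue statistics are only maintained when the port was registered
 * with the stats flag enabled (RTE_VHOST_USER_NET_STATS_ENABLE).
 */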
int
rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
		struct rte_vhost_stat *stats, unsigned int n)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;
	unsigned int i;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	if (stats == NULL || n < VHOST_NB_VQ_STATS)
		return VHOST_NB_VQ_STATS;

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
	for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
		stats[i].value =
			*(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
		stats[i].id = i;
	}
	rte_spinlock_unlock(&vq->access_lock);

	return VHOST_NB_VQ_STATS;
}
int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
{
	struct virtio_net *dev = get_device(vid);
	struct vhost_virtqueue *vq;

	if (dev == NULL)
		return -1;

	if (queue_id >= dev->nr_vring)
		return -1;

	if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
		return -1;

	vq = dev->virtqueue[queue_id];

	rte_spinlock_lock(&vq->access_lock);
	memset(&vq->stats, 0, sizeof(vq->stats));
	rte_spinlock_unlock(&vq->access_lock);

	return 0;
}
RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);