vhost: add statistics for guest notification
[dpdk.git] / lib/vhost/vhost.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <linux/vhost.h>
6 #include <linux/virtio_net.h>
7 #include <stdint.h>
8 #include <stdlib.h>
9 #ifdef RTE_LIBRTE_VHOST_NUMA
10 #include <numa.h>
11 #include <numaif.h>
12 #endif
13
14 #include <rte_errno.h>
15 #include <rte_log.h>
16 #include <rte_memory.h>
17 #include <rte_malloc.h>
18 #include <rte_vhost.h>
19
20 #include "iotlb.h"
21 #include "vhost.h"
22 #include "vhost_user.h"
23
24 struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
25 pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
26
27 struct vhost_vq_stats_name_off {
28         char name[RTE_VHOST_STATS_NAME_SIZE];
29         unsigned int offset;
30 };
31
32 static const struct vhost_vq_stats_name_off vhost_vq_stat_strings[] = {
33         {"good_packets",           offsetof(struct vhost_virtqueue, stats.packets)},
34         {"good_bytes",             offsetof(struct vhost_virtqueue, stats.bytes)},
35         {"multicast_packets",      offsetof(struct vhost_virtqueue, stats.multicast)},
36         {"broadcast_packets",      offsetof(struct vhost_virtqueue, stats.broadcast)},
37         {"undersize_packets",      offsetof(struct vhost_virtqueue, stats.size_bins[0])},
38         {"size_64_packets",        offsetof(struct vhost_virtqueue, stats.size_bins[1])},
39         {"size_65_127_packets",    offsetof(struct vhost_virtqueue, stats.size_bins[2])},
40         {"size_128_255_packets",   offsetof(struct vhost_virtqueue, stats.size_bins[3])},
41         {"size_256_511_packets",   offsetof(struct vhost_virtqueue, stats.size_bins[4])},
42         {"size_512_1023_packets",  offsetof(struct vhost_virtqueue, stats.size_bins[5])},
43         {"size_1024_1518_packets", offsetof(struct vhost_virtqueue, stats.size_bins[6])},
44         {"size_1519_max_packets",  offsetof(struct vhost_virtqueue, stats.size_bins[7])},
45         {"guest_notifications",    offsetof(struct vhost_virtqueue, stats.guest_notifications)},
46 };
47
48 #define VHOST_NB_VQ_STATS RTE_DIM(vhost_vq_stat_strings)
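/*
 * Each entry above pairs an exported counter name with the byte offset of
 * the corresponding uint64_t counter inside struct vhost_virtqueue, so the
 * stats getter can read every counter generically instead of hard-coding
 * one accessor per statistic.  A minimal sketch of that lookup pattern
 * (illustrative only; the actual getter lives further down the library):
 *
 *	for (i = 0; i < VHOST_NB_VQ_STATS; i++)
 *		value = *(uint64_t *)(((char *)vq) +
 *				vhost_vq_stat_strings[i].offset);
 */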
49
50 /* Called with iotlb_lock read-locked */
51 uint64_t
52 __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
53                     uint64_t iova, uint64_t *size, uint8_t perm)
54 {
55         uint64_t vva, tmp_size;
56
57         if (unlikely(!*size))
58                 return 0;
59
60         tmp_size = *size;
61
62         vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
63         if (tmp_size == *size)
64                 return vva;
65
66         iova += tmp_size;
67
68         if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
69                 /*
70                  * iotlb_lock is read-locked for a full burst,
71                  * but it only protects the iotlb cache.
72                  * In case of IOTLB miss, we might block on the socket,
73                  * which could cause a deadlock with QEMU if an IOTLB update
74                  * is being handled. We can safely unlock here to avoid it.
75                  */
76                 vhost_user_iotlb_rd_unlock(vq);
77
78                 vhost_user_iotlb_pending_insert(dev, vq, iova, perm);
79                 if (vhost_user_iotlb_miss(dev, iova, perm)) {
80                         VHOST_LOG_DATA(ERR, "(%s) IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
81                                 dev->ifname, iova);
82                         vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
83                 }
84
85                 vhost_user_iotlb_rd_lock(vq);
86         }
87
88         return 0;
89 }
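/*
 * Note on the contract above: on a full IOTLB hit the host virtual address
 * is returned and *size is left untouched; on a (partial) miss, an IOTLB
 * miss request is sent to the front-end for the uncovered range and 0 is
 * returned, so callers abort the current operation and retry it once the
 * corresponding IOTLB update has been received.
 */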
90
91 #define VHOST_LOG_PAGE  4096
92
93 /*
94  * Atomically set a bit in memory.
95  */
96 static __rte_always_inline void
97 vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
98 {
99 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
100         /*
101          * __sync_ built-ins are deprecated, but __atomic_ ones
102          * are poorly optimized in older GCC versions.
103          */
104         __sync_fetch_and_or_1(addr, (1U << nr));
105 #else
106         __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
107 #endif
108 }
109
110 static __rte_always_inline void
111 vhost_log_page(uint8_t *log_base, uint64_t page)
112 {
113         vhost_set_bit(page % 8, &log_base[page / 8]);
114 }
115
116 void
117 __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
118 {
119         uint64_t page;
120
121         if (unlikely(!dev->log_base || !len))
122                 return;
123
124         if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
125                 return;
126
127         /* To make sure guest memory updates are committed before logging */
128         rte_atomic_thread_fence(__ATOMIC_RELEASE);
129
130         page = addr / VHOST_LOG_PAGE;
131         while (page * VHOST_LOG_PAGE < addr + len) {
132                 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
133                 page += 1;
134         }
135 }
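/*
 * Dirty-page log layout used above: one bit per VHOST_LOG_PAGE (4 KiB) page
 * of guest memory, eight pages per log byte.  For example, logging a write
 * at addr = 0x2800 with len = 0x1000 marks guest pages 2 and 3 dirty, i.e.
 * sets bits 2 and 3 of log byte 0.
 */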
136
137 void
138 __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
139                              uint64_t iova, uint64_t len)
140 {
141         uint64_t hva, gpa, map_len;
142         map_len = len;
143
144         hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
145         if (map_len != len) {
146                 VHOST_LOG_DATA(ERR,
147                         "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
148                         dev->ifname, iova);
149                 return;
150         }
151
152         gpa = hva_to_gpa(dev, hva, len);
153         if (gpa)
154                 __vhost_log_write(dev, gpa, len);
155 }
156
157 void
158 __vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
159 {
160         unsigned long *log_base;
161         int i;
162
163         if (unlikely(!dev->log_base))
164                 return;
165
166         /* No cache, nothing to sync */
167         if (unlikely(!vq->log_cache))
168                 return;
169
170         rte_atomic_thread_fence(__ATOMIC_RELEASE);
171
172         log_base = (unsigned long *)(uintptr_t)dev->log_base;
173
174         for (i = 0; i < vq->log_cache_nb_elem; i++) {
175                 struct log_cache_entry *elem = vq->log_cache + i;
176
177 #if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
178                 /*
179                  * '__sync' builtins are deprecated, but '__atomic' ones
180                  * are poorly optimized in older GCC versions.
181                  */
182                 __sync_fetch_and_or(log_base + elem->offset, elem->val);
183 #else
184                 __atomic_fetch_or(log_base + elem->offset, elem->val,
185                                 __ATOMIC_RELAXED);
186 #endif
187         }
188
189         rte_atomic_thread_fence(__ATOMIC_RELEASE);
190
191         vq->log_cache_nb_elem = 0;
192 }
193
194 static __rte_always_inline void
195 vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
196                         uint64_t page)
197 {
198         uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
199         uint32_t offset = page / (sizeof(unsigned long) << 3);
200         int i;
201
202         if (unlikely(!vq->log_cache)) {
203                 /* No logging cache allocated, write dirty log map directly */
204                 rte_atomic_thread_fence(__ATOMIC_RELEASE);
205                 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
206
207                 return;
208         }
209
210         for (i = 0; i < vq->log_cache_nb_elem; i++) {
211                 struct log_cache_entry *elem = vq->log_cache + i;
212
213                 if (elem->offset == offset) {
214                         elem->val |= (1UL << bit_nr);
215                         return;
216                 }
217         }
218
219         if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
220                 /*
221                  * No more room for a new log cache entry,
222                  * so write the dirty log map directly.
223                  */
224                 rte_atomic_thread_fence(__ATOMIC_RELEASE);
225                 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
226
227                 return;
228         }
229
230         vq->log_cache[i].offset = offset;
231         vq->log_cache[i].val = (1UL << bit_nr);
232         vq->log_cache_nb_elem++;
233 }
234
235 void
236 __vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
237                         uint64_t addr, uint64_t len)
238 {
239         uint64_t page;
240
241         if (unlikely(!dev->log_base || !len))
242                 return;
243
244         if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
245                 return;
246
247         page = addr / VHOST_LOG_PAGE;
248         while (page * VHOST_LOG_PAGE < addr + len) {
249                 vhost_log_cache_page(dev, vq, page);
250                 page += 1;
251         }
252 }
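/*
 * The log cache used above batches dirty-bitmap updates: each of the (at
 * most VHOST_LOG_CACHE_NR) entries accumulates one unsigned long worth of
 * log bits, keyed by its word offset into the shared log.  The cached bits
 * are merged into the shared log with an atomic OR by
 * __vhost_log_cache_sync(), defined earlier in this file.
 */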
253
254 void
255 __vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
256                              uint64_t iova, uint64_t len)
257 {
258         uint64_t hva, gpa, map_len;
259         map_len = len;
260
261         hva = __vhost_iova_to_vva(dev, vq, iova, &map_len, VHOST_ACCESS_RW);
262         if (map_len != len) {
263                 VHOST_LOG_DATA(ERR,
264                         "(%s) failed to write log for IOVA 0x%" PRIx64 ". No IOTLB entry found\n",
265                         dev->ifname, iova);
266                 return;
267         }
268
269         gpa = hva_to_gpa(dev, hva, len);
270         if (gpa)
271                 __vhost_log_cache_write(dev, vq, gpa, len);
272 }
273
274 void *
275 vhost_alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
276                 uint64_t desc_addr, uint64_t desc_len)
277 {
278         void *idesc;
279         uint64_t src, dst;
280         uint64_t len, remain = desc_len;
281
282         idesc = rte_malloc_socket(__func__, desc_len, 0, vq->numa_node);
283         if (unlikely(!idesc))
284                 return NULL;
285
286         dst = (uint64_t)(uintptr_t)idesc;
287
288         while (remain) {
289                 len = remain;
290                 src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
291                                 VHOST_ACCESS_RO);
292                 if (unlikely(!src || !len)) {
293                         rte_free(idesc);
294                         return NULL;
295                 }
296
297                 rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
298
299                 remain -= len;
300                 dst += len;
301                 desc_addr += len;
302         }
303
304         return idesc;
305 }
306
307 void
308 cleanup_vq(struct vhost_virtqueue *vq, int destroy)
309 {
310         if ((vq->callfd >= 0) && (destroy != 0))
311                 close(vq->callfd);
312         if (vq->kickfd >= 0)
313                 close(vq->kickfd);
314 }
315
316 void
317 cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
318 {
319         if (!(dev->protocol_features &
320             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
321                 return;
322
323         if (vq_is_packed(dev)) {
324                 if (vq->inflight_packed)
325                         vq->inflight_packed = NULL;
326         } else {
327                 if (vq->inflight_split)
328                         vq->inflight_split = NULL;
329         }
330
331         if (vq->resubmit_inflight) {
332                 if (vq->resubmit_inflight->resubmit_list) {
333                         rte_free(vq->resubmit_inflight->resubmit_list);
334                         vq->resubmit_inflight->resubmit_list = NULL;
335                 }
336                 rte_free(vq->resubmit_inflight);
337                 vq->resubmit_inflight = NULL;
338         }
339 }
340
341 /*
342  * Unmap any memory, close any file descriptors and
343  * free any memory owned by a device.
344  */
345 void
346 cleanup_device(struct virtio_net *dev, int destroy)
347 {
348         uint32_t i;
349
350         vhost_backend_cleanup(dev);
351
352         for (i = 0; i < dev->nr_vring; i++) {
353                 cleanup_vq(dev->virtqueue[i], destroy);
354                 cleanup_vq_inflight(dev, dev->virtqueue[i]);
355         }
356 }
357
358 static void
359 vhost_free_async_mem(struct vhost_virtqueue *vq)
360 {
361         if (!vq->async)
362                 return;
363
364         rte_free(vq->async->pkts_info);
365         rte_free(vq->async->pkts_cmpl_flag);
366
367         rte_free(vq->async->buffers_packed);
368         vq->async->buffers_packed = NULL;
369         rte_free(vq->async->descs_split);
370         vq->async->descs_split = NULL;
371
372         rte_free(vq->async);
373         vq->async = NULL;
374 }
375
376 void
377 free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq)
378 {
379         if (vq_is_packed(dev))
380                 rte_free(vq->shadow_used_packed);
381         else
382                 rte_free(vq->shadow_used_split);
383
384         vhost_free_async_mem(vq);
385         rte_free(vq->batch_copy_elems);
386         rte_mempool_free(vq->iotlb_pool);
387         rte_free(vq->log_cache);
388         rte_free(vq);
389 }
390
391 /*
392  * Release virtqueues and device memory.
393  */
394 static void
395 free_device(struct virtio_net *dev)
396 {
397         uint32_t i;
398
399         for (i = 0; i < dev->nr_vring; i++)
400                 free_vq(dev, dev->virtqueue[i]);
401
402         rte_free(dev);
403 }
404
405 static __rte_always_inline int
406 log_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
407 {
408         if (likely(!(vq->ring_addrs.flags & (1 << VHOST_VRING_F_LOG))))
409                 return 0;
410
411         vq->log_guest_addr = translate_log_addr(dev, vq,
412                                                 vq->ring_addrs.log_guest_addr);
413         if (vq->log_guest_addr == 0)
414                 return -1;
415
416         return 0;
417 }
418
419 /*
420  * Converts the vring log address to a GPA.
421  * If IOMMU is enabled, the log address is an IOVA;
422  * if IOMMU is not enabled, the log address is already a GPA.
423  *
424  * Caller should have iotlb_lock read-locked
425  */
426 uint64_t
427 translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
428                 uint64_t log_addr)
429 {
430         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
431                 const uint64_t exp_size = sizeof(uint64_t);
432                 uint64_t hva, gpa;
433                 uint64_t size = exp_size;
434
435                 hva = vhost_iova_to_vva(dev, vq, log_addr,
436                                         &size, VHOST_ACCESS_RW);
437
438                 if (size != exp_size)
439                         return 0;
440
441                 gpa = hva_to_gpa(dev, hva, exp_size);
442                 if (!gpa) {
443                         VHOST_LOG_DATA(ERR,
444                                 "(%s) failed to find GPA for log_addr: 0x%"
445                                 PRIx64 " hva: 0x%" PRIx64 "\n",
446                                 dev->ifname, log_addr, hva);
447                         return 0;
448                 }
449                 return gpa;
450
451         } else
452                 return log_addr;
453 }
454
455 /* Caller should have iotlb_lock read-locked */
456 static int
457 vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
458 {
459         uint64_t req_size, size;
460
461         req_size = sizeof(struct vring_desc) * vq->size;
462         size = req_size;
463         vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
464                                                 vq->ring_addrs.desc_user_addr,
465                                                 &size, VHOST_ACCESS_RW);
466         if (!vq->desc || size != req_size)
467                 return -1;
468
469         req_size = sizeof(struct vring_avail);
470         req_size += sizeof(uint16_t) * vq->size;
471         if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
472                 req_size += sizeof(uint16_t);
473         size = req_size;
474         vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
475                                                 vq->ring_addrs.avail_user_addr,
476                                                 &size, VHOST_ACCESS_RW);
477         if (!vq->avail || size != req_size)
478                 return -1;
479
480         req_size = sizeof(struct vring_used);
481         req_size += sizeof(struct vring_used_elem) * vq->size;
482         if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
483                 req_size += sizeof(uint16_t);
484         size = req_size;
485         vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
486                                                 vq->ring_addrs.used_user_addr,
487                                                 &size, VHOST_ACCESS_RW);
488         if (!vq->used || size != req_size)
489                 return -1;
490
491         return 0;
492 }
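/*
 * Size check example for the split layout above, assuming a 256-entry ring
 * with VIRTIO_RING_F_EVENT_IDX negotiated: the descriptor table spans
 * 16 * 256 = 4096 bytes, the avail ring 4 + 2 * 256 + 2 = 518 bytes and the
 * used ring 4 + 8 * 256 + 2 = 2054 bytes.  Each area must be contiguously
 * mapped in host virtual memory, otherwise the translation above fails.
 */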
493
494 /* Caller should have iotlb_lock read-locked */
495 static int
496 vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
497 {
498         uint64_t req_size, size;
499
500         req_size = sizeof(struct vring_packed_desc) * vq->size;
501         size = req_size;
502         vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
503                 vhost_iova_to_vva(dev, vq, vq->ring_addrs.desc_user_addr,
504                                 &size, VHOST_ACCESS_RW);
505         if (!vq->desc_packed || size != req_size)
506                 return -1;
507
508         req_size = sizeof(struct vring_packed_desc_event);
509         size = req_size;
510         vq->driver_event = (struct vring_packed_desc_event *)(uintptr_t)
511                 vhost_iova_to_vva(dev, vq, vq->ring_addrs.avail_user_addr,
512                                 &size, VHOST_ACCESS_RW);
513         if (!vq->driver_event || size != req_size)
514                 return -1;
515
516         req_size = sizeof(struct vring_packed_desc_event);
517         size = req_size;
518         vq->device_event = (struct vring_packed_desc_event *)(uintptr_t)
519                 vhost_iova_to_vva(dev, vq, vq->ring_addrs.used_user_addr,
520                                 &size, VHOST_ACCESS_RW);
521         if (!vq->device_event || size != req_size)
522                 return -1;
523
524         return 0;
525 }
526
527 int
528 vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
529 {
530
531         if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
532                 return -1;
533
534         if (vq_is_packed(dev)) {
535                 if (vring_translate_packed(dev, vq) < 0)
536                         return -1;
537         } else {
538                 if (vring_translate_split(dev, vq) < 0)
539                         return -1;
540         }
541
542         if (log_translate(dev, vq) < 0)
543                 return -1;
544
545         vq->access_ok = true;
546
547         return 0;
548 }
549
550 void
551 vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
552 {
553         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
554                 vhost_user_iotlb_wr_lock(vq);
555
556         vq->access_ok = false;
557         vq->desc = NULL;
558         vq->avail = NULL;
559         vq->used = NULL;
560         vq->log_guest_addr = 0;
561
562         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
563                 vhost_user_iotlb_wr_unlock(vq);
564 }
565
566 static void
567 init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
568 {
569         struct vhost_virtqueue *vq;
570         int numa_node = SOCKET_ID_ANY;
571
572         if (vring_idx >= VHOST_MAX_VRING) {
573                 VHOST_LOG_CONFIG(ERR, "(%s) failed to init vring, out of bound (%d)\n",
574                                 dev->ifname, vring_idx);
575                 return;
576         }
577
578         vq = dev->virtqueue[vring_idx];
579         if (!vq) {
580                 VHOST_LOG_CONFIG(ERR, "(%s) virtqueue not allocated (%d)\n",
581                                 dev->ifname, vring_idx);
582                 return;
583         }
584
585         memset(vq, 0, sizeof(struct vhost_virtqueue));
586
587         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
588         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
589         vq->notif_enable = VIRTIO_UNINITIALIZED_NOTIF;
590
591 #ifdef RTE_LIBRTE_VHOST_NUMA
592         if (get_mempolicy(&numa_node, NULL, 0, vq, MPOL_F_NODE | MPOL_F_ADDR)) {
593                 VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
594                         dev->ifname, rte_strerror(errno));
595                 numa_node = SOCKET_ID_ANY;
596         }
597 #endif
598         vq->numa_node = numa_node;
599
600         vhost_user_iotlb_init(dev, vring_idx);
601 }
602
603 static void
604 reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
605 {
606         struct vhost_virtqueue *vq;
607         int callfd;
608
609         if (vring_idx >= VHOST_MAX_VRING) {
610                 VHOST_LOG_CONFIG(ERR,
611                                 "(%s) failed to reset vring, out of bound (%d)\n",
612                                 dev->ifname, vring_idx);
613                 return;
614         }
615
616         vq = dev->virtqueue[vring_idx];
617         if (!vq) {
618                 VHOST_LOG_CONFIG(ERR, "(%s) failed to reset vring, virtqueue not allocated (%d)\n",
619                                 dev->ifname, vring_idx);
620                 return;
621         }
622
623         callfd = vq->callfd;
624         init_vring_queue(dev, vring_idx);
625         vq->callfd = callfd;
626 }
627
628 int
629 alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
630 {
631         struct vhost_virtqueue *vq;
632         uint32_t i;
633
634         /* Also allocate holes, if any, up to requested vring index. */
635         for (i = 0; i <= vring_idx; i++) {
636                 if (dev->virtqueue[i])
637                         continue;
638
639                 vq = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), 0);
640                 if (vq == NULL) {
641                         VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate memory for vring %u.\n",
642                                         dev->ifname, i);
643                         return -1;
644                 }
645
646                 dev->virtqueue[i] = vq;
647                 init_vring_queue(dev, i);
648                 rte_spinlock_init(&vq->access_lock);
649                 vq->avail_wrap_counter = 1;
650                 vq->used_wrap_counter = 1;
651                 vq->signalled_used_valid = false;
652         }
653
654         dev->nr_vring = RTE_MAX(dev->nr_vring, vring_idx + 1);
655
656         return 0;
657 }
658
659 /*
660  * Reset some variables in the device structure, while keeping a few
661  * others untouched, such as vid, ifname and nr_vring: they
662  * should remain the same unless the device is removed.
663  */
664 void
665 reset_device(struct virtio_net *dev)
666 {
667         uint32_t i;
668
669         dev->features = 0;
670         dev->protocol_features = 0;
671         dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
672
673         for (i = 0; i < dev->nr_vring; i++)
674                 reset_vring_queue(dev, i);
675 }
676
677 /*
678  * Invoked when a new vhost-user connection is established (i.e. when
679  * a new virtio device is being attached).
680  */
681 int
682 vhost_new_device(void)
683 {
684         struct virtio_net *dev;
685         int i;
686
687         pthread_mutex_lock(&vhost_dev_lock);
688         for (i = 0; i < RTE_MAX_VHOST_DEVICE; i++) {
689                 if (vhost_devices[i] == NULL)
690                         break;
691         }
692
693         if (i == RTE_MAX_VHOST_DEVICE) {
694                 VHOST_LOG_CONFIG(ERR, "failed to find a free slot for new device.\n");
695                 pthread_mutex_unlock(&vhost_dev_lock);
696                 return -1;
697         }
698
699         dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
700         if (dev == NULL) {
701                 VHOST_LOG_CONFIG(ERR, "failed to allocate memory for new device.\n");
702                 pthread_mutex_unlock(&vhost_dev_lock);
703                 return -1;
704         }
705
706         vhost_devices[i] = dev;
707         pthread_mutex_unlock(&vhost_dev_lock);
708
709         dev->vid = i;
710         dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
711         dev->slave_req_fd = -1;
712         dev->postcopy_ufd = -1;
713         rte_spinlock_init(&dev->slave_req_lock);
714
715         return i;
716 }
717
718 void
719 vhost_destroy_device_notify(struct virtio_net *dev)
720 {
721         struct rte_vdpa_device *vdpa_dev;
722
723         if (dev->flags & VIRTIO_DEV_RUNNING) {
724                 vdpa_dev = dev->vdpa_dev;
725                 if (vdpa_dev)
726                         vdpa_dev->ops->dev_close(dev->vid);
727                 dev->flags &= ~VIRTIO_DEV_RUNNING;
728                 dev->notify_ops->destroy_device(dev->vid);
729         }
730 }
731
732 /*
733  * Invoked when the vhost-user connection is broken (i.e. when
734  * the virtio device is being detached).
735  */
736 void
737 vhost_destroy_device(int vid)
738 {
739         struct virtio_net *dev = get_device(vid);
740
741         if (dev == NULL)
742                 return;
743
744         vhost_destroy_device_notify(dev);
745
746         cleanup_device(dev, 1);
747         free_device(dev);
748
749         vhost_devices[vid] = NULL;
750 }
751
752 void
753 vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
754 {
755         struct virtio_net *dev = get_device(vid);
756
757         if (dev == NULL)
758                 return;
759
760         dev->vdpa_dev = vdpa_dev;
761 }
762
763 void
764 vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
765 {
766         struct virtio_net *dev;
767         unsigned int len;
768
769         dev = get_device(vid);
770         if (dev == NULL)
771                 return;
772
773         len = if_len > sizeof(dev->ifname) ?
774                 sizeof(dev->ifname) : if_len;
775
776         strncpy(dev->ifname, if_name, len);
777         dev->ifname[sizeof(dev->ifname) - 1] = '\0';
778 }
779
780 void
781 vhost_setup_virtio_net(int vid, bool enable, bool compliant_ol_flags, bool stats_enabled)
782 {
783         struct virtio_net *dev = get_device(vid);
784
785         if (dev == NULL)
786                 return;
787
788         if (enable)
789                 dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
790         else
791                 dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
792         if (!compliant_ol_flags)
793                 dev->flags |= VIRTIO_DEV_LEGACY_OL_FLAGS;
794         else
795                 dev->flags &= ~VIRTIO_DEV_LEGACY_OL_FLAGS;
796         if (stats_enabled)
797                 dev->flags |= VIRTIO_DEV_STATS_ENABLED;
798         else
799                 dev->flags &= ~VIRTIO_DEV_STATS_ENABLED;
800 }
801
802 void
803 vhost_enable_extbuf(int vid)
804 {
805         struct virtio_net *dev = get_device(vid);
806
807         if (dev == NULL)
808                 return;
809
810         dev->extbuf = 1;
811 }
812
813 void
814 vhost_enable_linearbuf(int vid)
815 {
816         struct virtio_net *dev = get_device(vid);
817
818         if (dev == NULL)
819                 return;
820
821         dev->linearbuf = 1;
822 }
823
824 int
825 rte_vhost_get_mtu(int vid, uint16_t *mtu)
826 {
827         struct virtio_net *dev = get_device(vid);
828
829         if (dev == NULL || mtu == NULL)
830                 return -ENODEV;
831
832         if (!(dev->flags & VIRTIO_DEV_READY))
833                 return -EAGAIN;
834
835         if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
836                 return -ENOTSUP;
837
838         *mtu = dev->mtu;
839
840         return 0;
841 }
842
843 int
844 rte_vhost_get_numa_node(int vid)
845 {
846 #ifdef RTE_LIBRTE_VHOST_NUMA
847         struct virtio_net *dev = get_device(vid);
848         int numa_node;
849         int ret;
850
851         if (dev == NULL || numa_available() != 0)
852                 return -1;
853
854         ret = get_mempolicy(&numa_node, NULL, 0, dev,
855                             MPOL_F_NODE | MPOL_F_ADDR);
856         if (ret < 0) {
857                 VHOST_LOG_CONFIG(ERR, "(%s) failed to query numa node: %s\n",
858                         dev->ifname, rte_strerror(errno));
859                 return -1;
860         }
861
862         return numa_node;
863 #else
864         RTE_SET_USED(vid);
865         return -1;
866 #endif
867 }
868
869 uint32_t
870 rte_vhost_get_queue_num(int vid)
871 {
872         struct virtio_net *dev = get_device(vid);
873
874         if (dev == NULL)
875                 return 0;
876
877         return dev->nr_vring / 2;
878 }
879
880 uint16_t
881 rte_vhost_get_vring_num(int vid)
882 {
883         struct virtio_net *dev = get_device(vid);
884
885         if (dev == NULL)
886                 return 0;
887
888         return dev->nr_vring;
889 }
890
891 int
892 rte_vhost_get_ifname(int vid, char *buf, size_t len)
893 {
894         struct virtio_net *dev = get_device(vid);
895
896         if (dev == NULL || buf == NULL)
897                 return -1;
898
899         len = RTE_MIN(len, sizeof(dev->ifname));
900
901         strncpy(buf, dev->ifname, len);
902         buf[len - 1] = '\0';
903
904         return 0;
905 }
906
907 int
908 rte_vhost_get_negotiated_features(int vid, uint64_t *features)
909 {
910         struct virtio_net *dev;
911
912         dev = get_device(vid);
913         if (dev == NULL || features == NULL)
914                 return -1;
915
916         *features = dev->features;
917         return 0;
918 }
919
920 int
921 rte_vhost_get_negotiated_protocol_features(int vid,
922                                            uint64_t *protocol_features)
923 {
924         struct virtio_net *dev;
925
926         dev = get_device(vid);
927         if (dev == NULL || protocol_features == NULL)
928                 return -1;
929
930         *protocol_features = dev->protocol_features;
931         return 0;
932 }
933
934 int
935 rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
936 {
937         struct virtio_net *dev;
938         struct rte_vhost_memory *m;
939         size_t size;
940
941         dev = get_device(vid);
942         if (dev == NULL || mem == NULL)
943                 return -1;
944
945         size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
946         m = malloc(sizeof(struct rte_vhost_memory) + size);
947         if (!m)
948                 return -1;
949
950         m->nregions = dev->mem->nregions;
951         memcpy(m->regions, dev->mem->regions, size);
952         *mem = m;
953
954         return 0;
955 }
956
957 int
958 rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
959                           struct rte_vhost_vring *vring)
960 {
961         struct virtio_net *dev;
962         struct vhost_virtqueue *vq;
963
964         dev = get_device(vid);
965         if (dev == NULL || vring == NULL)
966                 return -1;
967
968         if (vring_idx >= VHOST_MAX_VRING)
969                 return -1;
970
971         vq = dev->virtqueue[vring_idx];
972         if (!vq)
973                 return -1;
974
975         if (vq_is_packed(dev)) {
976                 vring->desc_packed = vq->desc_packed;
977                 vring->driver_event = vq->driver_event;
978                 vring->device_event = vq->device_event;
979         } else {
980                 vring->desc = vq->desc;
981                 vring->avail = vq->avail;
982                 vring->used = vq->used;
983         }
984         vring->log_guest_addr  = vq->log_guest_addr;
985
986         vring->callfd  = vq->callfd;
987         vring->kickfd  = vq->kickfd;
988         vring->size    = vq->size;
989
990         return 0;
991 }
992
993 int
994 rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
995                                   struct rte_vhost_ring_inflight *vring)
996 {
997         struct virtio_net *dev;
998         struct vhost_virtqueue *vq;
999
1000         dev = get_device(vid);
1001         if (unlikely(!dev))
1002                 return -1;
1003
1004         if (vring_idx >= VHOST_MAX_VRING)
1005                 return -1;
1006
1007         vq = dev->virtqueue[vring_idx];
1008         if (unlikely(!vq))
1009                 return -1;
1010
1011         if (vq_is_packed(dev)) {
1012                 if (unlikely(!vq->inflight_packed))
1013                         return -1;
1014
1015                 vring->inflight_packed = vq->inflight_packed;
1016         } else {
1017                 if (unlikely(!vq->inflight_split))
1018                         return -1;
1019
1020                 vring->inflight_split = vq->inflight_split;
1021         }
1022
1023         vring->resubmit_inflight = vq->resubmit_inflight;
1024
1025         return 0;
1026 }
1027
1028 int
1029 rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
1030                                   uint16_t idx)
1031 {
1032         struct vhost_virtqueue *vq;
1033         struct virtio_net *dev;
1034
1035         dev = get_device(vid);
1036         if (unlikely(!dev))
1037                 return -1;
1038
1039         if (unlikely(!(dev->protocol_features &
1040             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1041                 return 0;
1042
1043         if (unlikely(vq_is_packed(dev)))
1044                 return -1;
1045
1046         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1047                 return -1;
1048
1049         vq = dev->virtqueue[vring_idx];
1050         if (unlikely(!vq))
1051                 return -1;
1052
1053         if (unlikely(!vq->inflight_split))
1054                 return -1;
1055
1056         if (unlikely(idx >= vq->size))
1057                 return -1;
1058
1059         vq->inflight_split->desc[idx].counter = vq->global_counter++;
1060         vq->inflight_split->desc[idx].inflight = 1;
1061         return 0;
1062 }
1063
1064 int
1065 rte_vhost_set_inflight_desc_packed(int vid, uint16_t vring_idx,
1066                                    uint16_t head, uint16_t last,
1067                                    uint16_t *inflight_entry)
1068 {
1069         struct rte_vhost_inflight_info_packed *inflight_info;
1070         struct virtio_net *dev;
1071         struct vhost_virtqueue *vq;
1072         struct vring_packed_desc *desc;
1073         uint16_t old_free_head, free_head;
1074
1075         dev = get_device(vid);
1076         if (unlikely(!dev))
1077                 return -1;
1078
1079         if (unlikely(!(dev->protocol_features &
1080             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1081                 return 0;
1082
1083         if (unlikely(!vq_is_packed(dev)))
1084                 return -1;
1085
1086         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1087                 return -1;
1088
1089         vq = dev->virtqueue[vring_idx];
1090         if (unlikely(!vq))
1091                 return -1;
1092
1093         inflight_info = vq->inflight_packed;
1094         if (unlikely(!inflight_info))
1095                 return -1;
1096
1097         if (unlikely(head >= vq->size))
1098                 return -1;
1099
1100         desc = vq->desc_packed;
1101         old_free_head = inflight_info->old_free_head;
1102         if (unlikely(old_free_head >= vq->size))
1103                 return -1;
1104
1105         free_head = old_free_head;
1106
1107         /* init header descriptor */
1108         inflight_info->desc[old_free_head].num = 0;
1109         inflight_info->desc[old_free_head].counter = vq->global_counter++;
1110         inflight_info->desc[old_free_head].inflight = 1;
1111
1112         /* save desc entry in flight entry */
1113         while (head != ((last + 1) % vq->size)) {
1114                 inflight_info->desc[old_free_head].num++;
1115                 inflight_info->desc[free_head].addr = desc[head].addr;
1116                 inflight_info->desc[free_head].len = desc[head].len;
1117                 inflight_info->desc[free_head].flags = desc[head].flags;
1118                 inflight_info->desc[free_head].id = desc[head].id;
1119
1120                 inflight_info->desc[old_free_head].last = free_head;
1121                 free_head = inflight_info->desc[free_head].next;
1122                 inflight_info->free_head = free_head;
1123                 head = (head + 1) % vq->size;
1124         }
1125
1126         inflight_info->old_free_head = free_head;
1127         *inflight_entry = old_free_head;
1128
1129         return 0;
1130 }
1131
1132 int
1133 rte_vhost_clr_inflight_desc_split(int vid, uint16_t vring_idx,
1134                                   uint16_t last_used_idx, uint16_t idx)
1135 {
1136         struct virtio_net *dev;
1137         struct vhost_virtqueue *vq;
1138
1139         dev = get_device(vid);
1140         if (unlikely(!dev))
1141                 return -1;
1142
1143         if (unlikely(!(dev->protocol_features &
1144             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1145                 return 0;
1146
1147         if (unlikely(vq_is_packed(dev)))
1148                 return -1;
1149
1150         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1151                 return -1;
1152
1153         vq = dev->virtqueue[vring_idx];
1154         if (unlikely(!vq))
1155                 return -1;
1156
1157         if (unlikely(!vq->inflight_split))
1158                 return -1;
1159
1160         if (unlikely(idx >= vq->size))
1161                 return -1;
1162
1163         rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1164
1165         vq->inflight_split->desc[idx].inflight = 0;
1166
1167         rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1168
1169         vq->inflight_split->used_idx = last_used_idx;
1170         return 0;
1171 }
1172
1173 int
1174 rte_vhost_clr_inflight_desc_packed(int vid, uint16_t vring_idx,
1175                                    uint16_t head)
1176 {
1177         struct rte_vhost_inflight_info_packed *inflight_info;
1178         struct virtio_net *dev;
1179         struct vhost_virtqueue *vq;
1180
1181         dev = get_device(vid);
1182         if (unlikely(!dev))
1183                 return -1;
1184
1185         if (unlikely(!(dev->protocol_features &
1186             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1187                 return 0;
1188
1189         if (unlikely(!vq_is_packed(dev)))
1190                 return -1;
1191
1192         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1193                 return -1;
1194
1195         vq = dev->virtqueue[vring_idx];
1196         if (unlikely(!vq))
1197                 return -1;
1198
1199         inflight_info = vq->inflight_packed;
1200         if (unlikely(!inflight_info))
1201                 return -1;
1202
1203         if (unlikely(head >= vq->size))
1204                 return -1;
1205
1206         rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1207
1208         inflight_info->desc[head].inflight = 0;
1209
1210         rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1211
1212         inflight_info->old_free_head = inflight_info->free_head;
1213         inflight_info->old_used_idx = inflight_info->used_idx;
1214         inflight_info->old_used_wrap_counter = inflight_info->used_wrap_counter;
1215
1216         return 0;
1217 }
1218
1219 int
1220 rte_vhost_set_last_inflight_io_split(int vid, uint16_t vring_idx,
1221                                      uint16_t idx)
1222 {
1223         struct virtio_net *dev;
1224         struct vhost_virtqueue *vq;
1225
1226         dev = get_device(vid);
1227         if (unlikely(!dev))
1228                 return -1;
1229
1230         if (unlikely(!(dev->protocol_features &
1231             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1232                 return 0;
1233
1234         if (unlikely(vq_is_packed(dev)))
1235                 return -1;
1236
1237         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1238                 return -1;
1239
1240         vq = dev->virtqueue[vring_idx];
1241         if (unlikely(!vq))
1242                 return -1;
1243
1244         if (unlikely(!vq->inflight_split))
1245                 return -1;
1246
1247         if (unlikely(idx >= vq->size))
1248                 return -1;
1249
1250         vq->inflight_split->last_inflight_io = idx;
1251         return 0;
1252 }
1253
1254 int
1255 rte_vhost_set_last_inflight_io_packed(int vid, uint16_t vring_idx,
1256                                       uint16_t head)
1257 {
1258         struct rte_vhost_inflight_info_packed *inflight_info;
1259         struct virtio_net *dev;
1260         struct vhost_virtqueue *vq;
1261         uint16_t last;
1262
1263         dev = get_device(vid);
1264         if (unlikely(!dev))
1265                 return -1;
1266
1267         if (unlikely(!(dev->protocol_features &
1268             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD))))
1269                 return 0;
1270
1271         if (unlikely(!vq_is_packed(dev)))
1272                 return -1;
1273
1274         if (unlikely(vring_idx >= VHOST_MAX_VRING))
1275                 return -1;
1276
1277         vq = dev->virtqueue[vring_idx];
1278         if (unlikely(!vq))
1279                 return -1;
1280
1281         inflight_info = vq->inflight_packed;
1282         if (unlikely(!inflight_info))
1283                 return -1;
1284
1285         if (unlikely(head >= vq->size))
1286                 return -1;
1287
1288         last = inflight_info->desc[head].last;
1289         if (unlikely(last >= vq->size))
1290                 return -1;
1291
1292         inflight_info->desc[last].next = inflight_info->free_head;
1293         inflight_info->free_head = head;
1294         inflight_info->used_idx += inflight_info->desc[head].num;
1295         if (inflight_info->used_idx >= inflight_info->desc_num) {
1296                 inflight_info->used_idx -= inflight_info->desc_num;
1297                 inflight_info->used_wrap_counter =
1298                         !inflight_info->used_wrap_counter;
1299         }
1300
1301         return 0;
1302 }
1303
1304 int
1305 rte_vhost_vring_call(int vid, uint16_t vring_idx)
1306 {
1307         struct virtio_net *dev;
1308         struct vhost_virtqueue *vq;
1309
1310         dev = get_device(vid);
1311         if (!dev)
1312                 return -1;
1313
1314         if (vring_idx >= VHOST_MAX_VRING)
1315                 return -1;
1316
1317         vq = dev->virtqueue[vring_idx];
1318         if (!vq)
1319                 return -1;
1320
1321         rte_spinlock_lock(&vq->access_lock);
1322
1323         if (vq_is_packed(dev))
1324                 vhost_vring_call_packed(dev, vq);
1325         else
1326                 vhost_vring_call_split(dev, vq);
1327
1328         rte_spinlock_unlock(&vq->access_lock);
1329
1330         return 0;
1331 }
1332
1333 uint16_t
1334 rte_vhost_avail_entries(int vid, uint16_t queue_id)
1335 {
1336         struct virtio_net *dev;
1337         struct vhost_virtqueue *vq;
1338         uint16_t ret = 0;
1339
1340         dev = get_device(vid);
1341         if (!dev)
1342                 return 0;
1343
1344         if (queue_id >= VHOST_MAX_VRING)
1345                 return 0;
1346
1347         vq = dev->virtqueue[queue_id];
1348         if (!vq)
1349                 return 0;
1350
1351         rte_spinlock_lock(&vq->access_lock);
1352
1353         if (unlikely(!vq->enabled || vq->avail == NULL))
1354                 goto out;
1355
1356         ret = *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
1357
1358 out:
1359         rte_spinlock_unlock(&vq->access_lock);
1360         return ret;
1361 }
1362
1363 static inline int
1364 vhost_enable_notify_split(struct virtio_net *dev,
1365                 struct vhost_virtqueue *vq, int enable)
1366 {
1367         if (vq->used == NULL)
1368                 return -1;
1369
1370         if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
1371                 if (enable)
1372                         vq->used->flags &= ~VRING_USED_F_NO_NOTIFY;
1373                 else
1374                         vq->used->flags |= VRING_USED_F_NO_NOTIFY;
1375         } else {
1376                 if (enable)
1377                         vhost_avail_event(vq) = vq->last_avail_idx;
1378         }
1379         return 0;
1380 }
1381
1382 static inline int
1383 vhost_enable_notify_packed(struct virtio_net *dev,
1384                 struct vhost_virtqueue *vq, int enable)
1385 {
1386         uint16_t flags;
1387
1388         if (vq->device_event == NULL)
1389                 return -1;
1390
1391         if (!enable) {
1392                 vq->device_event->flags = VRING_EVENT_F_DISABLE;
1393                 return 0;
1394         }
1395
1396         flags = VRING_EVENT_F_ENABLE;
1397         if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
1398                 flags = VRING_EVENT_F_DESC;
1399                 vq->device_event->off_wrap = vq->last_avail_idx |
1400                         vq->avail_wrap_counter << 15;
1401         }
1402
1403         rte_atomic_thread_fence(__ATOMIC_RELEASE);
1404
1405         vq->device_event->flags = flags;
1406         return 0;
1407 }
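/*
 * off_wrap encoding used above: when VIRTIO_RING_F_EVENT_IDX is negotiated,
 * bits 0-14 hold the descriptor offset the device wants to be notified at
 * and bit 15 holds the expected wrap counter.  For example, with
 * last_avail_idx = 5 and avail_wrap_counter = 1, off_wrap is written as
 * 0x8005 and VRING_EVENT_F_DESC asks the driver to kick once that
 * descriptor is made available.
 */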
1408
1409 int
1410 vhost_enable_guest_notification(struct virtio_net *dev,
1411                 struct vhost_virtqueue *vq, int enable)
1412 {
1413         /*
1414          * If the virtqueue is not ready yet, the setting will be
1415          * applied once it becomes ready.
1416          */
1417         if (!vq->ready)
1418                 return 0;
1419
1420         if (vq_is_packed(dev))
1421                 return vhost_enable_notify_packed(dev, vq, enable);
1422         else
1423                 return vhost_enable_notify_split(dev, vq, enable);
1424 }
1425
1426 int
1427 rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
1428 {
1429         struct virtio_net *dev = get_device(vid);
1430         struct vhost_virtqueue *vq;
1431         int ret;
1432
1433         if (!dev)
1434                 return -1;
1435
1436         if (queue_id >= VHOST_MAX_VRING)
1437                 return -1;
1438
1439         vq = dev->virtqueue[queue_id];
1440         if (!vq)
1441                 return -1;
1442
1443         rte_spinlock_lock(&vq->access_lock);
1444
1445         vq->notif_enable = enable;
1446         ret = vhost_enable_guest_notification(dev, vq, enable);
1447
1448         rte_spinlock_unlock(&vq->access_lock);
1449
1450         return ret;
1451 }
1452
1453 void
1454 rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
1455 {
1456         struct virtio_net *dev = get_device(vid);
1457
1458         if (dev == NULL)
1459                 return;
1460
1461         vhost_log_write(dev, addr, len);
1462 }
1463
1464 void
1465 rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
1466                          uint64_t offset, uint64_t len)
1467 {
1468         struct virtio_net *dev;
1469         struct vhost_virtqueue *vq;
1470
1471         dev = get_device(vid);
1472         if (dev == NULL)
1473                 return;
1474
1475         if (vring_idx >= VHOST_MAX_VRING)
1476                 return;
1477         vq = dev->virtqueue[vring_idx];
1478         if (!vq)
1479                 return;
1480
1481         vhost_log_used_vring(dev, vq, offset, len);
1482 }
1483
1484 uint32_t
1485 rte_vhost_rx_queue_count(int vid, uint16_t qid)
1486 {
1487         struct virtio_net *dev;
1488         struct vhost_virtqueue *vq;
1489         uint32_t ret = 0;
1490
1491         dev = get_device(vid);
1492         if (dev == NULL)
1493                 return 0;
1494
1495         if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
1496                 VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
1497                         dev->ifname, __func__, qid);
1498                 return 0;
1499         }
1500
1501         vq = dev->virtqueue[qid];
1502         if (vq == NULL)
1503                 return 0;
1504
1505         rte_spinlock_lock(&vq->access_lock);
1506
1507         if (unlikely(!vq->enabled || vq->avail == NULL))
1508                 goto out;
1509
1510         ret = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
1511
1512 out:
1513         rte_spinlock_unlock(&vq->access_lock);
1514         return ret;
1515 }
1516
1517 struct rte_vdpa_device *
1518 rte_vhost_get_vdpa_device(int vid)
1519 {
1520         struct virtio_net *dev = get_device(vid);
1521
1522         if (dev == NULL)
1523                 return NULL;
1524
1525         return dev->vdpa_dev;
1526 }
1527
1528 int
1529 rte_vhost_get_log_base(int vid, uint64_t *log_base,
1530                 uint64_t *log_size)
1531 {
1532         struct virtio_net *dev = get_device(vid);
1533
1534         if (dev == NULL || log_base == NULL || log_size == NULL)
1535                 return -1;
1536
1537         *log_base = dev->log_base;
1538         *log_size = dev->log_size;
1539
1540         return 0;
1541 }
1542
1543 int
1544 rte_vhost_get_vring_base(int vid, uint16_t queue_id,
1545                 uint16_t *last_avail_idx, uint16_t *last_used_idx)
1546 {
1547         struct vhost_virtqueue *vq;
1548         struct virtio_net *dev = get_device(vid);
1549
1550         if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
1551                 return -1;
1552
1553         if (queue_id >= VHOST_MAX_VRING)
1554                 return -1;
1555
1556         vq = dev->virtqueue[queue_id];
1557         if (!vq)
1558                 return -1;
1559
1560         if (vq_is_packed(dev)) {
1561                 *last_avail_idx = (vq->avail_wrap_counter << 15) |
1562                                   vq->last_avail_idx;
1563                 *last_used_idx = (vq->used_wrap_counter << 15) |
1564                                  vq->last_used_idx;
1565         } else {
1566                 *last_avail_idx = vq->last_avail_idx;
1567                 *last_used_idx = vq->last_used_idx;
1568         }
1569
1570         return 0;
1571 }
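/*
 * For packed rings, the indexes reported above fold the wrap counter into
 * bit 15 and the ring index into bits 0-14: e.g. last_avail_idx = 10 with
 * avail_wrap_counter = 1 is returned as 0x800a.  rte_vhost_set_vring_base()
 * below accepts the same encoding.
 */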
1572
1573 int
1574 rte_vhost_set_vring_base(int vid, uint16_t queue_id,
1575                 uint16_t last_avail_idx, uint16_t last_used_idx)
1576 {
1577         struct vhost_virtqueue *vq;
1578         struct virtio_net *dev = get_device(vid);
1579
1580         if (!dev)
1581                 return -1;
1582
1583         if (queue_id >= VHOST_MAX_VRING)
1584                 return -1;
1585
1586         vq = dev->virtqueue[queue_id];
1587         if (!vq)
1588                 return -1;
1589
1590         if (vq_is_packed(dev)) {
1591                 vq->last_avail_idx = last_avail_idx & 0x7fff;
1592                 vq->avail_wrap_counter = !!(last_avail_idx & (1 << 15));
1593                 vq->last_used_idx = last_used_idx & 0x7fff;
1594                 vq->used_wrap_counter = !!(last_used_idx & (1 << 15));
1595         } else {
1596                 vq->last_avail_idx = last_avail_idx;
1597                 vq->last_used_idx = last_used_idx;
1598         }
1599
1600         return 0;
1601 }
1602
1603 int
1604 rte_vhost_get_vring_base_from_inflight(int vid,
1605                                        uint16_t queue_id,
1606                                        uint16_t *last_avail_idx,
1607                                        uint16_t *last_used_idx)
1608 {
1609         struct rte_vhost_inflight_info_packed *inflight_info;
1610         struct vhost_virtqueue *vq;
1611         struct virtio_net *dev = get_device(vid);
1612
1613         if (dev == NULL || last_avail_idx == NULL || last_used_idx == NULL)
1614                 return -1;
1615
1616         if (queue_id >= VHOST_MAX_VRING)
1617                 return -1;
1618
1619         vq = dev->virtqueue[queue_id];
1620         if (!vq)
1621                 return -1;
1622
1623         if (!vq_is_packed(dev))
1624                 return -1;
1625
1626         inflight_info = vq->inflight_packed;
1627         if (!inflight_info)
1628                 return -1;
1629
1630         *last_avail_idx = (inflight_info->old_used_wrap_counter << 15) |
1631                           inflight_info->old_used_idx;
1632         *last_used_idx = *last_avail_idx;
1633
1634         return 0;
1635 }
1636
1637 int
1638 rte_vhost_extern_callback_register(int vid,
1639                 struct rte_vhost_user_extern_ops const * const ops, void *ctx)
1640 {
1641         struct virtio_net *dev = get_device(vid);
1642
1643         if (dev == NULL || ops == NULL)
1644                 return -1;
1645
1646         dev->extern_ops = *ops;
1647         dev->extern_data = ctx;
1648         return 0;
1649 }
1650
1651 static __rte_always_inline int
1652 async_channel_register(int vid, uint16_t queue_id)
1653 {
1654         struct virtio_net *dev = get_device(vid);
1655         struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
1656         struct vhost_async *async;
1657         int node = vq->numa_node;
1658
1659         if (unlikely(vq->async)) {
1660                 VHOST_LOG_CONFIG(ERR,
1661                                 "(%s) async register failed: already registered (qid: %d)\n",
1662                                 dev->ifname, queue_id);
1663                 return -1;
1664         }
1665
1666         async = rte_zmalloc_socket(NULL, sizeof(struct vhost_async), 0, node);
1667         if (!async) {
1668                 VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async metadata (qid: %d)\n",
1669                                 dev->ifname, queue_id);
1670                 return -1;
1671         }
1672
1673         async->pkts_info = rte_malloc_socket(NULL, vq->size * sizeof(struct async_inflight_info),
1674                         RTE_CACHE_LINE_SIZE, node);
1675         if (!async->pkts_info) {
1676                 VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async_pkts_info (qid: %d)\n",
1677                                 dev->ifname, queue_id);
1678                 goto out_free_async;
1679         }
1680
1681         async->pkts_cmpl_flag = rte_zmalloc_socket(NULL, vq->size * sizeof(bool),
1682                         RTE_CACHE_LINE_SIZE, node);
1683         if (!async->pkts_cmpl_flag) {
1684                 VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async pkts_cmpl_flag (qid: %d)\n",
1685                                 dev->ifname, queue_id);
1686                 goto out_free_async;
1687         }
1688
1689         if (vq_is_packed(dev)) {
1690                 async->buffers_packed = rte_malloc_socket(NULL,
1691                                 vq->size * sizeof(struct vring_used_elem_packed),
1692                                 RTE_CACHE_LINE_SIZE, node);
1693                 if (!async->buffers_packed) {
1694                         VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async buffers (qid: %d)\n",
1695                                         dev->ifname, queue_id);
1696                         goto out_free_inflight;
1697                 }
1698         } else {
1699                 async->descs_split = rte_malloc_socket(NULL,
1700                                 vq->size * sizeof(struct vring_used_elem),
1701                                 RTE_CACHE_LINE_SIZE, node);
1702                 if (!async->descs_split) {
1703                         VHOST_LOG_CONFIG(ERR, "(%s) failed to allocate async descs (qid: %d)\n",
1704                                         dev->ifname, queue_id);
1705                         goto out_free_inflight;
1706                 }
1707         }
1708
1709         vq->async = async;
1710
1711         return 0;
1712 out_free_inflight:
1713         rte_free(async->pkts_info);
1714 out_free_async:
1715         rte_free(async);
1716
1717         return -1;
1718 }
1719
1720 int
1721 rte_vhost_async_channel_register(int vid, uint16_t queue_id)
1722 {
1723         struct vhost_virtqueue *vq;
1724         struct virtio_net *dev = get_device(vid);
1725         int ret;
1726
1727         if (dev == NULL)
1728                 return -1;
1729
1730         if (queue_id >= VHOST_MAX_VRING)
1731                 return -1;
1732
1733         vq = dev->virtqueue[queue_id];
1734
1735         if (unlikely(vq == NULL || !dev->async_copy))
1736                 return -1;
1737
1738         rte_spinlock_lock(&vq->access_lock);
1739         ret = async_channel_register(vid, queue_id);
1740         rte_spinlock_unlock(&vq->access_lock);
1741
1742         return ret;
1743 }
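/*
 * Minimal usage sketch for the async registration above (illustrative only;
 * assumes a DMA device "dma_id" already probed by the application and a
 * vhost socket created with async copy enabled, which is what sets
 * dev->async_copy):
 *
 *	if (rte_vhost_async_dma_configure(dma_id, 0) == 0 &&
 *	    rte_vhost_async_channel_register(vid, queue_id) == 0)
 *		... the queue can now use the async data path ...
 */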
1744
1745 int
1746 rte_vhost_async_channel_register_thread_unsafe(int vid, uint16_t queue_id)
1747 {
1748         struct vhost_virtqueue *vq;
1749         struct virtio_net *dev = get_device(vid);
1750
1751         if (dev == NULL)
1752                 return -1;
1753
1754         if (queue_id >= VHOST_MAX_VRING)
1755                 return -1;
1756
1757         vq = dev->virtqueue[queue_id];
1758
1759         if (unlikely(vq == NULL || !dev->async_copy))
1760                 return -1;
1761
1762         return async_channel_register(vid, queue_id);
1763 }
1764
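/*
 * Tear down the async channel of one virtqueue. vq->access_lock is only
 * trylocked, so this returns -1 instead of blocking while the data path
 * owns the ring, and it also refuses to free the channel while copies are
 * still in flight; callers are expected to drain completions first, e.g.
 * with rte_vhost_poll_enqueue_completed(), and retry.
 */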
1765 int
1766 rte_vhost_async_channel_unregister(int vid, uint16_t queue_id)
1767 {
1768         struct vhost_virtqueue *vq;
1769         struct virtio_net *dev = get_device(vid);
1770         int ret = -1;
1771
1772         if (dev == NULL)
1773                 return ret;
1774
1775         if (queue_id >= VHOST_MAX_VRING)
1776                 return ret;
1777
1778         vq = dev->virtqueue[queue_id];
1779
1780         if (vq == NULL)
1781                 return ret;
1782
1783         if (!rte_spinlock_trylock(&vq->access_lock)) {
1784                 VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel, virtqueue busy.\n",
1785                                 dev->ifname);
1786                 return ret;
1787         }
1788
1789         if (!vq->async) {
1790                 ret = 0;
1791         } else if (vq->async->pkts_inflight_n) {
1792                 VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
1793                 VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
1794                         dev->ifname);
1795         } else {
1796                 vhost_free_async_mem(vq);
1797                 ret = 0;
1798         }
1799
1800         rte_spinlock_unlock(&vq->access_lock);
1801
1802         return ret;
1803 }
1804
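/*
 * Lock-free variant of rte_vhost_async_channel_unregister(); the caller
 * must guarantee the virtqueue is not used concurrently. In-flight packets
 * still prevent unregistration.
 */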
1805 int
1806 rte_vhost_async_channel_unregister_thread_unsafe(int vid, uint16_t queue_id)
1807 {
1808         struct vhost_virtqueue *vq;
1809         struct virtio_net *dev = get_device(vid);
1810
1811         if (dev == NULL)
1812                 return -1;
1813
1814         if (queue_id >= VHOST_MAX_VRING)
1815                 return -1;
1816
1817         vq = dev->virtqueue[queue_id];
1818
1819         if (vq == NULL)
1820                 return -1;
1821
1822         if (!vq->async)
1823                 return 0;
1824
1825         if (vq->async->pkts_inflight_n) {
1826                 VHOST_LOG_CONFIG(ERR, "(%s) failed to unregister async channel.\n", dev->ifname);
1827                 VHOST_LOG_CONFIG(ERR, "(%s) inflight packets must be completed before unregistration.\n",
1828                         dev->ifname);
1829                 return -1;
1830         }
1831
1832         vhost_free_async_mem(vq);
1833
1834         return 0;
1835 }
1836
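/*
 * Attach one DMA device / virtual channel pair to the vhost async data
 * path. The per-channel completion-tracking array is sized to the
 * channel's max_desc, rounded up to a power of two so a simple mask can be
 * used for ring indexing. The dmadev is expected to be configured and
 * started by the application beforehand. A minimal sketch, assuming dma_id
 * comes from rte_dma_get_dev_id_by_name() and vchannel 0 is used:
 *
 *	if (rte_vhost_async_dma_configure(dma_id, 0) < 0)
 *		rte_exit(EXIT_FAILURE, "failed to attach DMA %d\n", dma_id);
 */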
1837 int
1838 rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
1839 {
1840         struct rte_dma_info info;
1841         void *pkts_cmpl_flag_addr;
1842         uint16_t max_desc;
1843
1844         if (!rte_dma_is_valid(dma_id)) {
1845                 VHOST_LOG_CONFIG(ERR, "DMA %d not found.\n", dma_id);
1846                 return -1;
1847         }
1848
1849         rte_dma_info_get(dma_id, &info);
1850         if (vchan_id >= info.max_vchans) {
1851                 VHOST_LOG_CONFIG(ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
1852                 return -1;
1853         }
1854
1855         if (!dma_copy_track[dma_id].vchans) {
1856                 struct async_dma_vchan_info *vchans;
1857
1858                 vchans = rte_zmalloc(NULL, sizeof(struct async_dma_vchan_info) * info.max_vchans,
1859                                 RTE_CACHE_LINE_SIZE);
1860                 if (vchans == NULL) {
1861                         VHOST_LOG_CONFIG(ERR, "Failed to allocate vchans for DMA %d vChannel %u.\n",
1862                                         dma_id, vchan_id);
1863                         return -1;
1864                 }
1865
1866                 dma_copy_track[dma_id].vchans = vchans;
1867         }
1868
1869         if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
1870                 VHOST_LOG_CONFIG(INFO, "DMA %d vChannel %u already registered.\n", dma_id,
1871                                 vchan_id);
1872                 return 0;
1873         }
1874
1875         max_desc = info.max_desc;
1876         if (!rte_is_power_of_2(max_desc))
1877                 max_desc = rte_align32pow2(max_desc);
1878
1879         pkts_cmpl_flag_addr = rte_zmalloc(NULL, sizeof(bool *) * max_desc, RTE_CACHE_LINE_SIZE);
1880         if (!pkts_cmpl_flag_addr) {
1881                 VHOST_LOG_CONFIG(ERR, "Failed to allocate pkts_cmpl_flag_addr for DMA %d vChannel %u.\n",
1882                                 dma_id, vchan_id);
1883
1884                 if (dma_copy_track[dma_id].nr_vchans == 0) {
1885                         rte_free(dma_copy_track[dma_id].vchans);
1886                         dma_copy_track[dma_id].vchans = NULL;
1887                 }
1888                 return -1;
1889         }
1890
1891         dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
1892         dma_copy_track[dma_id].vchans[vchan_id].ring_size = max_desc;
1893         dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
1894         dma_copy_track[dma_id].nr_vchans++;
1895
1896         return 0;
1897 }
1898
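/*
 * Report how many async copies are still in flight on a virtqueue.
 * Returns -1 if the queue is invalid, has no async channel registered, or
 * is currently busy (the access lock is only trylocked, so this may fail
 * transiently and can simply be retried).
 */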
1899 int
1900 rte_vhost_async_get_inflight(int vid, uint16_t queue_id)
1901 {
1902         struct vhost_virtqueue *vq;
1903         struct virtio_net *dev = get_device(vid);
1904         int ret = -1;
1905
1906         if (dev == NULL)
1907                 return ret;
1908
1909         if (queue_id >= VHOST_MAX_VRING)
1910                 return ret;
1911
1912         vq = dev->virtqueue[queue_id];
1913
1914         if (vq == NULL)
1915                 return ret;
1916
1917         if (!rte_spinlock_trylock(&vq->access_lock)) {
1918                 VHOST_LOG_CONFIG(DEBUG,
1919                         "(%s) failed to check in-flight packets. virtqueue busy.\n",
1920                         dev->ifname);
1921                 return ret;
1922         }
1923
1924         if (vq->async)
1925                 ret = vq->async->pkts_inflight_n;
1926
1927         rte_spinlock_unlock(&vq->access_lock);
1928
1929         return ret;
1930 }
1931
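/*
 * Lock-free variant of rte_vhost_async_get_inflight() for callers that
 * already own the virtqueue.
 */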
1932 int
1933 rte_vhost_async_get_inflight_thread_unsafe(int vid, uint16_t queue_id)
1934 {
1935         struct vhost_virtqueue *vq;
1936         struct virtio_net *dev = get_device(vid);
1937         int ret = -1;
1938
1939         if (dev == NULL)
1940                 return ret;
1941
1942         if (queue_id >= VHOST_MAX_VRING)
1943                 return ret;
1944
1945         vq = dev->virtqueue[queue_id];
1946
1947         if (vq == NULL)
1948                 return ret;
1949
1950         if (!vq->async)
1951                 return ret;
1952
1953         ret = vq->async->pkts_inflight_n;
1954
1955         return ret;
1956 }
1957
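/*
 * Fill in the power-monitor condition for a virtqueue so a driver can wait
 * on new descriptors (e.g. with UMWAIT) instead of busy polling. For packed
 * rings the monitored word is the flags field of the next descriptor: it
 * indicates new work when its AVAIL/USED bits match the current wrap
 * counter, hence match == 1 with val derived from avail_wrap_counter. For
 * split rings the monitored word is avail->idx, and work is pending as soon
 * as it differs from the masked last_avail_idx, hence match == 0.
 */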
1958 int
1959 rte_vhost_get_monitor_addr(int vid, uint16_t queue_id,
1960                 struct rte_vhost_power_monitor_cond *pmc)
1961 {
1962         struct virtio_net *dev = get_device(vid);
1963         struct vhost_virtqueue *vq;
1964
1965         if (dev == NULL)
1966                 return -1;
1967         if (queue_id >= VHOST_MAX_VRING)
1968                 return -1;
1969
1970         vq = dev->virtqueue[queue_id];
1971         if (vq == NULL)
1972                 return -1;
1973
1974         if (vq_is_packed(dev)) {
1975                 struct vring_packed_desc *desc;
1976                 desc = vq->desc_packed;
1977                 pmc->addr = &desc[vq->last_avail_idx].flags;
1978                 if (vq->avail_wrap_counter)
1979                         pmc->val = VRING_DESC_F_AVAIL;
1980                 else
1981                         pmc->val = VRING_DESC_F_USED;
1982                 pmc->mask = VRING_DESC_F_AVAIL | VRING_DESC_F_USED;
1983                 pmc->size = sizeof(desc[vq->last_avail_idx].flags);
1984                 pmc->match = 1;
1985         } else {
1986                 pmc->addr = &vq->avail->idx;
1987                 pmc->val = vq->last_avail_idx & (vq->size - 1);
1988                 pmc->mask = vq->size - 1;
1989                 pmc->size = sizeof(vq->avail->idx);
1990                 pmc->match = 0;
1991         }
1992
1993         return 0;
1994 }
1995
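/*
 * Return the number of per-virtqueue statistics and, if the supplied array
 * is large enough, their names. Names are prefixed with the direction seen
 * from the application side and the queue-pair index ("rx"/"tx", queue_id
 * divided by two), so virtqueue 1 is reported as rx_q0_*. Statistics must
 * have been enabled for the port (VIRTIO_DEV_STATS_ENABLED), otherwise -1
 * is returned. A minimal sketch of the usual two-call pattern, assuming
 * vid and queue_id are valid:
 *
 *	int nb_stats = rte_vhost_vring_stats_get_names(vid, queue_id, NULL, 0);
 *	struct rte_vhost_stat_name *names;
 *
 *	if (nb_stats <= 0)
 *		return;
 *	names = calloc(nb_stats, sizeof(*names));
 *	rte_vhost_vring_stats_get_names(vid, queue_id, names, nb_stats);
 */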
1997 int
1998 rte_vhost_vring_stats_get_names(int vid, uint16_t queue_id,
1999                 struct rte_vhost_stat_name *name, unsigned int size)
2000 {
2001         struct virtio_net *dev = get_device(vid);
2002         unsigned int i;
2003
2004         if (dev == NULL)
2005                 return -1;
2006
2007         if (queue_id >= dev->nr_vring)
2008                 return -1;
2009
2010         if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2011                 return -1;
2012
2013         if (name == NULL || size < VHOST_NB_VQ_STATS)
2014                 return VHOST_NB_VQ_STATS;
2015
2016         for (i = 0; i < VHOST_NB_VQ_STATS; i++)
2017                 snprintf(name[i].name, sizeof(name[i].name), "%s_q%u_%s",
2018                                 (queue_id & 1) ? "rx" : "tx",
2019                                 queue_id / 2, vhost_vq_stat_strings[i].name);
2020
2021         return VHOST_NB_VQ_STATS;
2022 }
2023
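/*
 * Copy the current counter values of one virtqueue. Counters are read
 * under vq->access_lock, so the snapshot is consistent with the data path,
 * and each entry's id indexes into the names returned by
 * rte_vhost_vring_stats_get_names(). A minimal sketch, assuming nb_stats
 * and names were obtained as above:
 *
 *	struct rte_vhost_stat *stats = calloc(nb_stats, sizeof(*stats));
 *
 *	if (rte_vhost_vring_stats_get(vid, queue_id, stats, nb_stats) > 0)
 *		printf("%s: %" PRIu64 "\n", names[0].name, stats[0].value);
 */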
2024 int
2025 rte_vhost_vring_stats_get(int vid, uint16_t queue_id,
2026                 struct rte_vhost_stat *stats, unsigned int n)
2027 {
2028         struct virtio_net *dev = get_device(vid);
2029         struct vhost_virtqueue *vq;
2030         unsigned int i;
2031
2032         if (dev == NULL)
2033                 return -1;
2034
2035         if (queue_id >= dev->nr_vring)
2036                 return -1;
2037
2038         if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2039                 return -1;
2040
2041         if (stats == NULL || n < VHOST_NB_VQ_STATS)
2042                 return VHOST_NB_VQ_STATS;
2043
2044         vq = dev->virtqueue[queue_id];
2045
2046         rte_spinlock_lock(&vq->access_lock);
2047         for (i = 0; i < VHOST_NB_VQ_STATS; i++) {
2048                 stats[i].value =
2049                         *(uint64_t *)(((char *)vq) + vhost_vq_stat_strings[i].offset);
2050                 stats[i].id = i;
2051         }
2052         rte_spinlock_unlock(&vq->access_lock);
2053
2054         return VHOST_NB_VQ_STATS;
2055 }
2056
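/*
 * Zero all counters of one virtqueue, under vq->access_lock so the reset
 * does not race with the data path updating them.
 */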
2057 int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
2058 {
2059         struct virtio_net *dev = get_device(vid);
2060         struct vhost_virtqueue *vq;
2061
2062         if (dev == NULL)
2063                 return -1;
2064
2065         if (queue_id >= dev->nr_vring)
2066                 return -1;
2067
2068         if (!(dev->flags & VIRTIO_DEV_STATS_ENABLED))
2069                 return -1;
2070
2071         vq = dev->virtqueue[queue_id];
2072
2073         rte_spinlock_lock(&vq->access_lock);
2074         memset(&vq->stats, 0, sizeof(vq->stats));
2075         rte_spinlock_unlock(&vq->access_lock);
2076
2077         return 0;
2078 }
2079
2080 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
2081 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);