vhost: support host notifier
[dpdk.git] lib/librte_vhost/vhost.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                    uint64_t iova, uint64_t *size, uint8_t perm)
{
        uint64_t vva, tmp_size;

        if (unlikely(!*size))
                return 0;

        tmp_size = *size;

        vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
        if (tmp_size == *size)
                return vva;

        iova += tmp_size;

        if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
                /*
                 * iotlb_lock is read-locked for a full burst,
                 * but it only protects the iotlb cache.
                 * In case of IOTLB miss, we might block on the socket,
                 * which could cause a deadlock with QEMU if an IOTLB update
                 * is being handled. We can safely unlock here to avoid it.
                 */
                vhost_user_iotlb_rd_unlock(vq);

                vhost_user_iotlb_pending_insert(vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
                                iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
                }

                vhost_user_iotlb_rd_lock(vq);
        }

        return 0;
}
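
/*
 * Illustrative usage sketch (not part of the upstream file): the expected
 * caller pattern for the translation above. A non-zero return means the
 * whole requested range is contiguously mapped; zero means an IOTLB miss
 * was requested from the frontend, so the caller should bail out and retry
 * once the update arrives. The caller must hold the iotlb read lock, as
 * noted above. The function name is hypothetical.
 */
static __rte_unused void *
example_map_guest_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                      uint64_t iova, uint64_t len)
{
        uint64_t size = len;
        uint64_t vva;

        vva = __vhost_iova_to_vva(dev, vq, iova, &size, VHOST_ACCESS_RO);
        if (unlikely(!vva))
                return NULL; /* miss in flight; retry this buffer later */

        return (void *)(uintptr_t)vva;
}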

void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
        if ((vq->callfd >= 0) && (destroy != 0))
                close(vq->callfd);
        if (vq->kickfd >= 0)
                close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
        uint32_t i;

        vhost_backend_cleanup(dev);

        for (i = 0; i < dev->nr_vring; i++)
                cleanup_vq(dev->virtqueue[i], destroy);
}

void
free_vq(struct vhost_virtqueue *vq)
{
        rte_free(vq->shadow_used_ring);
        rte_free(vq->batch_copy_elems);
        rte_mempool_free(vq->iotlb_pool);
        rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
        uint32_t i;

        for (i = 0; i < dev->nr_vring; i++)
                free_vq(dev->virtqueue[i]);

        rte_free(dev);
}

int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t req_size, size;

        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                goto out;

        req_size = sizeof(struct vring_desc) * vq->size;
        size = req_size;
        vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.desc_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->desc || size != req_size)
                return -1;

        req_size = sizeof(struct vring_avail);
        req_size += sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.avail_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->avail || size != req_size)
                return -1;

        req_size = sizeof(struct vring_used);
        req_size += sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.used_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->used || size != req_size)
                return -1;

out:
        vq->access_ok = 1;

        return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_lock(vq);

        vq->access_ok = 0;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_unlock(vq);
}

static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to init vring, out of bound (%d)\n",
                                vring_idx);
                return;
        }

        vq = dev->virtqueue[vring_idx];

        memset(vq, 0, sizeof(struct vhost_virtqueue));

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        vhost_user_iotlb_init(dev, vring_idx);
        /* Backends are set to -1 indicating an inactive device. */
        vq->backend = -1;

        TAILQ_INIT(&vq->zmbuf_list);
}

static void
reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;
        int callfd;

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to reset vring, out of bound (%d)\n",
                                vring_idx);
                return;
        }

        vq = dev->virtqueue[vring_idx];
        callfd = vq->callfd;
        init_vring_queue(dev, vring_idx);
        vq->callfd = callfd;
}

int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
        if (vq == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for vring:%u.\n", vring_idx);
                return -1;
        }

        dev->virtqueue[vring_idx] = vq;
        init_vring_queue(dev, vring_idx);
        rte_spinlock_init(&vq->access_lock);

        dev->nr_vring += 1;

        return 0;
}

/*
 * Reset some variables in device structure, while keeping a few
 * others untouched, such as vid, ifname, nr_vring: they should
 * remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
        uint32_t i;

        dev->features = 0;
        dev->protocol_features = 0;
        dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

        for (i = 0; i < dev->nr_vring; i++)
                reset_vring_queue(dev, i);
}

/*
 * Invoked when there is a new vhost-user connection established (when
 * there is a new virtio device being attached).
 */
int
vhost_new_device(void)
{
        struct virtio_net *dev;
        int i;

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                if (vhost_devices[i] == NULL)
                        break;
        }

        if (i == MAX_VHOST_DEVICE) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to find a free slot for new device.\n");
                return -1;
        }

        dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
        if (dev == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for new dev.\n");
                return -1;
        }

        vhost_devices[i] = dev;
        dev->vid = i;
        dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        dev->slave_req_fd = -1;
        dev->vdpa_dev_id = -1;
        rte_spinlock_init(&dev->slave_req_lock);

        return i;
}

/*
 * Invoked when the vhost-user connection is broken (when
 * the virtio device is being detached).
 */
void
vhost_destroy_device(int vid)
{
        struct virtio_net *dev = get_device(vid);
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        if (dev == NULL)
                return;

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                did = dev->vdpa_dev_id;
                vdpa_dev = rte_vdpa_get_device(did);
                if (vdpa_dev && vdpa_dev->ops->dev_close)
                        vdpa_dev->ops->dev_close(dev->vid);
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(vid);
        }

        cleanup_device(dev, 1);
        free_device(dev);

        vhost_devices[vid] = NULL;
}

void
vhost_attach_vdpa_device(int vid, int did)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (rte_vdpa_get_device(did) == NULL)
                return;

        dev->vdpa_dev_id = did;
}

void
vhost_detach_vdpa_device(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_user_host_notifier_ctrl(vid, false);

        dev->vdpa_dev_id = -1;
}

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
        struct virtio_net *dev;
        unsigned int len;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        len = if_len > sizeof(dev->ifname) ?
                sizeof(dev->ifname) : if_len;

        strncpy(dev->ifname, if_name, len);
        dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_enable_dequeue_zero_copy(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        dev->dequeue_zero_copy = 1;
}

void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (enable)
                dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        else
                dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -ENODEV;

        if (!(dev->flags & VIRTIO_DEV_READY))
                return -EAGAIN;

        if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
                return -ENOTSUP;

        *mtu = dev->mtu;

        return 0;
}
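
/*
 * Illustrative usage sketch (not part of the upstream file): querying the
 * negotiated MTU from an application callback once the device is ready.
 * -EAGAIN means negotiation is still in progress and the call can simply
 * be retried later. The function name is hypothetical.
 */
static __rte_unused int
example_new_device_cb(int vid)
{
        uint16_t mtu = 1500; /* fallback when VIRTIO_NET_F_MTU is not set */

        if (rte_vhost_get_mtu(vid, &mtu) == -ENOTSUP)
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) MTU feature not negotiated, using %u\n",
                        vid, mtu);

        return 0;
}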

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
        struct virtio_net *dev = get_device(vid);
        int numa_node;
        int ret;

        if (dev == NULL)
                return -1;

        ret = get_mempolicy(&numa_node, NULL, 0, dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to query numa node: %s\n",
                        vid, rte_strerror(errno));
                return -1;
        }

        return numa_node;
#else
        RTE_SET_USED(vid);
        return -1;
#endif
}

uint32_t
rte_vhost_get_queue_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        len = RTE_MIN(len, sizeof(dev->ifname));

        strncpy(buf, dev->ifname, len);
        buf[len - 1] = '\0';

        return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
        struct virtio_net *dev;

        dev = get_device(vid);
        if (!dev)
                return -1;

        *features = dev->features;
        return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
        struct virtio_net *dev;
        struct rte_vhost_memory *m;
        size_t size;

        dev = get_device(vid);
        if (!dev || !dev->mem)
                return -1;

        size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
        m = malloc(sizeof(struct rte_vhost_memory) + size);
        if (!m)
                return -1;

        m->nregions = dev->mem->nregions;
        memcpy(m->regions, dev->mem->regions, size);
        *mem = m;

        return 0;
}
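
/*
 * Illustrative usage sketch (not part of the upstream file): the table
 * returned above is allocated with malloc(), so the caller owns it and
 * must free() it when done. The function name is hypothetical.
 */
static __rte_unused void
example_dump_mem_table(int vid)
{
        struct rte_vhost_memory *mem = NULL;
        uint32_t i;

        if (rte_vhost_get_mem_table(vid, &mem) < 0)
                return;

        for (i = 0; i < mem->nregions; i++)
                RTE_LOG(INFO, VHOST_CONFIG,
                        "region %u: GPA 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
                        i, mem->regions[i].guest_phys_addr,
                        mem->regions[i].size);

        free(mem);
}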

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
                          struct rte_vhost_vring *vring)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        vring->desc  = vq->desc;
        vring->avail = vq->avail;
        vring->used  = vq->used;
        vring->log_guest_addr  = vq->log_guest_addr;

        vring->callfd  = vq->callfd;
        vring->kickfd  = vq->kickfd;
        vring->size    = vq->size;

        return 0;
}

int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        vhost_vring_call(dev, vq);
        return 0;
}
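
/*
 * Illustrative usage sketch (not part of the upstream file): an external
 * backend (a vDPA driver, for instance) would typically fetch the ring
 * addresses once, fill the used ring itself, and then interrupt the guest
 * through the call path above. The function name is hypothetical.
 */
static __rte_unused int
example_complete_and_kick(int vid, uint16_t vring_idx)
{
        struct rte_vhost_vring vring;

        if (rte_vhost_get_vhost_vring(vid, vring_idx, &vring) < 0)
                return -1;

        /* ... fill vring.used->ring[] and bump vring.used->idx here ... */

        return rte_vhost_vring_call(vid, vring_idx);
}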

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return 0;

        vq = dev->virtqueue[queue_id];
        if (!vq || !vq->enabled)
                return 0;

        return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}

int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (enable)
                dev->virtqueue[queue_id]->used->flags &=
                        ~VRING_USED_F_NO_NOTIFY;
        else
                dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
        return 0;
}
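
/*
 * Illustrative usage sketch (not part of the upstream file): a polling
 * backend usually clears guest notifications while it is actively
 * scanning the ring and re-arms them before going idle, mirroring the
 * VRING_USED_F_NO_NOTIFY manipulation above. The function name is
 * hypothetical.
 */
static __rte_unused void
example_poll_cycle(int vid, uint16_t queue_id)
{
        uint16_t n;

        /* Suppress guest kicks: we will poll avail->idx ourselves. */
        rte_vhost_enable_guest_notification(vid, queue_id, 0);

        n = rte_vhost_avail_entries(vid, queue_id);
        if (n)
                RTE_LOG(INFO, VHOST_DATA,
                        "(%d) %u entries ready on queue %u\n",
                        vid, n, queue_id);
        /* ... dequeue and process descriptors here ... */

        /* Re-arm notifications before sleeping. */
        rte_vhost_enable_guest_notification(vid, queue_id, 1);
}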

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
                         uint64_t offset, uint64_t len)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        if (vring_idx >= VHOST_MAX_VRING)
                return;
        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return;

        vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return 0;

        if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, qid);
                return 0;
        }

        vq = dev->virtqueue[qid];
        if (vq == NULL)
                return 0;

        if (unlikely(vq->enabled == 0 || vq->avail == NULL))
                return 0;

        return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}

int rte_vhost_get_vdpa_device_id(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        return dev->vdpa_dev_id;
}

int rte_vhost_get_log_base(int vid, uint64_t *log_base,
                uint64_t *log_size)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *log_base = dev->log_base;
        *log_size = dev->log_size;

        return 0;
}
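
/*
 * Illustrative usage sketch (not part of the upstream file): during live
 * migration a backend fetches the dirty-page log region once and then
 * marks written guest pages in it, e.g. via rte_vhost_log_write(). The
 * function name is hypothetical.
 */
static __rte_unused int
example_start_dirty_logging(int vid)
{
        uint64_t log_base, log_size;

        if (rte_vhost_get_log_base(vid, &log_base, &log_size) < 0)
                return -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "(%d) log base 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
                vid, log_base, log_size);

        return 0;
}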

int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
                uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
        *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;

        return 0;
}

int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
                uint16_t last_avail_idx, uint16_t last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
        dev->virtqueue[queue_id]->last_used_idx = last_used_idx;

        return 0;
}
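
/*
 * Illustrative usage sketch (not part of the upstream file): pairing the
 * getter and setter above to hand ring positions over from one device
 * instance to another, as a migration-style restart would. The function
 * name is hypothetical.
 */
static __rte_unused int
example_transfer_vring_state(int old_vid, int new_vid, uint16_t queue_id)
{
        uint16_t last_avail_idx, last_used_idx;

        if (rte_vhost_get_vring_base(old_vid, queue_id,
                        &last_avail_idx, &last_used_idx) < 0)
                return -1;

        return rte_vhost_set_vring_base(new_vid, queue_id,
                        last_avail_idx, last_used_idx);
}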