vhost: add vring address setup for packed queues
[dpdk.git] / lib / librte_vhost / vhost.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2017 Intel Corporation
 */

#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_vhost.h>
#include <rte_rwlock.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Called with iotlb_lock read-locked */
uint64_t
__vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                    uint64_t iova, uint64_t *size, uint8_t perm)
{
        uint64_t vva, tmp_size;

        if (unlikely(!*size))
                return 0;

        tmp_size = *size;

        vva = vhost_user_iotlb_cache_find(vq, iova, &tmp_size, perm);
        if (tmp_size == *size)
                return vva;

        iova += tmp_size;

        if (!vhost_user_iotlb_pending_miss(vq, iova, perm)) {
                /*
                 * iotlb_lock is read-locked for a full burst,
                 * but it only protects the iotlb cache.
                 * In case of IOTLB miss, we might block on the socket,
                 * which could cause a deadlock with QEMU if an IOTLB update
                 * is being handled. We can safely unlock here to avoid it.
                 */
                vhost_user_iotlb_rd_unlock(vq);

                vhost_user_iotlb_pending_insert(vq, iova, perm);
                if (vhost_user_iotlb_miss(dev, iova, perm)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "IOTLB miss req failed for IOVA 0x%" PRIx64 "\n",
                                iova);
                        vhost_user_iotlb_pending_remove(vq, iova, 1, perm);
                }

                vhost_user_iotlb_rd_lock(vq);
        }

        return 0;
}

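/*
 * Close a virtqueue's eventfds. The kick fd is always closed, while the
 * call fd is only closed when the device is being destroyed, so that it
 * can be re-used (e.g. across a reconnection).
 */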
void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
        if ((vq->callfd >= 0) && (destroy != 0))
                close(vq->callfd);
        if (vq->kickfd >= 0)
                close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
void
cleanup_device(struct virtio_net *dev, int destroy)
{
        uint32_t i;

        vhost_backend_cleanup(dev);

        for (i = 0; i < dev->nr_vring; i++)
                cleanup_vq(dev->virtqueue[i], destroy);
}

void
free_vq(struct vhost_virtqueue *vq)
{
        rte_free(vq->shadow_used_ring);
        rte_free(vq->batch_copy_elems);
        rte_mempool_free(vq->iotlb_pool);
        rte_free(vq);
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net *dev)
{
        uint32_t i;

        for (i = 0; i < dev->nr_vring; i++)
                free_vq(dev->virtqueue[i]);

        rte_free(dev);
}

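/*
 * Translate the guest addresses of a split virtqueue's desc, avail and
 * used rings into host virtual addresses. Each ring must be covered by
 * a single contiguous mapping: if the translated size is smaller than
 * the requested one, the translation fails.
 */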
static int
vring_translate_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t req_size, size;

        req_size = sizeof(struct vring_desc) * vq->size;
        size = req_size;
        vq->desc = (struct vring_desc *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.desc_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->desc || size != req_size)
                return -1;

        req_size = sizeof(struct vring_avail);
        req_size += sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->avail = (struct vring_avail *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.avail_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->avail || size != req_size)
                return -1;

        req_size = sizeof(struct vring_used);
        req_size += sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                req_size += sizeof(uint16_t);
        size = req_size;
        vq->used = (struct vring_used *)(uintptr_t)vhost_iova_to_vva(dev, vq,
                                                vq->ring_addrs.used_user_addr,
                                                &size, VHOST_ACCESS_RW);
        if (!vq->used || size != req_size)
                return -1;

        return 0;
}

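/*
 * Translate the descriptor ring of a packed virtqueue. Unlike split
 * queues, packed queues use a single ring of vring_packed_desc entries,
 * so only the desc area is translated here.
 */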
static int
vring_translate_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t req_size, size;

        req_size = sizeof(struct vring_packed_desc) * vq->size;
        size = req_size;
        vq->desc_packed =
                (struct vring_packed_desc *)(uintptr_t)vhost_iova_to_vva(dev,
                                        vq, vq->ring_addrs.desc_user_addr,
                                        &size, VHOST_ACCESS_RW);
        if (!vq->desc_packed || size != req_size)
                return -1;

        return 0;
}

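/*
 * Make a virtqueue's rings accessible from the host. Translation is
 * only needed when the guest provides IOVAs (VIRTIO_F_IOMMU_PLATFORM);
 * otherwise the ring addresses were already translated when they were
 * set, and the rings can simply be marked as accessible.
 */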
int
vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                goto out;

        if (vq_is_packed(dev)) {
                if (vring_translate_packed(dev, vq) < 0)
                        return -1;
        } else {
                if (vring_translate_split(dev, vq) < 0)
                        return -1;
        }
out:
        vq->access_ok = 1;

        return 0;
}

void
vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_lock(vq);

        vq->access_ok = 0;
        vq->desc = NULL;
        vq->avail = NULL;
        vq->used = NULL;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                vhost_user_iotlb_wr_unlock(vq);
}

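/*
 * Put a virtqueue into its initial state: zero the structure, mark
 * both eventfds as uninitialized, initialize the per-virtqueue IOTLB
 * and flag the backend as inactive (-1).
 */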
static void
init_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to init vring, out of bound (%u)\n",
                                vring_idx);
                return;
        }

        vq = dev->virtqueue[vring_idx];

        memset(vq, 0, sizeof(struct vhost_virtqueue));

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        vhost_user_iotlb_init(dev, vring_idx);
        /* Backends are set to -1 indicating an inactive device. */
        vq->backend = -1;

        TAILQ_INIT(&vq->zmbuf_list);
}

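/*
 * Re-initialize a virtqueue, preserving its call fd across the
 * re-initialization.
 */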
233
234 static void
235 reset_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
236 {
237         struct vhost_virtqueue *vq;
238         int callfd;
239
240         if (vring_idx >= VHOST_MAX_VRING) {
241                 RTE_LOG(ERR, VHOST_CONFIG,
242                                 "Failed not init vring, out of bound (%d)\n",
243                                 vring_idx);
244                 return;
245         }
246
247         vq = dev->virtqueue[vring_idx];
248         callfd = vq->callfd;
249         init_vring_queue(dev, vring_idx);
250         vq->callfd = callfd;
251 }
252
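/*
 * Allocate and initialize virtqueue vring_idx for a device, and account
 * for it in dev->nr_vring.
 */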
int
alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx)
{
        struct vhost_virtqueue *vq;

        vq = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
        if (vq == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for vring:%u.\n", vring_idx);
                return -1;
        }

        dev->virtqueue[vring_idx] = vq;
        init_vring_queue(dev, vring_idx);
        rte_spinlock_init(&vq->access_lock);
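        /*
         * Packed queues start with both wrap counters set to 1; each
         * counter flips whenever the corresponding ring index wraps.
         * These fields are unused for split queues.
         */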
        vq->avail_wrap_counter = 1;
        vq->used_wrap_counter = 1;

        dev->nr_vring += 1;

        return 0;
}

/*
 * Reset some variables in the device structure, while keeping a few
 * others untouched, such as vid, ifname and nr_vring: they should
 * remain the same unless the device is removed.
 */
void
reset_device(struct virtio_net *dev)
{
        uint32_t i;

        dev->features = 0;
        dev->protocol_features = 0;
        dev->flags &= VIRTIO_DEV_BUILTIN_VIRTIO_NET;

        for (i = 0; i < dev->nr_vring; i++)
                reset_vring_queue(dev, i);
}

/*
 * Invoked when a new vhost-user connection is established (i.e. when
 * a new virtio device is being attached).
 */
int
vhost_new_device(void)
{
        struct virtio_net *dev;
        int i;

        for (i = 0; i < MAX_VHOST_DEVICE; i++) {
                if (vhost_devices[i] == NULL)
                        break;
        }

        if (i == MAX_VHOST_DEVICE) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to find a free slot for new device.\n");
                return -1;
        }

        dev = rte_zmalloc(NULL, sizeof(struct virtio_net), 0);
        if (dev == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to allocate memory for new dev.\n");
                return -1;
        }

        vhost_devices[i] = dev;
        dev->vid = i;
        dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        dev->slave_req_fd = -1;
        dev->vdpa_dev_id = -1;
        rte_spinlock_init(&dev->slave_req_lock);

        return i;
}

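/*
 * Notify the attached vDPA device, if any, and the application that a
 * running device is being destroyed.
 */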
331
332 void
333 vhost_destroy_device_notify(struct virtio_net *dev)
334 {
335         struct rte_vdpa_device *vdpa_dev;
336         int did;
337
338         if (dev->flags & VIRTIO_DEV_RUNNING) {
339                 did = dev->vdpa_dev_id;
340                 vdpa_dev = rte_vdpa_get_device(did);
341                 if (vdpa_dev && vdpa_dev->ops->dev_close)
342                         vdpa_dev->ops->dev_close(dev->vid);
343                 dev->flags &= ~VIRTIO_DEV_RUNNING;
344                 dev->notify_ops->destroy_device(dev->vid);
345         }
346 }
347
348 /*
349  * Invoked when there is the vhost-user connection is broken (when
350  * the virtio device is being detached).
351  */
void
vhost_destroy_device(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 1);
        free_device(dev);

        vhost_devices[vid] = NULL;
}

void
vhost_attach_vdpa_device(int vid, int did)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (rte_vdpa_get_device(did) == NULL)
                return;

        dev->vdpa_dev_id = did;
}

void
vhost_detach_vdpa_device(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_user_host_notifier_ctrl(vid, false);

        dev->vdpa_dev_id = -1;
}

void
vhost_set_ifname(int vid, const char *if_name, unsigned int if_len)
{
        struct virtio_net *dev;
        unsigned int len;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        len = if_len > sizeof(dev->ifname) ?
                sizeof(dev->ifname) : if_len;

        strncpy(dev->ifname, if_name, len);
        dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

void
vhost_enable_dequeue_zero_copy(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        dev->dequeue_zero_copy = 1;
}

void
vhost_set_builtin_virtio_net(int vid, bool enable)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        if (enable)
                dev->flags |= VIRTIO_DEV_BUILTIN_VIRTIO_NET;
        else
                dev->flags &= ~VIRTIO_DEV_BUILTIN_VIRTIO_NET;
}

int
rte_vhost_get_mtu(int vid, uint16_t *mtu)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -ENODEV;

        if (!(dev->flags & VIRTIO_DEV_READY))
                return -EAGAIN;

        if (!(dev->features & (1ULL << VIRTIO_NET_F_MTU)))
                return -ENOTSUP;

        *mtu = dev->mtu;

        return 0;
}

int
rte_vhost_get_numa_node(int vid)
{
#ifdef RTE_LIBRTE_VHOST_NUMA
        struct virtio_net *dev = get_device(vid);
        int numa_node;
        int ret;

        if (dev == NULL)
                return -1;

        ret = get_mempolicy(&numa_node, NULL, 0, dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to query numa node: %s\n",
                        vid, rte_strerror(errno));
                return -1;
        }

        return numa_node;
#else
        RTE_SET_USED(vid);
        return -1;
#endif
}

uint32_t
rte_vhost_get_queue_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring / 2;
}

uint16_t
rte_vhost_get_vring_num(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return 0;

        return dev->nr_vring;
}

int
rte_vhost_get_ifname(int vid, char *buf, size_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        len = RTE_MIN(len, sizeof(dev->ifname));

        strncpy(buf, dev->ifname, len);
        buf[len - 1] = '\0';

        return 0;
}

int
rte_vhost_get_negotiated_features(int vid, uint64_t *features)
{
        struct virtio_net *dev;

        dev = get_device(vid);
        if (!dev)
                return -1;

        *features = dev->features;
        return 0;
}

int
rte_vhost_get_mem_table(int vid, struct rte_vhost_memory **mem)
{
        struct virtio_net *dev;
        struct rte_vhost_memory *m;
        size_t size;

        dev = get_device(vid);
        if (!dev)
                return -1;

        size = dev->mem->nregions * sizeof(struct rte_vhost_mem_region);
        m = malloc(sizeof(struct rte_vhost_memory) + size);
        if (!m)
                return -1;

        m->nregions = dev->mem->nregions;
        memcpy(m->regions, dev->mem->regions, size);
        *mem = m;

        return 0;
}

int
rte_vhost_get_vhost_vring(int vid, uint16_t vring_idx,
                          struct rte_vhost_vring *vring)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        vring->desc  = vq->desc;
        vring->avail = vq->avail;
        vring->used  = vq->used;
        vring->log_guest_addr  = vq->log_guest_addr;

        vring->callfd  = vq->callfd;
        vring->kickfd  = vq->kickfd;
        vring->size    = vq->size;

        return 0;
}

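/*
 * A minimal usage sketch (hypothetical application code, not part of
 * this file): fetch the vring of queue 0 and trigger a guest
 * notification on it.
 *
 *      struct rte_vhost_vring vring;
 *
 *      if (rte_vhost_get_vhost_vring(vid, 0, &vring) == 0)
 *              rte_vhost_vring_call(vid, 0);
 */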
int
rte_vhost_vring_call(int vid, uint16_t vring_idx)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return -1;

        if (vring_idx >= VHOST_MAX_VRING)
                return -1;

        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return -1;

        vhost_vring_call(dev, vq);
        return 0;
}

uint16_t
rte_vhost_avail_entries(int vid, uint16_t queue_id)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (!dev)
                return 0;

        if (queue_id >= VHOST_MAX_VRING)
                return 0;

        vq = dev->virtqueue[queue_id];
        if (!vq || !vq->enabled)
                return 0;

        return *(volatile uint16_t *)&vq->avail->idx - vq->last_used_idx;
}

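/*
 * Control guest notification suppression by setting or clearing
 * VRING_USED_F_NO_NOTIFY in the used ring flags; this mechanism only
 * applies to split virtqueues.
 */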
int
rte_vhost_enable_guest_notification(int vid, uint16_t queue_id, int enable)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (enable)
                dev->virtqueue[queue_id]->used->flags &=
                        ~VRING_USED_F_NO_NOTIFY;
        else
                dev->virtqueue[queue_id]->used->flags |= VRING_USED_F_NO_NOTIFY;
        return 0;
}

void
rte_vhost_log_write(int vid, uint64_t addr, uint64_t len)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return;

        vhost_log_write(dev, addr, len);
}

void
rte_vhost_log_used_vring(int vid, uint16_t vring_idx,
                         uint64_t offset, uint64_t len)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return;

        if (vring_idx >= VHOST_MAX_VRING)
                return;
        vq = dev->virtqueue[vring_idx];
        if (!vq)
                return;

        vhost_log_used_vring(dev, vq, offset, len);
}

uint32_t
rte_vhost_rx_queue_count(int vid, uint16_t qid)
{
        struct virtio_net *dev;
        struct vhost_virtqueue *vq;

        dev = get_device(vid);
        if (dev == NULL)
                return 0;

        if (unlikely(qid >= dev->nr_vring || (qid & 1) == 0)) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, qid);
                return 0;
        }

        vq = dev->virtqueue[qid];
        if (vq == NULL)
                return 0;

        if (unlikely(vq->enabled == 0 || vq->avail == NULL))
                return 0;

        return *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx;
}

int rte_vhost_get_vdpa_device_id(int vid)
{
        struct virtio_net *dev = get_device(vid);

        if (dev == NULL)
                return -1;

        return dev->vdpa_dev_id;
}

int rte_vhost_get_log_base(int vid, uint64_t *log_base,
                uint64_t *log_size)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *log_base = dev->log_base;
        *log_size = dev->log_size;

        return 0;
}

int rte_vhost_get_vring_base(int vid, uint16_t queue_id,
                uint16_t *last_avail_idx, uint16_t *last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        *last_avail_idx = dev->virtqueue[queue_id]->last_avail_idx;
        *last_used_idx = dev->virtqueue[queue_id]->last_used_idx;

        return 0;
}

int rte_vhost_set_vring_base(int vid, uint16_t queue_id,
                uint16_t last_avail_idx, uint16_t last_used_idx)
{
        struct virtio_net *dev = get_device(vid);

        if (!dev)
                return -1;

        if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
                RTE_LOG(ERR, VHOST_DATA,
                        "(%d) %s: built-in vhost net backend is disabled.\n",
                        dev->vid, __func__);
                return -1;
        }

        dev->virtqueue[queue_id]->last_avail_idx = last_avail_idx;
        dev->virtqueue[queue_id]->last_used_idx = last_used_idx;

        return 0;
}