vhost: validate untrusted memory regions number field
[dpdk.git] lib/librte_vhost/vhost_user.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  A malicious guest that has escaped can then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */
21
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <sys/mman.h>
28 #include <sys/types.h>
29 #include <sys/stat.h>
30 #include <assert.h>
31 #ifdef RTE_LIBRTE_VHOST_NUMA
32 #include <numaif.h>
33 #endif
34
35 #include <rte_common.h>
36 #include <rte_malloc.h>
37 #include <rte_log.h>
38
39 #include "iotlb.h"
40 #include "vhost.h"
41 #include "vhost_user.h"
42
43 #define VIRTIO_MIN_MTU 68
44 #define VIRTIO_MAX_MTU 65535
45
static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
};

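/*
 * Return the block size reported by fstat() on the file descriptor, used as
 * the mmap alignment for hugepage-backed regions, or (uint64_t)-1 on failure.
 */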
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

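/* Unmap and close every guest memory region attached to the device. */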
static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

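/*
 * Release everything allocated on behalf of the vhost-user master: memory
 * regions, the guest page array, the dirty log mapping and the slave
 * request fd.
 */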
void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * This function just returns success at the moment; no per-device
 * ownership state is tracked yet.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The master requests the set of features we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * We receive the features negotiated between our backend and the virtio
 * device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(vq);
                }
        }

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is force disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        vq->shadow_used_ring = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->shadow_used_ring) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for shadow used ring.\n");
                return -1;
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that they
 * reside on the same NUMA node as the vring descriptor memory.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_ring;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                        sizeof(struct zcopy_mbuf), 0, newnode);
                if (new_zmbuf) {
                        rte_free(vq->zmbufs);
                        vq->zmbufs = new_zmbuf;
                }

                new_shadow_used_ring = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct vring_used_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_shadow_used_ring) {
                        rte_free(vq->shadow_used_ring);
                        vq->shadow_used_ring = new_shadow_used_ring;
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];

                if (qva >= reg->guest_user_addr &&
                    qva <  reg->guest_user_addr + reg->size) {
                        return qva - reg->guest_user_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        &size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra);
}

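/*
 * Translate the desc, avail and used ring addresses of the given virtqueue
 * into host virtual addresses. The device may be reallocated on another NUMA
 * node in the process, so the (possibly new) device pointer is returned.
 */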
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, sizeof(struct vring_desc));
        if (vq->desc == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find desc ring address.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, sizeof(struct vring_avail));
        if (vq->avail == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find avail ring address.\n",
                        dev->vid);
                return dev;
        }

        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, sizeof(struct vring_used));
        if (vq->used == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find used ring address.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

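/*
 * Record one guest-physical to host-physical page mapping. Pages that are
 * host-physically contiguous with the previous entry are merged into it.
 */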
static void
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                dev->guest_pages = realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page));
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;
}

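/*
 * Walk a guest memory region page by page and record the host-physical
 * address backing each guest-physical page.
 */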
static void
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

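/*
 * Return true if the memory layout described by the new table differs from
 * the regions currently mapped, in which case a full remap is required.
 */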
static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int fd;

        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
                return -1;
        }

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(pmsg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;
                mmap_size   = reg->size + mmap_offset;

                /* On older long-term Linux kernels (e.g. 2.6.32 and 3.2.72),
                 * mmap() without MAP_ANONYMOUS must be called with a length
                 * aligned to the hugepage size, or it fails with EINVAL.
                 *
                 * To avoid that failure, make sure the length stays aligned
                 * here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_POPULATE, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        add_guest_pages(dev, reg, alignment);

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

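/*
 * A vring is ready once its desc, avail and used addresses have been
 * translated and both its kick and call eventfds have been set up.
 */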
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc && vq->avail && vq->used &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

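/*
 * Install the eventfd we use to interrupt (call) the guest for the given
 * vring, replacing and closing any previously installed one.
 */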
static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

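/*
 * Install the eventfd the guest kicks to notify us of new buffers. Receiving
 * this message also means the ring is being started, so its addresses are
 * translated at this point.
 */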
static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

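/*
 * Free every zero-copy mbuf still attached to the virtqueue, then release
 * the zmbuf array itself.
 */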
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        dev->flags &= ~VIRTIO_DEV_READY;

        /* It is now safe to read the last used index. */
        msg->payload.state.num = vq->last_used_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d num:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * In the current QEMU vhost-user implementation, this message is
         * sent from vhost_vring_stop, and only from there.
         * TODO: clean up the vring; it is not usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        rte_free(vq->shadow_used_ring);
        vq->shadow_used_ring = NULL;

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * Once the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, msg->payload.state.index);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                msg->payload.state.index, enable);

        dev->virtqueue[msg->payload.state.index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;

        rte_vhost_driver_get_features(dev->ifname, &features);

        /*
         * The REPLY_ACK protocol feature is mandatory only for the IOMMU
         * feature for now. If IOMMU is explicitly disabled by the
         * application, also disable the REPLY_ACK feature to work around
         * buggy QEMU versions (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRIu32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
                size, off);

        /*
         * mmap from 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page size aligned.
         */
        addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free previously mapped log memory, in case
         * VHOST_USER_SET_LOG_BASE is received multiple times.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);

        return 0;
}

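/* Validate and record the MTU announced by the vhost-user master. */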
static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

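/*
 * Store the file descriptor of the slave request channel, which we use to
 * send requests (such as IOTLB misses) back to the master.
 */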
static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

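/*
 * Return 1 if the IOTLB update covers any of the vring's configured ring
 * addresses, meaning the ring may now be able to translate its addresses.
 */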
static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

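/*
 * Return 1 if the invalidated IOVA range overlaps the memory backing the
 * vring's desc, avail or used structures.
 */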
static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

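/*
 * Handle an IOTLB message from the master: insert or remove translation
 * cache entries, and translate or invalidate the ring addresses of any
 * virtqueue affected by the change.
 */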
static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                vva = qva_to_vva(dev, imsg->uaddr);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        imsg->size, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %u\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
}

static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg);
}

/*
 * Allocate a queue pair if it hasn't been allocated yet.
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

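/*
 * Take the access_lock of every allocated virtqueue, so that no datapath
 * thread touches the rings while a message that modifies them is handled.
 */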
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_lock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_unlock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

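/*
 * Read one vhost-user message from the given socket and dispatch it to the
 * matching handler, locking the virtqueues for handlers that modify them.
 */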
int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;
        int unlock_required = 0;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        /*
         * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
         * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
         * and the device is destroyed. destroy_device() waits for the queues
         * to become inactive, so this is safe; taking the access_lock for
         * them would otherwise cause a deadlock.
         */
        switch (msg.request.master) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
        case VHOST_USER_SET_MEM_TABLE:
        case VHOST_USER_SET_LOG_BASE:
        case VHOST_USER_SET_LOG_FD:
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_ADDR:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
        case VHOST_USER_SET_VRING_ENABLE:
        case VHOST_USER_SEND_RARP:
        case VHOST_USER_NET_SET_MTU:
        case VHOST_USER_SET_SLAVE_REQ_FD:
                vhost_user_lock_all_queue_pairs(dev);
                unlock_required = 1;
                break;
        default:
                break;
        }

        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                ret = vhost_user_set_features(dev, msg.payload.u64);
                if (ret) {
                        /* Release the queue locks taken above before bailing out. */
                        if (unlock_required)
                                vhost_user_unlock_all_queue_pairs(dev);
                        return -1;
                }
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (dev->dequeue_zero_copy) {
                        RTE_LOG(INFO, VHOST_CONFIG,
                                        "dequeue zero copy is enabled\n");
                }

                if (dev->notify_ops->new_device(dev->vid) == 0)
                        dev->flags |= VIRTIO_DEV_RUNNING;
        }

        return 0;
}

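/*
 * Send an IOTLB miss request for the given IOVA and permission to the
 * master over the slave request channel.
 */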
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}