/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  A malicious guest that has escaped can then launch
 * further attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

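/*
 * Illustrative sketch of the security model above; not used by the
 * handlers below, and the helper name is hypothetical.  Every index read
 * from a VhostUserMsg must be range-checked like this before it is used
 * to index per-device arrays.
 */
static inline int
vring_index_is_sane(const VhostUserMsg *msg)
{
        /* payload.state.index comes straight off the wire */
        return msg->payload.state.index < VHOST_MAX_VRING;
}
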
static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
};

static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
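
/*
 * Note (informational): for a file on hugetlbfs, st_blksize reports the
 * size of the hugepages backing the file, which is why get_blk_size()
 * doubles as the mmap alignment in vhost_user_set_mem_table() below.
 */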

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * Taking ownership is currently a no-op: this handler always
 * returns success.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The master requests the set of features we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * We receive the features negotiated by the master and the virtio device;
 * they must be a subset of the features we support.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
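                /*
                 * Example: "dev->features ^ features" has a bit set for
                 * every toggled feature; masking out VHOST_F_LOG_ALL thus
                 * leaves zero exactly when LOG_ALL is the only change.
                 */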
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(vq);
                }
        }

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is force disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        vq->shadow_used_ring = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->shadow_used_ring) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for shadow used ring.\n");
                return -1;
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that
 * they land on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_ring;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                        sizeof(struct zcopy_mbuf), 0, newnode);
                if (new_zmbuf) {
                        rte_free(vq->zmbufs);
                        vq->zmbufs = new_zmbuf;
                }

                new_shadow_used_ring = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct vring_used_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_shadow_used_ring) {
                        rte_free(vq->shadow_used_ring);
                        vq->shadow_used_ring = new_shadow_used_ring;
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];

                if (qva >= reg->guest_user_addr &&
                    qva <  reg->guest_user_addr + reg->size) {
                        return qva - reg->guest_user_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}
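
/*
 * Worked example (illustrative numbers): for a region with
 * guest_user_addr = 0x7f6000000000, host_user_addr = 0x7fab00000000 and
 * size = 0x40000000, the QEMU virtual address 0x7f6000001000 translates
 * to 0x7f6000001000 - guest_user_addr + host_user_addr = 0x7fab00001000.
 */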

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        &size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, sizeof(struct vring_desc));
        if (vq->desc == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find desc ring address.\n",
                        dev->vid);
                return dev;
        }

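        /*
         * numa_realloc() may move both the device and the virtqueue to a
         * different NUMA node, so dev, vq and addr must be reloaded before
         * being dereferenced again.
         */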
        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, sizeof(struct vring_avail));
        if (vq->avail == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find avail ring address.\n",
                        dev->vid);
                return dev;
        }

        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, sizeof(struct vring_used));
        if (vq->used == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find used ring address.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is
         * not started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

static void
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                struct guest_page *pages;

                pages = realloc(dev->guest_pages,
                                dev->max_guest_pages * 2 * sizeof(*page));
                if (pages == NULL) {
                        /* Keep the old array on failure and drop the page
                         * rather than dereference a NULL pointer. */
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "cannot realloc guest_pages\n");
                        return;
                }
                dev->guest_pages = pages;
                dev->max_guest_pages *= 2;
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;
}

static void
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }
}
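
/*
 * Worked example (illustrative numbers): with 4 KiB pages, a region with
 * guest_phys_addr = 0x1ff800 and size = 0x3000 is walked in chunks of
 * 0x800, 0x1000, 0x1000 and 0x800 bytes, one rte_mem_virt2iova() lookup
 * per host page; add_one_guest_page() then merges neighbouring chunks
 * whose host physical addresses turn out to be contiguous.
 */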

#ifdef RTE_LIBRTE_VHOST_DEBUG
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int fd;

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(pmsg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;
                mmap_size   = reg->size + mmap_offset;

                /*
                 * Without MAP_ANONYMOUS, mmap() must be called with a
                 * length aligned to the hugepage size on older long-term
                 * Linux kernels (e.g. 2.6.32 and 3.2.72), or it fails
                 * with EINVAL.  To avoid that failure, keep the length
                 * aligned here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_POPULATE, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        add_guest_pages(dev, reg, alignment);

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}
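
/*
 * Worked example for the alignment logic above (illustrative numbers):
 * with a 2 MiB hugepage backing (get_blk_size() == 0x200000), a region
 * of size 0x1f400000 with mmap_offset 0x1000 yields mmap_size
 * 0x1f401000, which RTE_ALIGN_CEIL() rounds up to 0x1f600000 before
 * mmap().
 */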

static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc && vq->avail && vq->used &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        dev->flags &= ~VIRTIO_DEV_READY;

        /* Here we are safe to get the last used index */
        msg->payload.state.num = vq->last_used_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current QEMU vhost-user implementation, this message
         * is sent, and only sent, from vhost_vring_stop().
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        rte_free(vq->shadow_used_ring);
        vq->shadow_used_ring = NULL;

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us this message
 * to enable (or disable) a virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, msg->payload.state.index);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                msg->payload.state.index, enable);

        dev->virtqueue[msg->payload.state.index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;

        rte_vhost_driver_get_features(dev->ifname, &features);

        /*
         * The REPLY_ACK protocol feature is for now mandatory only for the
         * IOMMU feature.  If IOMMU is explicitly disabled by the
         * application, also disable REPLY_ACK to accommodate older buggy
         * QEMU versions (v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRIu32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page-size aligned.
         */
        addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free previously mapped log memory in case multiple
         * VHOST_USER_SET_LOG_BASE messages are received.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}
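
/*
 * Note (informational): the mapped area is the dirty-page bitmap shared
 * with the master during live migration; each bit covers one
 * VHOST_LOG_PAGE sized chunk of guest memory and is set by the
 * vhost_log_write() helper when the backend writes to guest pages.
 */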

/*
 * An RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" an RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() ensures the mac is copied before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}
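
/*
 * The three checks above are instances of the standard closed-interval
 * overlap test: [istart, iend] and [vstart, vend] intersect iff each
 * start lies at or before the other interval's end.  A generic form of
 * the test (illustrative sketch, not used by the handlers):
 */
static inline int
ranges_overlap(uint64_t a_start, uint64_t a_end,
               uint64_t b_start, uint64_t b_end)
{
        /* assumes inclusive end points, as in is_vring_iotlb_invalidate() */
        return a_start <= b_end && b_start <= a_end;
}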

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                vva = qva_to_vva(dev, imsg->uaddr);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        imsg->size, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}
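
/*
 * Note (informational): messages are read in two steps, matching the
 * vhost-user framing: a fixed-size header (VHOST_USER_HDR_SIZE), which
 * may carry ancillary file descriptors, then msg->size payload bytes,
 * with msg->size bounds-checked against the payload union as required
 * by the security model above.
 */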

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
}

static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg);
}

/*
 * Allocate a queue pair if it hasn't been allocated yet
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_lock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_unlock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;
        int unlock_required = 0;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        /*
         * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
         * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
         * and the device is destroyed.  destroy_device() waits for queues to
         * become inactive, so it is safe.  Otherwise taking the access_lock
         * would cause a deadlock.
         */
        switch (msg.request.master) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
        case VHOST_USER_SET_MEM_TABLE:
        case VHOST_USER_SET_LOG_BASE:
        case VHOST_USER_SET_LOG_FD:
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_ADDR:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
        case VHOST_USER_SET_VRING_ENABLE:
        case VHOST_USER_SEND_RARP:
        case VHOST_USER_NET_SET_MTU:
        case VHOST_USER_SET_SLAVE_REQ_FD:
                vhost_user_lock_all_queue_pairs(dev);
                unlock_required = 1;
                break;
        default:
                break;
        }

        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                ret = vhost_user_set_features(dev, msg.payload.u64);
                if (ret)
                        return -1;
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

        return 0;
}

int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}