/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
};

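/*
 * Return the block size of the file system backing 'fd'. For a file on
 * hugetlbfs this is the hugepage size, which is used later as the mmap()
 * length alignment.
 */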
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * VHOST_USER_SET_OWNER is currently a no-op for us: there is nothing
 * to do, so simply report success.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The master requests the feature bits we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * The master sends us the features negotiated between it and the virtio
 * device; they must be a subset of the features we advertised.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if the master tries to change features while the
                 * device is in running state. The exception is VHOST_F_LOG_ALL,
                 * which is enabled when live migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is forcibly disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
        }

        vq->shadow_used_ring = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->shadow_used_ring) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for shadow used ring.\n");
                return -1;
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that they
 * reside on the same NUMA node as the memory of the vring descriptor table.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
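/*
 * The translation is a linear offset within the containing region. For
 * example (hypothetical numbers): with guest_user_addr = 0x7f0000000000,
 * host_user_addr = 0x400000000000 and size = 1 GiB, the QEMU virtual
 * address 0x7f0000001000 maps to 0x400000001000 in our address space.
 */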
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];

                if (qva >= reg->guest_user_addr &&
                    qva <  reg->guest_user_addr + reg->size) {
                        return qva - reg->guest_user_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        &size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra);
}

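/*
 * Translate the descriptor, avail and used ring addresses of the given
 * virtqueue to host virtual addresses. This may reallocate the device and
 * virtqueue onto the NUMA node of the descriptor ring (see numa_realloc()),
 * so callers must use the returned device pointer afterwards.
 */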
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, sizeof(struct vring_desc));
        if (vq->desc == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find desc ring address.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, sizeof(struct vring_avail));
        if (vq->avail == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find avail ring address.\n",
                        dev->vid);
                return dev;
        }

        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, sizeof(struct vring_used));
        if (vq->used == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find used ring address.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index: txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is
         * not started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the ring base (the last used/avail index)
 * from which processing should resume.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

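/*
 * Record one guest-physical to host-physical page mapping for the dequeue
 * zero-copy path. Contiguous pages are merged into the previous entry, and
 * the table grows by doubling when full.
 */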
static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                page = realloc(dev->guest_pages,
                               dev->max_guest_pages * sizeof(*page));
                if (page == NULL)
                        return -1;
                dev->guest_pages = page;
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                       size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

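/*
 * Check whether the memory layout described by a SET_MEM_TABLE message
 * differs from the one currently mapped, so that identical updates can
 * be skipped.
 */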
static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

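/*
 * Map the guest memory regions described by a SET_MEM_TABLE message: tear
 * down any previous mapping, mmap() each region's file descriptor with a
 * hugepage-aligned length, and, for dequeue zero-copy, record the guest
 * page mappings.
 */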
static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int fd;

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(pmsg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;
                mmap_size   = reg->size + mmap_offset;

                /*
                 * On older long-term Linux kernels (e.g. 2.6.32 and 3.2.72),
                 * mmap() without the MAP_ANONYMOUS flag must be called with
                 * a length aligned to the hugepage size, or it fails with
                 * EINVAL. Align the length here to avoid that failure.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_POPULATE, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy &&
                    add_guest_pages(dev, reg, alignment) < 0) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to store guest pages for region %u\n",
                                i);
                        goto err_mmap;
                }

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

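/*
 * A virtqueue is ready once its three rings are mapped and both its kick
 * and call eventfds have been received; the device as a whole is ready
 * once every created virtqueue is.
 */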
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc && vq->avail && vq->used &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

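/*
 * SET_VRING_CALL and SET_VRING_KICK carry the vring index in the low bits
 * of payload.u64; VHOST_USER_VRING_NOFD_MASK signals that no file
 * descriptor accompanies the message, otherwise the eventfd arrives as
 * fds[0].
 */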
static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        dev->flags &= ~VIRTIO_DEV_READY;

        /* Here we are safe to get the last used index */
        msg->payload.state.num = vq->last_used_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * In the current QEMU vhost-user implementation, this message is
         * only ever sent from vhost_vring_stop.
         * TODO: clean up the vring; it is not usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        rte_free(vq->shadow_used_ring);
        vq->shadow_used_ring = NULL;

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends this message to
 * enable or disable a virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, msg->payload.state.index);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                msg->payload.state.index, enable);

        dev->virtqueue[msg->payload.state.index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;

        rte_vhost_driver_get_features(dev->ifname, &features);

        /*
         * For now, the REPLY_ACK protocol feature is only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable REPLY_ACK to accommodate older buggy QEMU versions
         * (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

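/*
 * SET_LOG_BASE maps the dirty-page log memory shared with QEMU for live
 * migration; dev->log_base points mmap_offset bytes into the mapping.
 */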
static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRId64", offset: %"PRId64"\n",
                size, off);

        /*
         * mmap from 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page-size aligned.
         */
        addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free any previously mapped log memory, since
         * VHOST_USER_SET_LOG_BASE may occasionally be sent more than once.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about the
 * new location of the migrated VM, so that packets from outside are not
 * lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

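/*
 * The two helpers below check whether an IOTLB update or invalidation range
 * overlaps the ring addresses of a virtqueue, i.e. whether the rings must
 * be (re)translated or invalidated.
 */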
static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                vva = qva_to_vva(dev, imsg->uaddr);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        imsg->size, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

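/*
 * A vhost-user message on the wire is a fixed-size header (request, flags,
 * size) followed by msg->size bytes of payload; any file descriptors are
 * carried alongside the header as ancillary data on the Unix socket.
 */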
/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
}

static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg);
}

/*
 * Allocate a queue pair if it hasn't been allocated yet.
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

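/*
 * Central vhost-user message dispatcher: read one message from the socket,
 * make sure the addressed virtqueue exists, handle the request, send a
 * REPLY_ACK if the master asked for one, and start the device once all
 * rings are ready.
 */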
int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                ret = vhost_user_set_features(dev, msg.payload.u64);
                if (ret)
                        return -1;
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;

        }

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

        return 0;
}

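/*
 * Ask the master to resolve an IOTLB miss: send a VHOST_USER_SLAVE_IOTLB_MSG
 * of type VHOST_IOTLB_MISS over the slave channel; the master answers later
 * with a VHOST_IOTLB_UPDATE handled in vhost_user_iotlb_msg().
 */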
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}