/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
};

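/*
 * Return the block size reported by fstat() for the given fd; it is used
 * as the mmap() alignment for hugepage-backed memory regions. Returns
 * (uint64_t)-1 on failure.
 */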
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

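/* Unmap and close every region of the device's guest memory table. */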
static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

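/*
 * Release everything the vhost-user backend holds for this device:
 * the guest memory table, the guest page array, the dirty log mapping
 * and the slave request fd.
 */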
void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * VHOST_USER_SET_OWNER currently requires nothing of us,
 * so this function simply returns success.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

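/*
 * RESET_OWNER stops the device if it is running and resets it
 * back to its initial state.
 */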
static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The master requests the set of features we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * We receive the negotiated set of features supported by both us and the
 * virtio device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is force disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
        }

        vq->shadow_used_ring = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->shadow_used_ring) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for shadow used ring.\n");
                return -1;
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that
 * they reside on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];

                if (qva >= reg->guest_user_addr &&
                    qva <  reg->guest_user_addr + reg->size) {
                        return qva - reg->guest_user_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        &size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra);
}

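/*
 * Translate the virtqueue's desc/avail/used ring addresses to host
 * virtual addresses, reallocating the structures on the right NUMA node
 * if needed. Returns the (possibly reallocated) device.
 */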
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, sizeof(struct vring_desc));
        if (vq->desc == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find desc ring address.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, sizeof(struct vring_avail));
        if (vq->avail == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find avail ring address.\n",
                        dev->vid);
                return dev;
        }

        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, sizeof(struct vring_used));
        if (vq->used == 0) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to find used ring address.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is
         * not started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.state.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

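/*
 * Record one guest-physical to host-physical page mapping, growing the
 * guest page table on demand and merging the entry with the previous
 * one when the host-physical ranges are contiguous.
 */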
static void
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                struct guest_page *new_pages;

                new_pages = realloc(dev->guest_pages,
                                dev->max_guest_pages * 2 * sizeof(*page));
                if (new_pages == NULL)
                        /* Keep the old table; the new page is dropped. */
                        return;
                dev->guest_pages = new_pages;
                dev->max_guest_pages *= 2;
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;
}

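/*
 * Walk a guest memory region page by page and record the guest-physical
 * to host-physical mapping of each host page backing it.
 */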
static void
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

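/*
 * Handle VHOST_USER_SET_MEM_TABLE: mmap() each guest memory region the
 * master shares with us, and rebuild the tables used for address
 * translation and (for dequeue zero copy) physical-address lookup.
 */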
static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int fd;

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;
                mmap_size   = reg->size + mmap_offset;

                /*
                 * On older longterm Linux kernels (e.g. 2.6.32 and 3.2.72),
                 * mmap() without MAP_ANONYMOUS fails with EINVAL unless the
                 * length argument is aligned with the hugepage size. To
                 * avoid that failure, keep the length aligned here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_POPULATE, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        add_guest_pages(dev, reg, alignment);

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

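/* A virtqueue is ready when its rings are mapped and both eventfds are set. */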
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc && vq->avail && vq->used &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

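/* The device is ready once every allocated virtqueue is ready. */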
static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

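/*
 * Handle VHOST_USER_SET_VRING_CALL: store the eventfd used to interrupt
 * the guest, closing any previously installed one.
 */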
static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

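/*
 * Handle VHOST_USER_SET_VRING_KICK: store the eventfd the master kicks,
 * translate the ring addresses, and enable the ring right away when the
 * protocol-features extension was not negotiated.
 */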
static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

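/* Free all zero-copy mbufs still held by the virtqueue. */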
static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, qemu will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        dev->flags &= ~VIRTIO_DEV_READY;

        /* Here we are safe to get the last used index */
        msg->payload.state.num = vq->last_used_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current qemu vhost-user implementation, this message
         * is only ever sent in vhost_vring_stop.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        rte_free(vq->shadow_used_ring);
        vq->shadow_used_ring = NULL;

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, qemu sends this message to
 * enable or disable a virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, msg->payload.state.index);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                msg->payload.state.index, enable);

        dev->virtqueue[msg->payload.state.index]->enabled = enable;

        return 0;
}

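/* Report the vhost-user protocol features we support. */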
static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features = VHOST_USER_PROTOCOL_FEATURES;

        rte_vhost_driver_get_features(dev->ifname, &features);

        /*
         * The REPLY_ACK protocol feature is for now only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable REPLY_ACK to cope with older buggy Qemu versions
         * (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

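/* Store the protocol features negotiated by the master, if all are known to us. */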
static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRIu32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page size aligned.
         */
        addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free the previously mapped log memory in case
         * VHOST_USER_SET_LOG_BASE is received multiple times.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here. Instead, we set
 * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);

        return 0;
}

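/* Handle VHOST_USER_NET_SET_MTU: validate and store the MTU announced by the master. */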
static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

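/*
 * Handle VHOST_USER_SET_SLAVE_REQ_FD: store the fd of the channel used
 * for slave-initiated requests, such as IOTLB miss messages.
 */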
static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

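/*
 * Return 1 if the IOTLB update covers one of the virtqueue's ring
 * addresses, meaning the rings can now be translated.
 */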
static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

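/*
 * Return 1 if the invalidated range overlaps any of the virtqueue's
 * mapped desc/avail/used rings.
 */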
static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

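/*
 * Handle VHOST_USER_IOTLB_MSG: insert into or remove entries from the
 * per-virtqueue IOTLB caches, (in)validating ring addresses as needed.
 */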
static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                vva = qva_to_vva(dev, imsg->uaddr);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        imsg->size, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}

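/* Send a vhost-user message (header plus payload) on the given socket. */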
static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, NULL, 0);
}

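/* Stamp the version and reply flags on the message and send it back to the master. */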
static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg);
}

/*
 * Allocate a queue pair if it hasn't been allocated yet
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

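/*
 * Read one vhost-user request from the master on the given socket and
 * dispatch it to the handler above, sending a reply when required.
 */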
int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                vhost_user_set_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

        return 0;
}

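/*
 * Send an IOTLB miss request for the given IOVA and permissions to the
 * master, over the slave request channel.
 */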
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}