vhost: enable fds passing in vhost-user messages
lib/librte_vhost/vhost_user.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */
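
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * in line with the security model above, handlers must validate message
 * fields before using them.  For example, a vring index taken from the
 * payload should be range-checked before it is used to index
 * dev->virtqueue[]:
 *
 *      uint32_t idx = msg->payload.state.index;
 *
 *      if (idx >= VHOST_MAX_VRING || dev->virtqueue[idx] == NULL)
 *              return VH_RESULT_ERR;
 *
 * VHOST_MAX_VRING is assumed here to be the size of the virtqueue table
 * declared in vhost.h.
 */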

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD] = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG] = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};

static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * For now this function does nothing and simply returns success.
 */
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg __rte_unused,
                        int main_fd __rte_unused)
{
        return VH_RESULT_OK;
}

static int
vhost_user_reset_owner(struct virtio_net **pdev,
                        struct VhostUserMsg *msg __rte_unused,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return VH_RESULT_OK;
}

/*
 * The features that we support are requested.
 */
static int
vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);

        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return VH_RESULT_REPLY;
}

/*
 * The number of queues that we support is requested.
 */
static int
vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return VH_RESULT_REPLY;
}

/*
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return VH_RESULT_ERR;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return VH_RESULT_OK;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return VH_RESULT_ERR;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but the first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(dev, vq);
                }
        }

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);

        return VH_RESULT_OK;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         */
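        /*
         * Editorial note: for a non-zero size, vq->size & (vq->size - 1) is
         * non-zero exactly when the size is not a power of two, since a
         * power of two has a single bit set.
         */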
        if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid virtqueue size %u\n", vq->size);
                return VH_RESULT_ERR;
        }

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is forcibly disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        if (vq_is_packed(dev)) {
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return VH_RESULT_ERR;
                }

        } else {
                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_split) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return VH_RESULT_ERR;
                }
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return VH_RESULT_ERR;
        }

        return VH_RESULT_OK;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that they
 * live on the same NUMA node as the memory backing the vring descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                if (dev->dequeue_zero_copy) {
                        new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                                        sizeof(struct zcopy_mbuf), 0, newnode);
                        if (new_zmbuf) {
                                rte_free(vq->zmbufs);
                                vq->zmbufs = new_zmbuf;
                        }
                }

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
        *len = 0;

        return 0;
}
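
/*
 * Worked example (editorial addition): for a region with
 * guest_user_addr = 0x7f0000000000, host_user_addr = 0x400000000000 and
 * size = 0x40000000, the QVA 0x7f0000001000 falls inside the region and
 * translates to 0x7f0000001000 - 0x7f0000000000 + 0x400000000000 =
 * 0x400000001000.  *len is clamped so the returned mapping never extends
 * past the end of the region.
 */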

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len;

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                vq->log_guest_addr = 0;
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 ||
                        len != sizeof(struct vring_avail) +
                        sizeof(uint16_t) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != sizeof(struct vring_used) +
                        sizeof(struct vring_used_elem) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;

        if (dev->mem == NULL)
                return VH_RESULT_ERR;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return VH_RESULT_ERR;

                *pdev = dev;
        }

        return VH_RESULT_OK;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return VH_RESULT_OK;
}

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                dev->guest_pages = realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page));
                if (!dev->guest_pages) {
                        RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}
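
/*
 * Example of the merge above (editorial addition): if the last recorded
 * entry is {host_phys_addr = 0x1000, size = 0x1000} and the next page has
 * host_phys_addr = 0x2000, the two are physically contiguous, so the last
 * entry simply grows to size = 0x2000 instead of consuming a new slot.
 */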

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory memory = msg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
                return VH_RESULT_ERR;
        }

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(msg->fds[i]);

                return VH_RESULT_OK;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        /* Flush IOTLB cache as previous HVAs are now invalid */
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return VH_RESULT_ERR;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return VH_RESULT_ERR;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = msg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow */
                if (mmap_offset >= -reg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* Without the MAP_ANONYMOUS flag, mmap() on older long-term
                 * Linux kernels (such as 2.6.32 and 3.2.72) must be called
                 * with a length aligned to the hugepage size, or it fails
                 * with EINVAL.
                 *
                 * To avoid that failure, make sure the length we pass stays
                 * aligned.
                 */
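                /*
                 * For example (editorial addition): with 2 MB hugepages, a
                 * mmap_size of 0x201000 is rounded up by RTE_ALIGN_CEIL()
                 * below to 0x400000.
                 */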
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                RTE_LOG(ERR, VHOST_CONFIG,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev) {
                                dev = *pdev;
                                goto err_mmap;
                        }

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return VH_RESULT_OK;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return VH_RESULT_ERR;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = !!vq->desc_packed;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static int
vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = msg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;

        return VH_RESULT_OK;
}

static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");

        return VH_RESULT_OK;
}

static int
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = msg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return VH_RESULT_ERR;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;

        return VH_RESULT_OK;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);

        dev->flags &= ~VIRTIO_DEV_READY;
        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

        /* Here we are safe to get the last avail index */
        msg->payload.state.num = vq->last_avail_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current QEMU vhost-user implementation, this message
         * is sent, and only sent, from vhost_vring_stop.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        if (vq_is_packed(dev)) {
                rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = NULL;
        } else {
                rte_free(vq->shadow_used_split);
                vq->shadow_used_split = NULL;
        }

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        msg->size = sizeof(msg->payload.state);
        msg->fd_num = 0;

        return VH_RESULT_REPLY;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);

        dev->virtqueue[index]->enabled = enable;

        return VH_RESULT_OK;
}

static int
vhost_user_get_protocol_features(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features, protocol_features;

        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);

        /*
         * The REPLY_ACK protocol feature is, for now, only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable the REPLY_ACK feature to work around older buggy QEMU
         * versions (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return VH_RESULT_REPLY;
}

static int
vhost_user_set_protocol_features(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t protocol_features = msg->payload.u64;

        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid protocol features.\n",
                        dev->vid);
                return VH_RESULT_ERR;
        }

        dev->protocol_features = protocol_features;

        return VH_RESULT_OK;
}

static int
vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return VH_RESULT_ERR;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRIu32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return VH_RESULT_ERR;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;

        /* Don't allow mmap_offset to point outside the mmap region */
        if (off > size) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
                        off, size);
                return VH_RESULT_ERR;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page size aligned.
         */
        addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return VH_RESULT_ERR;
        }

        /*
         * Free any previously mapped log memory, in case
         * VHOST_USER_SET_LOG_BASE is received multiple times.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        /*
         * The spec is not clear about it (yet), but QEMU doesn't expect
         * any payload in the reply.
         */
        msg->size = 0;
        msg->fd_num = 0;

        return VH_RESULT_REPLY;
}

static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        close(msg->fds[0]);
        RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");

        return VH_RESULT_OK;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about the
 * new location of the migrated VM, so that packets from outside will not
 * be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);

        return VH_RESULT_OK;
}
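
/*
 * Sketch of the consumer side (editorial addition, simplified and not part
 * of the original file): the dequeue path can atomically test-and-clear
 * the flag so that only one burst injects the packet, roughly:
 *
 *      if (rte_atomic16_cmpset((volatile uint16_t *)&dev->broadcast_rarp.cnt,
 *                              1, 0)) {
 *              rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
 *              // prepend rarp_mbuf to the burst returned to the caller
 *      }
 *
 * rte_net_make_rarp_packet() is the librte_net helper for building such a
 * packet; the surrounding names here are assumptions for illustration.
 */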

static int
vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return VH_RESULT_ERR;
        }

        dev->mtu = msg->payload.u64;

        return VH_RESULT_OK;
}

static int
vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return VH_RESULT_ERR;
        }

        dev->slave_req_fd = fd;

        return VH_RESULT_OK;
}

static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

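        /*
         * Editorial note: each check below is the standard overlap test for
         * two closed intervals [istart, iend] and [vstart, vend]; they
         * intersect exactly when each one starts at or before the point
         * where the other ends.
         */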
        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva, len;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
                vva = qva_to_vva(dev, imsg->uaddr, &len);
                if (!vva)
                        return VH_RESULT_ERR;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        len, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return VH_RESULT_ERR;
        }

        return VH_RESULT_OK;
}

typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
					struct VhostUserMsg *msg,
					int main_fd);
static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
	[VHOST_USER_NONE] = NULL,
	[VHOST_USER_GET_FEATURES] = vhost_user_get_features,
	[VHOST_USER_SET_FEATURES] = vhost_user_set_features,
	[VHOST_USER_SET_OWNER] = vhost_user_set_owner,
	[VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
	[VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
	[VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
	[VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
	[VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
	[VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
	[VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
	[VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
	[VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
	[VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
	[VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
	[VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
	[VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
	[VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
	[VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
	[VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
	[VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
	[VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
	[VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
};
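
/*
 * Note: VHOST_USER_CRYPTO_CREATE_SESS and VHOST_USER_CRYPTO_CLOSE_SESS
 * have no entry here; they are presumably consumed by an external
 * backend through the pre/post handlers in dev->extern_ops.
 */
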
/*
 * Return the number of bytes read on success, zero if the peer closed
 * the connection, or a negative value on failure.
 */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
	int ret;

	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
		msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
	if (ret <= 0)
		return ret;

	if (msg->size) {
		if (msg->size > sizeof(msg->payload)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"invalid msg size: %u\n", msg->size);
			return -1;
		}
		ret = read(sockfd, &msg->payload, msg->size);
		if (ret <= 0)
			return ret;
		if (ret != (int)msg->size) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"read control message failed\n");
			return -1;
		}
	}

	return ret;
}

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
	if (!msg)
		return 0;

	return send_fd_message(sockfd, (char *)msg,
		VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
}

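/*
 * Send a reply on the master channel: stamp the protocol version and
 * reply flags, and clear VHOST_USER_NEED_REPLY so that no additional
 * reply-ack is sent for this request.
 */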
static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
	if (!msg)
		return 0;

	msg->flags &= ~VHOST_USER_VERSION_MASK;
	msg->flags &= ~VHOST_USER_NEED_REPLY;
	msg->flags |= VHOST_USER_VERSION;
	msg->flags |= VHOST_USER_REPLY_MASK;

	return send_vhost_message(sockfd, msg);
}

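/*
 * Send a request on the slave channel. When a reply is expected, the
 * slave request lock is taken here and released by
 * process_slave_message_reply() once the reply has been read, or right
 * away if the send itself fails.
 */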
static int
send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	int ret;

	if (msg->flags & VHOST_USER_NEED_REPLY)
		rte_spinlock_lock(&dev->slave_req_lock);

	ret = send_vhost_message(dev->slave_req_fd, msg);
	if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
		rte_spinlock_unlock(&dev->slave_req_lock);

	return ret;
}

/*
 * Allocate a queue pair if it hasn't been allocated yet. The vring index
 * is extracted from the payload; where it lives depends on the request
 * type, e.g. for VHOST_USER_SET_VRING_KICK/CALL/ERR the master encodes
 * it in the low bits of payload.u64 (VHOST_USER_VRING_IDX_MASK).
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
			struct VhostUserMsg *msg)
{
	uint16_t vring_idx;

	switch (msg->request.master) {
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
		vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
		break;
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_ENABLE:
		vring_idx = msg->payload.state.index;
		break;
	case VHOST_USER_SET_VRING_ADDR:
		vring_idx = msg->payload.addr.index;
		break;
	default:
		return 0;
	}

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid vring index: %u\n", vring_idx);
		return -1;
	}

	if (dev->virtqueue[vring_idx])
		return 0;

	return alloc_vring_queue(dev, vring_idx);
}

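/*
 * The virtqueue array may be sparse, so keep walking it until nr_vring
 * allocated queues have been locked instead of iterating a fixed range.
 */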
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_lock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_unlock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}

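/*
 * Read one vhost-user message from the master socket and dispatch it.
 * Returns 0 on success and -1 on failure, in which case the caller is
 * expected to drop the connection.
 */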
int
vhost_user_msg_handler(int vid, int fd)
{
	struct virtio_net *dev;
	struct VhostUserMsg msg;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;
	int ret;
	int unlock_required = 0;
	uint32_t skip_master = 0;
	int request;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	if (!dev->notify_ops) {
		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
		if (!dev->notify_ops) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to get callback ops for driver %s\n",
				dev->ifname);
			return -1;
		}
	}

	ret = read_vhost_message(fd, &msg);
	if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
		if (ret < 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"vhost read message failed\n");
		else if (ret == 0)
			RTE_LOG(INFO, VHOST_CONFIG,
				"vhost peer closed\n");
		else
			RTE_LOG(ERR, VHOST_CONFIG,
				"vhost read incorrect message\n");

		return -1;
	}

	if (msg.request.master != VHOST_USER_IOTLB_MSG)
		RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
			vhost_message_str[msg.request.master]);
	else
		RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
			vhost_message_str[msg.request.master]);

	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to alloc queue\n");
		return -1;
	}

	/*
	 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
	 * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
	 * and the device is destroyed. destroy_device waits for queues to
	 * become inactive, so it is safe. Otherwise taking the access_lock
	 * would cause a deadlock.
	 */
	switch (msg.request.master) {
	case VHOST_USER_SET_FEATURES:
	case VHOST_USER_SET_PROTOCOL_FEATURES:
	case VHOST_USER_SET_OWNER:
	case VHOST_USER_SET_MEM_TABLE:
	case VHOST_USER_SET_LOG_BASE:
	case VHOST_USER_SET_LOG_FD:
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
	case VHOST_USER_SET_VRING_ENABLE:
	case VHOST_USER_SEND_RARP:
	case VHOST_USER_NET_SET_MTU:
	case VHOST_USER_SET_SLAVE_REQ_FD:
		vhost_user_lock_all_queue_pairs(dev);
		unlock_required = 1;
		break;
	default:
		break;
	}

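	/*
	 * External backends (the vhost-crypto backend, for instance) may
	 * register pre/post handlers to intercept requests. A pre handler
	 * can ask for the generic handling below to be skipped by setting
	 * skip_master.
	 */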
	if (dev->extern_ops.pre_msg_handle) {
		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
				(void *)&msg, &skip_master);
		if (ret == VH_RESULT_ERR)
			goto skip_to_reply;
		else if (ret == VH_RESULT_REPLY)
			send_vhost_reply(fd, &msg);

		if (skip_master)
			goto skip_to_post_handle;
	}

	request = msg.request.master;
	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
		if (!vhost_message_handlers[request])
			goto skip_to_post_handle;
		ret = vhost_message_handlers[request](&dev, &msg, fd);

		switch (ret) {
		case VH_RESULT_ERR:
			RTE_LOG(ERR, VHOST_CONFIG,
				"Processing %s failed.\n",
				vhost_message_str[request]);
			break;
		case VH_RESULT_OK:
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Processing %s succeeded.\n",
				vhost_message_str[request]);
			break;
		case VH_RESULT_REPLY:
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"Processing %s succeeded and needs reply.\n",
				vhost_message_str[request]);
			send_vhost_reply(fd, &msg);
			break;
		}
	} else {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Requested invalid message type %d.\n", request);
		ret = VH_RESULT_ERR;
	}

skip_to_post_handle:
	if (ret != VH_RESULT_ERR && dev->extern_ops.post_msg_handle) {
		ret = (*dev->extern_ops.post_msg_handle)(
				dev->vid, (void *)&msg);
		if (ret == VH_RESULT_ERR)
			goto skip_to_reply;
		else if (ret == VH_RESULT_REPLY)
			send_vhost_reply(fd, &msg);
	}

skip_to_reply:
	if (unlock_required)
		vhost_user_unlock_all_queue_pairs(dev);

	/*
	 * If the request required a reply that was already sent,
	 * this optional reply-ack won't be sent as the
	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
	 */
	if (msg.flags & VHOST_USER_NEED_REPLY) {
		msg.payload.u64 = (ret == VH_RESULT_ERR);
		msg.size = sizeof(msg.payload.u64);
		msg.fd_num = 0;
		send_vhost_reply(fd, &msg);
	} else if (ret == VH_RESULT_ERR) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost message handling failed.\n");
		return -1;
	}

	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
		dev->flags |= VIRTIO_DEV_READY;

		if (dev->dequeue_zero_copy)
			RTE_LOG(INFO, VHOST_CONFIG,
					"dequeue zero copy is enabled\n");

		if (dev->notify_ops->new_device(dev->vid) == 0)
			dev->flags |= VIRTIO_DEV_RUNNING;
	}

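	/*
	 * If a vDPA device is attached, hand the datapath over to it once
	 * the rings are ready. VHOST_USER_SET_VRING_ENABLE is used as the
	 * trigger here, as it typically arrives once ring setup is done.
	 */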
	did = dev->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (vdpa_dev && virtio_is_ready(dev) &&
			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
			msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
		if (vdpa_dev->ops->dev_conf)
			vdpa_dev->ops->dev_conf(dev->vid);
		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
		if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"(%d) software relay is used for vDPA, performance may be low.\n",
				dev->vid);
		}
	}

	return 0;
}

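/*
 * Wait for the reply to a slave request that carried
 * VHOST_USER_NEED_REPLY, and release the slave request lock taken in
 * send_vhost_slave_message(). A non-zero payload.u64 in the reply
 * signals failure.
 */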
static int
process_slave_message_reply(struct virtio_net *dev,
			const struct VhostUserMsg *msg)
{
	struct VhostUserMsg msg_reply;
	int ret;

	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;

	if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
		ret = -1;
		goto out;
	}

	if (msg_reply.request.slave != msg->request.slave) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Received unexpected msg type (%u), expected %u\n",
			msg_reply.request.slave, msg->request.slave);
		ret = -1;
		goto out;
	}

	ret = msg_reply.payload.u64 ? -1 : 0;

out:
	rte_spinlock_unlock(&dev->slave_req_lock);
	return ret;
}

int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.iotlb),
		.payload.iotlb = {
			.iova = iova,
			.perm = perm,
			.type = VHOST_IOTLB_MISS,
		},
	};

	ret = send_vhost_message(dev->slave_req_fd, &msg);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to send IOTLB miss message (%d)\n",
				ret);
		return ret;
	}

	return 0;
}

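/*
 * Ask the master to map (or unmap, when fd is negative) the host notifier
 * area of one ring. Setting VHOST_USER_VRING_NOFD_MASK in
 * payload.area.u64 tells the master that no file descriptor accompanies
 * the request.
 */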
static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
			int index, int fd, uint64_t offset, uint64_t size)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
		.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
		.size = sizeof(msg.payload.area),
		.payload.area = {
			.u64 = index & VHOST_USER_VRING_IDX_MASK,
			.size = size,
			.offset = offset,
		},
	};

	if (fd < 0) {
		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
	} else {
		msg.fds[0] = fd;
		msg.fd_num = 1;
	}

	ret = send_vhost_slave_message(dev, &msg);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to set host notifier (%d)\n", ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}

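/*
 * Enable or disable vDPA host notifiers for all rings. This requires the
 * slave channel as well as the SLAVE_SEND_FD and HOST_NOTIFIER protocol
 * features. On a partial failure while enabling, execution falls through
 * into the disable loop so the master ends up in a consistent state.
 */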
int
vhost_user_host_notifier_ctrl(int vid, bool enable)
{
	struct virtio_net *dev;
	struct rte_vdpa_device *vdpa_dev;
	int vfio_device_fd, did, ret = 0;
	uint64_t offset, size;
	unsigned int i;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	did = dev->vdpa_dev_id;
	if (did < 0)
		return -EINVAL;

	if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
	    !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
		return -ENOTSUP;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

	vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
	if (vfio_device_fd < 0)
		return -ENOTSUP;

	if (enable) {
		for (i = 0; i < dev->nr_vring; i++) {
			if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
					&size) < 0) {
				ret = -ENOTSUP;
				goto disable;
			}

			if (vhost_user_slave_set_vring_host_notifier(dev, i,
					vfio_device_fd, offset, size) < 0) {
				ret = -EFAULT;
				goto disable;
			}
		}
	} else {
disable:
		for (i = 0; i < dev->nr_vring; i++) {
			vhost_user_slave_set_vring_host_notifier(dev, i, -1,
					0, 0);
		}
	}

	return ret;
}