vhost: add notification for packed ring
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};

static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
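
/*
 * Illustrative sketch (not compiled): the block size obtained above is
 * later used as the hugepage alignment for mmap(). Rounding a length up
 * to such a power-of-two alignment, as RTE_ALIGN_CEIL() does, is plain
 * bit arithmetic; example_align_ceil() is a hypothetical helper.
 */
#if 0
static uint64_t
example_align_ceil(uint64_t size, uint64_t align)
{
        /* align is a power of two, e.g. 0x200000 for 2 MB hugepages:
         * 0x1500000 -> 0x1600000, while 0x1400000 stays 0x1400000.
         */
        return (size + align - 1) & ~(align - 1);
}
#endif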

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * This function just returns success at the moment; taking ownership
 * is a no-op.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The features that we support are requested.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * The number of queues that we support is requested.
 */
static uint32_t
vhost_user_get_queue_num(struct virtio_net *dev)
{
        uint32_t queue_num = 0;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
        return queue_num;
}

/*
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(dev, vq);
                }
        }

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);

        return 0;
}
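
/*
 * Illustrative sketch (not compiled): the validity check above relies on
 * "negotiated & ~offered" being non-zero exactly when the master acked a
 * feature bit we never offered; example_features_valid() is a
 * hypothetical helper showing the same test in isolation.
 */
#if 0
static int
example_features_valid(uint64_t offered, uint64_t negotiated)
{
        /* offered = 0x5, negotiated = 0x6 -> 0x6 & ~0x5 = 0x2 -> invalid */
        return (negotiated & ~offered) == 0;
}
#endif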

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         */
        if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid virtqueue size %u\n", vq->size);
                return -1;
        }

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is forcibly disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        if (vq_is_packed(dev)) {
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }

        } else {
                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_split) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}
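
/*
 * Illustrative sketch (not compiled): "n & (n - 1)" clears the lowest
 * set bit, so it is zero only when at most one bit is set. That is the
 * power-of-two test used above; e.g. 256 & 255 == 0, while
 * 384 & 383 == 256. Note that the in-place check lets zero through.
 */
#if 0
static int
example_is_pow2(uint32_t n)
{
        return n != 0 && (n & (n - 1)) == 0;
}
#endif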

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so they
 * reside on the same NUMA node as the memory backing the vring descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                        sizeof(struct zcopy_mbuf), 0, newnode);
                if (new_zmbuf) {
                        rte_free(vq->zmbufs);
                        vq->zmbufs = new_zmbuf;
                }

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif
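
/*
 * Illustrative sketch (not compiled): get_mempolicy() with
 * MPOL_F_NODE | MPOL_F_ADDR, as used above, reports which NUMA node
 * backs a given virtual address; example_node_of() is a hypothetical
 * wrapper (numaif.h is already included under RTE_LIBRTE_VHOST_NUMA).
 */
#if 0
static int
example_node_of(void *addr)
{
        int node;

        if (get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR))
                return -1;      /* query failed */
        return node;            /* NUMA node backing 'addr' */
}
#endif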

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
        *len = 0;

        return 0;
}
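
/*
 * Illustrative sketch (not compiled): once the containing region is
 * found, the translation above is pure offset arithmetic. With a
 * hypothetical region starting at guest VA 0x7f0000000000 and mapped at
 * host VA 0x7e0000000000, guest VA 0x7f0000001000 yields host VA
 * 0x7e0000001000.
 */
#if 0
static uint64_t
example_qva_translate(uint64_t qva, uint64_t guest_base, uint64_t host_base)
{
        return qva - guest_base + host_base;
}
#endif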

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len;

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                vq->log_guest_addr = 0;
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 ||
                        len != sizeof(struct vring_avail) +
                        sizeof(uint16_t) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != sizeof(struct vring_used) +
                        sizeof(struct vring_used_elem) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}
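
/*
 * Illustrative sketch (not compiled): the expected 'len' values checked
 * above follow the VIRTIO 1.0 memory layout. For a split ring all three
 * areas scale with the queue size; for a packed ring only the descriptor
 * array does, while the driver and device event areas are each a single
 * vring_packed_desc_event. The byte counts below assume the standard
 * layout without the optional event-index suffix words.
 */
#if 0
static void
example_split_ring_bytes(uint64_t qsz, uint64_t *desc, uint64_t *avail,
                uint64_t *used)
{
        *desc  = 16 * qsz;      /* sizeof(struct vring_desc) == 16 */
        *avail = 4 + 2 * qsz;   /* flags + idx, then qsz 16-bit entries */
        *used  = 4 + 8 * qsz;   /* flags + idx, then qsz 8-byte elems */
}
#endif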

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, the rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                dev->guest_pages = realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page));
                if (!dev->guest_pages) {
                        RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}
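
/*
 * Illustrative sketch (not compiled): the merge above collapses pages
 * whose host-physical ranges are back to back. E.g. a previous entry
 * {hpa = 0x1000, size = 0x1000} absorbs a new page at hpa 0x2000,
 * giving one entry of size 0x2000.
 */
#if 0
static int
example_pages_mergeable(uint64_t prev_hpa, uint64_t prev_size,
                uint64_t next_hpa)
{
        return next_hpa == prev_hpa + prev_size;
}
#endif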

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
                return -1;
        }

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(pmsg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow */
                if (mmap_offset >= -reg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* mmap() without the MAP_ANONYMOUS flag must be called with
                 * a length argument aligned to the hugepage size on older
                 * long-term stable Linux kernels, such as 2.6.32 and 3.2.72;
                 * otherwise mmap() fails with EINVAL.
                 *
                 * To avoid that failure, make sure the length stays aligned
                 * here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                RTE_LOG(ERR, VHOST_CONFIG,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev)
                                return -1;

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}
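
/*
 * Illustrative sketch (not compiled): the test "mmap_offset >= -reg->size"
 * above exploits unsigned wrap-around. In 64-bit arithmetic -size equals
 * 2^64 - size, so for a non-zero size the condition holds exactly when
 * mmap_offset + size would overflow 64 bits. An equivalent formulation:
 */
#if 0
static int
example_sum_overflows(uint64_t off, uint64_t size)
{
        return size != 0 && off > UINT64_MAX - size;
}
#endif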

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = !!vq->desc_packed;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static void
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, qemu will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);

        dev->flags &= ~VIRTIO_DEV_READY;
        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

        /* Here we are safe to get the last avail index */
        msg->payload.state.num = vq->last_avail_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current qemu vhost-user implementation, this message
         * is sent and only sent in vhost_vring_stop.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        if (vq_is_packed(dev)) {
                rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = NULL;
        } else {
                rte_free(vq->shadow_used_split);
                vq->shadow_used_split = NULL;
        }

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, qemu sends a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);

        dev->virtqueue[index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features;

        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);

        /*
         * The REPLY_ACK protocol feature is for now only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable the REPLY_ACK feature to work around older buggy
         * qemu versions (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;

        /* Don't allow mmap_offset to point outside the mmap region */
        if (off > size) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
                        off, size);
                return -1;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRId64", offset: %"PRId64"\n",
                size, off);

        /*
         * mmap from 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page size aligned.
         */
        addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free previously mapped log memory in case VHOST_USER_SET_LOG_BASE
         * is received multiple times.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}
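
/*
 * Illustrative sketch (not compiled): the area mapped above serves as
 * the live-migration dirty bitmap, one bit per guest page. Assuming
 * 4 KB pages, marking a guest-physical address dirty reduces to setting
 * one bit; the real logging code must use an atomic OR, since multiple
 * threads may log concurrently.
 */
#if 0
static void
example_log_dirty_page(uint8_t *log_base, uint64_t gpa)
{
        uint64_t page = gpa / 4096;

        log_base[page / 8] |= (uint8_t)(1 << (page % 8));
}
#endif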

/*
 * An RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" an RARP packet here; instead, we set
 * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() is for making sure the mac is copied
         * before the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}
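
/*
 * Illustrative sketch (not compiled): the invalidate helper above
 * reduces to the classic closed-interval overlap test: [a0, a1] and
 * [b0, b1] intersect iff a0 <= b1 && b0 <= a1. E.g. [5, 9] and [8, 12]
 * overlap, while [5, 9] and [10, 12] do not.
 */
#if 0
static int
example_ranges_overlap(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
{
        return a0 <= b1 && b0 <= a1;
}
#endif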

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva, len;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
                vva = qva_to_vva(dev, imsg->uaddr, &len);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        len, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}
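
/*
 * Illustrative sketch (not compiled): a vhost-user message arrives as a
 * fixed header (three 32-bit words: request, flags, size) followed by
 * 'size' payload bytes, which is why the function above first reads
 * VHOST_USER_HDR_SIZE bytes and then issues a second read() for the
 * payload. A hypothetical mirror of the wire header:
 */
#if 0
struct example_vhost_user_hdr {
        uint32_t request;       /* message type */
        uint32_t flags;         /* version and reply bits */
        uint32_t size;          /* payload length that follows */
};
#endif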

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
{
        if (!msg)
                return 0;

        return send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
}

static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        return send_vhost_message(sockfd, msg, NULL, 0);
}

static int
send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
                         int *fds, int fd_num)
{
        int ret;

        if (msg->flags & VHOST_USER_NEED_REPLY)
                rte_spinlock_lock(&dev->slave_req_lock);

        ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
        if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
                rte_spinlock_unlock(&dev->slave_req_lock);

        return ret;
}

/*
 * Allocate a queue pair if it hasn't been allocated yet
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}
1508
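/*
 * Lock the access_lock of every allocated virtqueue, so that message
 * handlers can safely update rings the datapath may be processing.
 */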
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_lock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

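/* Release the locks taken by vhost_user_lock_all_queue_pairs(). */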
static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_unlock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

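/*
 * Read one vhost-user request from the master and dispatch it: run the
 * external pre- and post-handlers when registered, handle the request,
 * send a reply when required, and start the device (and configure an
 * attached vDPA device) once it becomes ready.
 */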
int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
        int ret;
        int unlock_required = 0;
        uint32_t skip_master = 0;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        /*
         * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
         * and VHOST_USER_RESET_OWNER, since those messages are sent when
         * virtio stops and the device is destroyed. destroy_device()
         * waits for the queues to be inactive, so it is safe. Taking the
         * access_lock there would otherwise deadlock.
         */
        switch (msg.request.master) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
        case VHOST_USER_SET_MEM_TABLE:
        case VHOST_USER_SET_LOG_BASE:
        case VHOST_USER_SET_LOG_FD:
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_ADDR:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
        case VHOST_USER_SET_VRING_ENABLE:
        case VHOST_USER_SEND_RARP:
        case VHOST_USER_NET_SET_MTU:
        case VHOST_USER_SET_SLAVE_REQ_FD:
                vhost_user_lock_all_queue_pairs(dev);
                unlock_required = 1;
                break;
        default:
                break;
        }

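        /*
         * Let the registered external pre-handler (if any) see the
         * message first; it may handle it entirely and ask us to skip
         * the generic handling below.
         */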
        if (dev->extern_ops.pre_msg_handle) {
                uint32_t need_reply;

                ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
                                (void *)&msg, &need_reply, &skip_master);
                if (ret < 0)
                        goto skip_to_reply;

                if (need_reply)
                        send_vhost_reply(fd, &msg);

                if (skip_master)
                        goto skip_to_post_handle;
        }

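        /* Generic handling of master requests. */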
        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                ret = vhost_user_set_features(dev, msg.payload.u64);
                if (ret) {
                        /* Don't leave the queues locked on the error path. */
                        if (unlock_required)
                                vhost_user_unlock_all_queue_pairs(dev);
                        return -1;
                }
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(&dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

skip_to_post_handle:
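        /*
         * Let the registered external post-handler (if any) inspect the
         * message after the generic handling.
         */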
        if (dev->extern_ops.post_msg_handle) {
                uint32_t need_reply;

                ret = (*dev->extern_ops.post_msg_handle)(
                                dev->vid, (void *)&msg, &need_reply);
                if (ret < 0)
                        goto skip_to_reply;

                if (need_reply)
                        send_vhost_reply(fd, &msg);
        }

skip_to_reply:
        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);

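        /*
         * The master asked for an explicit ack: reply with zero on
         * success and non-zero on failure.
         */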
        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        }

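        /*
         * Once every prerequisite is met (memory table, vring addresses,
         * kick/call fds), mark the device ready and hand it to the
         * application through the new_device() callback.
         */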
        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

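        /*
         * If a vDPA device is attached and not yet configured, configure
         * it once the rings are enabled; fall back to the software relay
         * when the host notifier cannot be installed.
         */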
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && virtio_is_ready(dev) &&
                        !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
                        msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
                if (vdpa_dev->ops->dev_conf)
                        vdpa_dev->ops->dev_conf(dev->vid);
                dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
                if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "(%d) software relay is used for vDPA, performance may be low.\n",
                                dev->vid);
                }
        }

        return 0;
}

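/*
 * Read the master's ack to a slave request sent with the
 * VHOST_USER_NEED_REPLY flag and release the slave_req_lock taken when
 * the request was sent. Returns 0 when the master reported success.
 */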
static int process_slave_message_reply(struct virtio_net *dev,
                                       const VhostUserMsg *msg)
{
        VhostUserMsg msg_reply;
        int ret;

        if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
                return 0;

        if (read_vhost_message(dev->slave_req_fd, &msg_reply) <= 0) {
                ret = -1;
                goto out;
        }

        if (msg_reply.request.slave != msg->request.slave) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Received unexpected msg type (%u), expected %u\n",
                        msg_reply.request.slave, msg->request.slave);
                ret = -1;
                goto out;
        }

        ret = msg_reply.payload.u64 ? -1 : 0;

out:
        rte_spinlock_unlock(&dev->slave_req_lock);
        return ret;
}

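/*
 * Report an IOTLB miss to the master, which is expected to reply with
 * the missing translation in a VHOST_USER_IOTLB_MSG update.
 */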
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}

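/*
 * Ask the master to map (fd >= 0) or unmap (fd < 0) the host notifier
 * area of the given vring, so that the guest can kick the device
 * directly instead of going through a software relay.
 */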
static int vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
                                                    int index, int fd,
                                                    uint64_t offset,
                                                    uint64_t size)
{
        int *fdp = NULL;
        size_t fd_num = 0;
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
                .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
                .size = sizeof(msg.payload.area),
                .payload.area = {
                        .u64 = index & VHOST_USER_VRING_IDX_MASK,
                        .size = size,
                        .offset = offset,
                },
        };

        if (fd < 0) {
                msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
        } else {
                fdp = &fd;
                fd_num = 1;
        }

        ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to set host notifier (%d)\n", ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}

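/*
 * Set up or tear down the host notifiers for every vring of a vDPA
 * device. Requires the slave channel and the HOST_NOTIFIER, SLAVE_REQ
 * and SLAVE_SEND_FD protocol features; on any setup failure the
 * notifiers installed so far are rolled back.
 */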
int vhost_user_host_notifier_ctrl(int vid, bool enable)
{
        struct virtio_net *dev;
        struct rte_vdpa_device *vdpa_dev;
        int vfio_device_fd, did, ret = 0;
        uint64_t offset, size;
        unsigned int i;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        did = dev->vdpa_dev_id;
        if (did < 0)
                return -EINVAL;

        if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
            !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
                return -ENOTSUP;

        vdpa_dev = rte_vdpa_get_device(did);
        if (!vdpa_dev)
                return -ENODEV;

        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

        vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
        if (vfio_device_fd < 0)
                return -ENOTSUP;

        if (enable) {
                for (i = 0; i < dev->nr_vring; i++) {
                        if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
                                        &size) < 0) {
                                ret = -ENOTSUP;
                                goto disable;
                        }

                        if (vhost_user_slave_set_vring_host_notifier(dev, i,
                                        vfio_device_fd, offset, size) < 0) {
                                ret = -EFAULT;
                                goto disable;
                        }
                }
        } else {
disable:
                for (i = 0; i < dev->nr_vring; i++) {
                        vhost_user_slave_set_vring_host_notifier(dev, i, -1,
                                        0, 0);
                }
        }

        return ret;
}