lib/librte_vhost/vhost_user.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
#include <linux/userfaultfd.h>
#endif
#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
#include <linux/memfd.h>
#define MEMFD_SUPPORTED
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

#define INFLIGHT_ALIGNMENT      64
#define INFLIGHT_VERSION        0x1

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
        [VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
        [VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
        [VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
        [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
        [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
        [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
        [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
};

static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);

static void
close_msg_fds(struct VhostUserMsg *msg)
{
        int i;

        for (i = 0; i < msg->fd_num; i++)
                close(msg->fds[i]);
}

/*
 * Ensure the expected number of FDs is received,
 * close all FDs and return an error if this is not the case.
 */
static int
validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
{
        if (msg->fd_num == expected_fds)
                return 0;

        VHOST_LOG_CONFIG(ERR,
                "Expected %d FDs for request %s, received %d\n",
                expected_fds,
                vhost_message_str[msg->request.master],
                msg->fd_num);

        close_msg_fds(msg);

        return -1;
}

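/*
 * Return the block size reported by fstat() for the given fd. For a
 * hugetlbfs-backed fd this is the hugepage size, which callers use as the
 * required mmap() alignment; returns (uint64_t)-1 on failure.
 */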
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

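/*
 * Release every resource acquired through vhost-user messages: the mapped
 * memory regions, the guest page table, the dirty log area, the inflight
 * shared memory and fd, the slave request fd and the postcopy userfaultfd.
 */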
void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->inflight_info) {
                if (dev->inflight_info->addr) {
                        munmap(dev->inflight_info->addr,
                               dev->inflight_info->size);
                        dev->inflight_info->addr = NULL;
                }

                if (dev->inflight_info->fd >= 0) {
                        close(dev->inflight_info->fd);
                        dev->inflight_info->fd = -1;
                }

                free(dev->inflight_info);
                dev->inflight_info = NULL;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }

        if (dev->postcopy_ufd >= 0) {
                close(dev->postcopy_ufd);
                dev->postcopy_ufd = -1;
        }

        dev->postcopy_listening = 0;
}

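/*
 * Propagate a queue state change to the three interested parties: the guest
 * notification configuration, the vDPA driver (if any), and the application
 * through the vring_state_changed callback.
 */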
static void
vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
                              int enable)
{
        struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
        struct vhost_virtqueue *vq = dev->virtqueue[index];

        /* Configure guest notifications on enable */
        if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
                vhost_enable_guest_notification(dev, vq, vq->notif_enable);

        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);
}

/*
 * This function just returns success at the moment; it only fails
 * if the wrong number of FDs was attached to the message.
 */
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        return RTE_VHOST_MSG_RESULT_OK;
}

static int
vhost_user_reset_owner(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The features that we support are requested.
 */
static int
vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &features);

        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * The number of queues we support is requested.
 */
static int
vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
                dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;

                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return RTE_VHOST_MSG_RESULT_OK;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_CONFIG(INFO,
                "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
        VHOST_LOG_CONFIG(DEBUG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        cleanup_vq_inflight(dev, vq);
                        free_vq(dev, vq);
                }
        }

        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev)
                vdpa_dev->ops->set_features(dev->vid);

        dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         *
         * VIRTIO 1.1 2.7 Virtqueues says:
         *
         *   Packed virtqueues support up to 2^15 entries each.
         */
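        /*
         * vq->size & (vq->size - 1) is non-zero iff vq->size is not a power
         * of two: e.g. 256 & 255 == 0 (accepted), while 320 & 319 == 256
         * (rejected).
         */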
        if (!vq_is_packed(dev)) {
                if (vq->size & (vq->size - 1)) {
                        VHOST_LOG_CONFIG(ERR,
                                "invalid virtqueue size %u\n", vq->size);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq->size > 32768) {
                VHOST_LOG_CONFIG(ERR,
                        "invalid virtqueue size %u\n", vq->size);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (vq_is_packed(dev)) {
                if (vq->shadow_used_packed)
                        rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for shadow used ring.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

        } else {
                if (vq->shadow_used_split)
                        rte_free(vq->shadow_used_split);

                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);

                if (!vq->shadow_used_split) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for vq internal data.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq->batch_copy_elems)
                rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to allocate memory for batching copy.\n");
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that they
 * reside on the same NUMA node as the memory backing the vring descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        if (dev->flags & VIRTIO_DEV_RUNNING)
                return dev;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                VHOST_LOG_CONFIG(INFO,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                VHOST_LOG_CONFIG(INFO,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        if (unlikely(!dev || !dev->mem))
                goto out_error;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
out_error:
        *len = 0;

        return 0;
}


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vhost_user_iotlb_rd_lock(vq);
                vva = vhost_iova_to_vva(dev, vq, ra,
                                        size, VHOST_ACCESS_RW);
                vhost_user_iotlb_rd_unlock(vq);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static uint64_t
log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t log_gpa;

        vhost_user_iotlb_rd_lock(vq);
        log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
        vhost_user_iotlb_rd_unlock(vq);

        return log_gpa;
}

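/*
 * Translate the ring addresses stored in vq->ring_addrs into host virtual
 * addresses and mark the virtqueue as safe to access on success. May
 * reallocate dev and vq on another NUMA node, so callers must use the
 * returned device pointer.
 */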
static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len, expected_len;

        if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
                vq->log_guest_addr =
                        log_addr_to_gpa(dev, vq);
                if (vq->log_guest_addr == 0) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map log_guest_addr.\n",
                                dev->vid);
                        return dev;
                }
        }

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                vq->access_ok = 1;
                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                VHOST_LOG_CONFIG(WARNING,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->access_ok = 1;

        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        bool access_ok;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;

        /* addr->index refers to the queue index: txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        access_ok = vq->access_ok;

        /*
         * Ring addresses should not be interpreted as long as the ring has
         * not been started and enabled
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if ((vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
                        access_ok) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return RTE_VHOST_MSG_RESULT_ERR;

                *pdev = dev;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val = msg->payload.state.num;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (vq_is_packed(dev)) {
                /*
                 * Bit[0:14]: avail index
                 * Bit[15]: avail wrap counter
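                 *
                 * e.g. val = 0x8005 gives last_avail_idx = 5 and
                 * avail_wrap_counter = 1.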
                 */
                vq->last_avail_idx = val & 0x7fff;
                vq->avail_wrap_counter = !!(val & (0x1 << 15));
                /*
                 * Set used index to same value as available one, as
                 * their values should be the same since ring processing
                 * was stopped at get time.
                 */
                vq->last_used_idx = vq->last_avail_idx;
                vq->used_wrap_counter = vq->avail_wrap_counter;
        } else {
                vq->last_used_idx = msg->payload.state.num;
                vq->last_avail_idx = msg->payload.state.num;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

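/*
 * Record one guest-physical to host-physical page mapping, growing the
 * guest_pages array as needed and merging with the previous entry when the
 * host physical ranges are contiguous.
 */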
static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;
        struct guest_page *old_pages;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                old_pages = dev->guest_pages;
                dev->guest_pages = rte_realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page),
                                        RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
                        rte_free(old_pages);
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        /* sort guest page array if over binary search threshold */
        if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
                qsort((void *)dev->guest_pages, dev->nr_guest_pages,
                        sizeof(struct guest_page), guest_page_addrcmp);
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                VHOST_LOG_CONFIG(INFO,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

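/*
 * Return true if the memory layout in the incoming SET_MEM_TABLE message
 * differs from the table currently mapped, so remapping can be skipped when
 * nothing changed.
 */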
static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory *memory = &msg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (validate_msg_fds(msg, memory->nregions) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
                VHOST_LOG_CONFIG(ERR,
                        "too many memory regions (%u)\n", memory->nregions);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
                VHOST_LOG_CONFIG(INFO,
                        "(%d) memory regions not changed\n", dev->vid);

                close_msg_fds(msg);

                return RTE_VHOST_MSG_RESULT_OK;
        }

        if (dev->mem) {
                if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
                        struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;

                        if (vdpa_dev && vdpa_dev->ops->dev_close)
                                vdpa_dev->ops->dev_close(dev->vid);
                        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
                }
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        /* Flush IOTLB cache as previous HVAs are now invalid */
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);

        dev->nr_guest_pages = 0;
        if (dev->guest_pages == NULL) {
                dev->max_guest_pages = 8;
                dev->guest_pages = rte_zmalloc(NULL,
                                        dev->max_guest_pages *
                                        sizeof(struct guest_page),
                                        RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
        if (dev->mem == NULL) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return RTE_VHOST_MSG_RESULT_ERR;
        }
        dev->mem->nregions = memory->nregions;

        for (i = 0; i < memory->nregions; i++) {
                fd  = msg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
                reg->guest_user_addr = memory->regions[i].userspace_addr;
                reg->size            = memory->regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory->regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow */
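                /*
                 * Note: both operands are uint64_t, so -reg->size wraps to
                 * 2^64 - reg->size; the test is true exactly when
                 * mmap_offset + reg->size would overflow 64 bits (and it
                 * also rejects zero-sized regions).
                 */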
                if (mmap_offset >= -reg->size) {
                        VHOST_LOG_CONFIG(ERR,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* On older long-term Linux kernels (e.g. 2.6.32 and 3.2.72),
                 * mmap() without MAP_ANONYMOUS must be called with a length
                 * argument aligned to the hugepage size, or it fails with
                 * EINVAL.
                 *
                 * To avoid failure, make sure the length stays aligned here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        VHOST_LOG_CONFIG(ERR,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
                if (mmap_size == 0) {
                        /*
                         * This can happen if the initial mmap_size +
                         * alignment overflows uint64_t, which means either
                         * mmap_size or alignment is wrong.
                         *
                         * The mmap() kernel implementation would return an
                         * error anyway, but better to catch it here and
                         * provide useful info in the logs.
                         */
                        VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
                                        "or alignment (0x%" PRIx64 ") is invalid\n",
                                        reg->size + mmap_offset, alignment);
                        goto err_mmap;
                }

                populate = dev->async_copy ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        VHOST_LOG_CONFIG(ERR,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->async_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                VHOST_LOG_CONFIG(ERR,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                VHOST_LOG_CONFIG(INFO,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);

                if (dev->postcopy_listening) {
                        /*
                         * We don't have a better way right now than sharing
                         * DPDK's virtual address with QEMU, so that QEMU can
                         * retrieve the region offset when handling userfaults.
                         */
                        memory->regions[i].userspace_addr =
                                reg->host_user_addr;
                }
        }
        if (dev->postcopy_listening) {
                /* Send the addresses back to qemu */
                msg->fd_num = 0;
                send_vhost_reply(main_fd, msg);

                /* Wait for QEMU to acknowledge that it got the addresses;
                 * we have to wait before we're allowed to generate faults.
                 */
                VhostUserMsg ack_msg;
                if (read_vhost_message(main_fd, &ack_msg) <= 0) {
                        VHOST_LOG_CONFIG(ERR,
                                "Failed to read qemu ack on postcopy set-mem-table\n");
                        goto err_mmap;
                }

                if (validate_msg_fds(&ack_msg, 0) != 0)
                        goto err_mmap;

                if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
                        VHOST_LOG_CONFIG(ERR,
                                "Bad qemu ack on postcopy set-mem-table (%d)\n",
                                ack_msg.request.master);
                        goto err_mmap;
                }

                /* Now register with userfaultfd so we can use the memory */
                for (i = 0; i < memory->nregions; i++) {
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
                        reg = &dev->mem->regions[i];
                        struct uffdio_register reg_struct;

                        /*
                         * Let's register all the mmap'ed area to ensure
                         * alignment on page boundary.
                         */
                        reg_struct.range.start =
                                (uint64_t)(uintptr_t)reg->mmap_addr;
                        reg_struct.range.len = reg->mmap_size;
                        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

                        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
                                                &reg_struct)) {
                                VHOST_LOG_CONFIG(ERR,
                                        "Failed to register ufd for region %d: (ufd = %d) %s\n",
                                        i, dev->postcopy_ufd,
                                        strerror(errno));
                                goto err_mmap;
                        }
                        VHOST_LOG_CONFIG(INFO,
                                "\t userfaultfd registered for range : "
                                "%" PRIx64 " - %" PRIx64 "\n",
                                (uint64_t)reg_struct.range.start,
                                (uint64_t)reg_struct.range.start +
                                (uint64_t)reg_struct.range.len - 1);
#else
                        goto err_mmap;
#endif
                }
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev) {
                                dev = *pdev;
                                goto err_mmap;
                        }

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return RTE_VHOST_MSG_RESULT_OK;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return RTE_VHOST_MSG_RESULT_ERR;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = vq->desc_packed && vq->driver_event &&
                        vq->device_event;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->enabled;
}

#define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i, nr_vring = dev->nr_vring;

        if (dev->flags & VIRTIO_DEV_READY)
                return 1;

        if (!dev->nr_vring)
                return 0;

        if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
                nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;

                if (dev->nr_vring < nr_vring)
                        return 0;
        }

        for (i = 0; i < nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        /* If supported, ensure the frontend is really done with config */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
                if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
                        return 0;

        dev->flags |= VIRTIO_DEV_READY;

        if (!(dev->flags & VIRTIO_DEV_RUNNING))
                VHOST_LOG_CONFIG(INFO,
                        "virtio is now ready for processing.\n");
        return 1;
}

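/*
 * Allocate an fd-backed shared memory area for inflight I/O tracking.
 * Prefer memfd_create() when available; otherwise fall back to an unlinked
 * temporary file, then size it with ftruncate() and mmap() it.
 */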
static void *
inflight_mem_alloc(const char *name, size_t size, int *fd)
{
        void *ptr;
        int mfd = -1;
        char fname[20] = "/tmp/memfd-XXXXXX";

        *fd = -1;
#ifdef MEMFD_SUPPORTED
        mfd = memfd_create(name, MFD_CLOEXEC);
#else
        RTE_SET_USED(name);
#endif
        if (mfd == -1) {
                mfd = mkstemp(fname);
                if (mfd == -1) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to get inflight buffer fd\n");
                        return NULL;
                }

                unlink(fname);
        }

        if (ftruncate(mfd, size) == -1) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to alloc inflight buffer\n");
                close(mfd);
                return NULL;
        }

        ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
        if (ptr == MAP_FAILED) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to mmap inflight buffer\n");
                close(mfd);
                return NULL;
        }

        *fd = mfd;
        return ptr;
}

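/*
 * Per-queue inflight shm size for split rings. The formula appears to mirror
 * the rte_vhost_inflight_info_split header (a uint64_t features field plus
 * four uint16_t fields) followed by one descriptor entry per queue slot,
 * rounded up to INFLIGHT_ALIGNMENT.
 */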
static uint32_t
get_pervq_shm_size_split(uint16_t queue_size)
{
        return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
                                  queue_size + sizeof(uint64_t) +
                                  sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
}

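/*
 * Packed-ring variant: the header seemingly carries additional old_* copies
 * of the queue state, hence six uint16_t fields and nine uint8_t bytes (two
 * wrap counters plus padding) on top of the features word and the desc array.
 */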
static uint32_t
get_pervq_shm_size_packed(uint16_t queue_size)
{
        return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
                                  * queue_size + sizeof(uint64_t) +
                                  sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
                                  INFLIGHT_ALIGNMENT);
}

static int
vhost_user_get_inflight_fd(struct virtio_net **pdev,
                           VhostUserMsg *msg,
                           int main_fd __rte_unused)
{
        struct rte_vhost_inflight_info_packed *inflight_packed;
        uint64_t pervq_inflight_size, mmap_size;
        uint16_t num_queues, queue_size;
        struct virtio_net *dev = *pdev;
        int fd, i, j;
        void *addr;

        if (msg->size != sizeof(msg->payload.inflight)) {
                VHOST_LOG_CONFIG(ERR,
                        "invalid get_inflight_fd message size is %d\n",
                        msg->size);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->inflight_info == NULL) {
                dev->inflight_info = calloc(1,
                                            sizeof(struct inflight_mem_info));
                if (!dev->inflight_info) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to alloc dev inflight area\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
                dev->inflight_info->fd = -1;
        }

        num_queues = msg->payload.inflight.num_queues;
        queue_size = msg->payload.inflight.queue_size;

        VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
                msg->payload.inflight.num_queues);
        VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
                msg->payload.inflight.queue_size);

        if (vq_is_packed(dev))
                pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
        else
                pervq_inflight_size = get_pervq_shm_size_split(queue_size);

        mmap_size = num_queues * pervq_inflight_size;
        addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1431         if (!addr) {
1432                 VHOST_LOG_CONFIG(ERR,
1433                         "failed to alloc vhost inflight area\n");
1434                 msg->payload.inflight.mmap_size = 0;
1435                 return RTE_VHOST_MSG_RESULT_ERR;
1436         }
1437         memset(addr, 0, mmap_size);
1438
1439         if (dev->inflight_info->addr) {
1440                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1441                 dev->inflight_info->addr = NULL;
1442         }
1443
1444         if (dev->inflight_info->fd >= 0) {
1445                 close(dev->inflight_info->fd);
1446                 dev->inflight_info->fd = -1;
1447         }
1448
1449         dev->inflight_info->addr = addr;
1450         dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1451         dev->inflight_info->fd = msg->fds[0] = fd;
1452         msg->payload.inflight.mmap_offset = 0;
1453         msg->fd_num = 1;
1454
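        /*
         * For packed rings, pre-initialize each per-queue region: wrap
         * counters start at 1 and the tracking descriptors are chained
         * into a free list through their next fields.
         */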
1455         if (vq_is_packed(dev)) {
1456                 for (i = 0; i < num_queues; i++) {
1457                         inflight_packed =
1458                                 (struct rte_vhost_inflight_info_packed *)addr;
1459                         inflight_packed->used_wrap_counter = 1;
1460                         inflight_packed->old_used_wrap_counter = 1;
1461                         for (j = 0; j < queue_size; j++)
1462                                 inflight_packed->desc[j].next = j + 1;
1463                         addr = (void *)((char *)addr + pervq_inflight_size);
1464                 }
1465         }
1466
1467         VHOST_LOG_CONFIG(INFO,
1468                 "send inflight mmap_size: %"PRIu64"\n",
1469                 msg->payload.inflight.mmap_size);
1470         VHOST_LOG_CONFIG(INFO,
1471                 "send inflight mmap_offset: %"PRIu64"\n",
1472                 msg->payload.inflight.mmap_offset);
1473         VHOST_LOG_CONFIG(INFO,
1474                 "send inflight fd: %d\n", msg->fds[0]);
1475
1476         return RTE_VHOST_MSG_RESULT_REPLY;
1477 }
1478
1479 static int
1480 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1481                            int main_fd __rte_unused)
1482 {
1483         uint64_t mmap_size, mmap_offset;
1484         uint16_t num_queues, queue_size;
1485         struct virtio_net *dev = *pdev;
1486         uint32_t pervq_inflight_size;
1487         struct vhost_virtqueue *vq;
1488         void *addr;
1489         int fd, i;
1490
1491         fd = msg->fds[0];
1492         if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1493                 VHOST_LOG_CONFIG(ERR,
1494                         "invalid set_inflight_fd message size is %d, fd is %d\n",
1495                         msg->size, fd);
1496                 return RTE_VHOST_MSG_RESULT_ERR;
1497         }
1498
1499         mmap_size = msg->payload.inflight.mmap_size;
1500         mmap_offset = msg->payload.inflight.mmap_offset;
1501         num_queues = msg->payload.inflight.num_queues;
1502         queue_size = msg->payload.inflight.queue_size;
1503
1504         if (vq_is_packed(dev))
1505                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1506         else
1507                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1508
1509         VHOST_LOG_CONFIG(INFO,
1510                 "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1511         VHOST_LOG_CONFIG(INFO,
1512                 "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1513         VHOST_LOG_CONFIG(INFO,
1514                 "set_inflight_fd num_queues: %u\n", num_queues);
1515         VHOST_LOG_CONFIG(INFO,
1516                 "set_inflight_fd queue_size: %u\n", queue_size);
1517         VHOST_LOG_CONFIG(INFO,
1518                 "set_inflight_fd fd: %d\n", fd);
1519         VHOST_LOG_CONFIG(INFO,
1520                 "set_inflight_fd pervq_inflight_size: %d\n",
1521                 pervq_inflight_size);
1522
1523         if (!dev->inflight_info) {
1524                 dev->inflight_info = calloc(1,
1525                                             sizeof(struct inflight_mem_info));
1526                 if (dev->inflight_info == NULL) {
1527                         VHOST_LOG_CONFIG(ERR,
1528                                 "failed to alloc dev inflight area\n");
1529                         return RTE_VHOST_MSG_RESULT_ERR;
1530                 }
1531                 dev->inflight_info->fd = -1;
1532         }
1533
1534         if (dev->inflight_info->addr) {
1535                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1536                 dev->inflight_info->addr = NULL;
1537         }
1538
1539         addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1540                     fd, mmap_offset);
1541         if (addr == MAP_FAILED) {
1542                 VHOST_LOG_CONFIG(ERR, "failed to mmap shared memory.\n");
1543                 return RTE_VHOST_MSG_RESULT_ERR;
1544         }
1545
1546         if (dev->inflight_info->fd >= 0) {
1547                 close(dev->inflight_info->fd);
1548                 dev->inflight_info->fd = -1;
1549         }
1550
1551         dev->inflight_info->fd = fd;
1552         dev->inflight_info->addr = addr;
1553         dev->inflight_info->size = mmap_size;
1554
1555         for (i = 0; i < num_queues; i++) {
1556                 vq = dev->virtqueue[i];
1557                 if (vq_is_packed(dev)) {
1558                         vq->inflight_packed = addr;
1559                         vq->inflight_packed->desc_num = queue_size;
1560                 } else {
1561                         vq->inflight_split = addr;
1562                         vq->inflight_split->desc_num = queue_size;
1563                 }
1564                 addr = (void *)((char *)addr + pervq_inflight_size);
1565         }
1566
1567         return RTE_VHOST_MSG_RESULT_OK;
1568 }
1569
1570 static int
1571 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1572                         int main_fd __rte_unused)
1573 {
1574         struct virtio_net *dev = *pdev;
1575         struct vhost_vring_file file;
1576         struct vhost_virtqueue *vq;
1577         int expected_fds;
1578
1579         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1580         if (validate_msg_fds(msg, expected_fds) != 0)
1581                 return RTE_VHOST_MSG_RESULT_ERR;
1582
1583         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1584         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1585                 file.fd = VIRTIO_INVALID_EVENTFD;
1586         else
1587                 file.fd = msg->fds[0];
1588         VHOST_LOG_CONFIG(INFO,
1589                 "vring call idx:%d file:%d\n", file.index, file.fd);
1590
1591         vq = dev->virtqueue[file.index];
1592
1593         if (vq->ready) {
1594                 vq->ready = 0;
1595                 vhost_user_notify_queue_state(dev, file.index, 0);
1596         }
1597
1598         if (vq->callfd >= 0)
1599                 close(vq->callfd);
1600
1601         vq->callfd = file.fd;
1602
1603         return RTE_VHOST_MSG_RESULT_OK;
1604 }
1605
1606 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1607                         struct VhostUserMsg *msg,
1608                         int main_fd __rte_unused)
1609 {
1610         int expected_fds;
1611
1612         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1613         if (validate_msg_fds(msg, expected_fds) != 0)
1614                 return RTE_VHOST_MSG_RESULT_ERR;
1615
1616         if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1617                 close(msg->fds[0]);
1618         VHOST_LOG_CONFIG(INFO, "not implemented\n");
1619
1620         return RTE_VHOST_MSG_RESULT_OK;
1621 }
1622
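/*
 * qsort() comparator ordering resubmit descriptors by descending counter:
 * resubmit_list[0] then holds the most recently submitted inflight entry,
 * and vq->global_counter resumes just above its counter value.
 */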
1623 static int
1624 resubmit_desc_compare(const void *a, const void *b)
1625 {
1626         const struct rte_vhost_resubmit_desc *desc0 = a;
1627         const struct rte_vhost_resubmit_desc *desc1 = b;
1628
1629         if (desc1->counter > desc0->counter)
1630                 return 1;
1631
1632         return -1;
1633 }
1634
1635 static int
1636 vhost_check_queue_inflights_split(struct virtio_net *dev,
1637                                   struct vhost_virtqueue *vq)
1638 {
1639         uint16_t i;
1640         uint16_t resubmit_num = 0, last_io, num;
1641         struct vring_used *used = vq->used;
1642         struct rte_vhost_resubmit_info *resubmit;
1643         struct rte_vhost_inflight_info_split *inflight_split;
1644
1645         if (!(dev->protocol_features &
1646             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1647                 return RTE_VHOST_MSG_RESULT_OK;
1648
1649         /* The frontend may still not support the inflight feature
1650          * even though the protocol feature was negotiated.
1651          */
1652         if (!vq->inflight_split)
1653                 return RTE_VHOST_MSG_RESULT_OK;
1654
1655         if (!vq->inflight_split->version) {
1656                 vq->inflight_split->version = INFLIGHT_VERSION;
1657                 return RTE_VHOST_MSG_RESULT_OK;
1658         }
1659
1660         if (vq->resubmit_inflight)
1661                 return RTE_VHOST_MSG_RESULT_OK;
1662
1663         inflight_split = vq->inflight_split;
1664         vq->global_counter = 0;
1665         last_io = inflight_split->last_inflight_io;
1666
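        /*
         * A used_idx that disagrees with the ring's used->idx means the
         * backend went down after bumping used->idx but before clearing
         * the inflight flag of that last I/O: clear it now and resync.
         */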
1667         if (inflight_split->used_idx != used->idx) {
1668                 inflight_split->desc[last_io].inflight = 0;
1669                 rte_smp_mb();
1670                 inflight_split->used_idx = used->idx;
1671         }
1672
1673         for (i = 0; i < inflight_split->desc_num; i++) {
1674                 if (inflight_split->desc[i].inflight == 1)
1675                         resubmit_num++;
1676         }
1677
1678         vq->last_avail_idx += resubmit_num;
1679
1680         if (resubmit_num) {
1681                 resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1682                 if (!resubmit) {
1683                         VHOST_LOG_CONFIG(ERR,
1684                                 "failed to allocate memory for resubmit info.\n");
1685                         return RTE_VHOST_MSG_RESULT_ERR;
1686                 }
1687
1688                 resubmit->resubmit_list = calloc(resubmit_num,
1689                         sizeof(struct rte_vhost_resubmit_desc));
1690                 if (!resubmit->resubmit_list) {
1691                         VHOST_LOG_CONFIG(ERR,
1692                                 "failed to allocate memory for inflight desc.\n");
1693                         free(resubmit);
1694                         return RTE_VHOST_MSG_RESULT_ERR;
1695                 }
1696
1697                 num = 0;
1698                 for (i = 0; i < inflight_split->desc_num; i++) {
1699                         if (inflight_split->desc[i].inflight == 1) {
1700                                 resubmit->resubmit_list[num].index = i;
1701                                 resubmit->resubmit_list[num].counter =
1702                                         inflight_split->desc[i].counter;
1703                                 num++;
1704                         }
1705                 }
1706                 resubmit->resubmit_num = num;
1707
1708                 if (resubmit->resubmit_num > 1)
1709                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1710                               sizeof(struct rte_vhost_resubmit_desc),
1711                               resubmit_desc_compare);
1712
1713                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1714                 vq->resubmit_inflight = resubmit;
1715         }
1716
1717         return RTE_VHOST_MSG_RESULT_OK;
1718 }
1719
1720 static int
1721 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1722                                    struct vhost_virtqueue *vq)
1723 {
1724         uint16_t i;
1725         uint16_t resubmit_num = 0, old_used_idx, num;
1726         struct rte_vhost_resubmit_info *resubmit;
1727         struct rte_vhost_inflight_info_packed *inflight_packed;
1728
1729         if (!(dev->protocol_features &
1730             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1731                 return RTE_VHOST_MSG_RESULT_OK;
1732
1733         /* The frontend may still not support the inflight feature
1734          * even though the protocol feature was negotiated.
1735          */
1736         if (!vq->inflight_packed)
1737                 return RTE_VHOST_MSG_RESULT_OK;
1738
1739         if (!vq->inflight_packed->version) {
1740                 vq->inflight_packed->version = INFLIGHT_VERSION;
1741                 return RTE_VHOST_MSG_RESULT_OK;
1742         }
1743
1744         if (vq->resubmit_inflight)
1745                 return RTE_VHOST_MSG_RESULT_OK;
1746
1747         inflight_packed = vq->inflight_packed;
1748         vq->global_counter = 0;
1749         old_used_idx = inflight_packed->old_used_idx;
1750
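        /*
         * used_idx differing from old_used_idx means a crash happened in
         * the middle of a used-ring update: if the entry at old_used_idx
         * is no longer inflight the update completed, so commit the new
         * state; otherwise roll back to the old_* snapshot.
         */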
1751         if (inflight_packed->used_idx != old_used_idx) {
1752                 if (inflight_packed->desc[old_used_idx].inflight == 0) {
1753                         inflight_packed->old_used_idx =
1754                                 inflight_packed->used_idx;
1755                         inflight_packed->old_used_wrap_counter =
1756                                 inflight_packed->used_wrap_counter;
1757                         inflight_packed->old_free_head =
1758                                 inflight_packed->free_head;
1759                 } else {
1760                         inflight_packed->used_idx =
1761                                 inflight_packed->old_used_idx;
1762                         inflight_packed->used_wrap_counter =
1763                                 inflight_packed->old_used_wrap_counter;
1764                         inflight_packed->free_head =
1765                                 inflight_packed->old_free_head;
1766                 }
1767         }
1768
1769         for (i = 0; i < inflight_packed->desc_num; i++) {
1770                 if (inflight_packed->desc[i].inflight == 1)
1771                         resubmit_num++;
1772         }
1773
1774         if (resubmit_num) {
1775                 resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1776                 if (resubmit == NULL) {
1777                         VHOST_LOG_CONFIG(ERR,
1778                                 "failed to allocate memory for resubmit info.\n");
1779                         return RTE_VHOST_MSG_RESULT_ERR;
1780                 }
1781
1782                 resubmit->resubmit_list = calloc(resubmit_num,
1783                         sizeof(struct rte_vhost_resubmit_desc));
1784                 if (resubmit->resubmit_list == NULL) {
1785                         VHOST_LOG_CONFIG(ERR,
1786                                 "failed to allocate memory for resubmit desc.\n");
1787                         free(resubmit);
1788                         return RTE_VHOST_MSG_RESULT_ERR;
1789                 }
1790
1791                 num = 0;
1792                 for (i = 0; i < inflight_packed->desc_num; i++) {
1793                         if (inflight_packed->desc[i].inflight == 1) {
1794                                 resubmit->resubmit_list[num].index = i;
1795                                 resubmit->resubmit_list[num].counter =
1796                                         inflight_packed->desc[i].counter;
1797                                 num++;
1798                         }
1799                 }
1800                 resubmit->resubmit_num = num;
1801
1802                 if (resubmit->resubmit_num > 1)
1803                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1804                               sizeof(struct rte_vhost_resubmit_desc),
1805                               resubmit_desc_compare);
1806
1807                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1808                 vq->resubmit_inflight = resubmit;
1809         }
1810
1811         return RTE_VHOST_MSG_RESULT_OK;
1812 }
1813
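/*
 * SET_VRING_KICK is where the ring becomes operational: ring addresses
 * are translated, the kick eventfd is installed, and any descriptors
 * left inflight by a previous backend crash are queued for resubmission.
 */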
1814 static int
1815 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
1816                         int main_fd __rte_unused)
1817 {
1818         struct virtio_net *dev = *pdev;
1819         struct vhost_vring_file file;
1820         struct vhost_virtqueue *vq;
1821         int expected_fds;
1822
1823         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1824         if (validate_msg_fds(msg, expected_fds) != 0)
1825                 return RTE_VHOST_MSG_RESULT_ERR;
1826
1827         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1828         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1829                 file.fd = VIRTIO_INVALID_EVENTFD;
1830         else
1831                 file.fd = msg->fds[0];
1832         VHOST_LOG_CONFIG(INFO,
1833                 "vring kick idx:%d file:%d\n", file.index, file.fd);
1834
1835         /* Interpret ring addresses only when ring is started. */
1836         dev = translate_ring_addresses(dev, file.index);
1837         if (!dev)
1838                 return RTE_VHOST_MSG_RESULT_ERR;
1839
1840         *pdev = dev;
1841
1842         vq = dev->virtqueue[file.index];
1843
1844         /*
1845          * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
1846          * the ring starts already enabled. Otherwise, it is enabled via
1847          * the SET_VRING_ENABLE message.
1848          */
1849         if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
1850                 vq->enabled = 1;
1851                 if (dev->notify_ops->vring_state_changed)
1852                         dev->notify_ops->vring_state_changed(
1853                                 dev->vid, file.index, 1);
1854         }
1855
1856         if (vq->ready) {
1857                 vq->ready = 0;
1858                 vhost_user_notify_queue_state(dev, file.index, 0);
1859         }
1860
1861         if (vq->kickfd >= 0)
1862                 close(vq->kickfd);
1863         vq->kickfd = file.fd;
1864
1865         if (vq_is_packed(dev)) {
1866                 if (vhost_check_queue_inflights_packed(dev, vq)) {
1867                         VHOST_LOG_CONFIG(ERR,
1868                                 "failed to check inflights for vq: %d\n", file.index);
1869                         return RTE_VHOST_MSG_RESULT_ERR;
1870                 }
1871         } else {
1872                 if (vhost_check_queue_inflights_split(dev, vq)) {
1873                         VHOST_LOG_CONFIG(ERR,
1874                                 "failed to check inflights for vq: %d\n", file.index);
1875                         return RTE_VHOST_MSG_RESULT_ERR;
1876                 }
1877         }
1878
1879         return RTE_VHOST_MSG_RESULT_OK;
1880 }
1881
1882 /*
1883  * When virtio is stopped, qemu sends us the GET_VRING_BASE message.
1884  */
1885 static int
1886 vhost_user_get_vring_base(struct virtio_net **pdev,
1887                         struct VhostUserMsg *msg,
1888                         int main_fd __rte_unused)
1889 {
1890         struct virtio_net *dev = *pdev;
1891         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
1892         uint64_t val;
1893
1894         if (validate_msg_fds(msg, 0) != 0)
1895                 return RTE_VHOST_MSG_RESULT_ERR;
1896
1897         /* We have to stop the queue (virtio) if it is running. */
1898         vhost_destroy_device_notify(dev);
1899
1900         dev->flags &= ~VIRTIO_DEV_READY;
1901         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1902
1903         /* Here we are safe to get the indexes */
1904         if (vq_is_packed(dev)) {
1905                 /*
1906                  * Bit[0:14]: avail index
1907                  * Bit[15]: avail wrap counter
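                 * e.g. last_avail_idx = 5 with the wrap counter set is
                 * reported as (5 | 1 << 15) = 0x8005.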
1908                  */
1909                 val = vq->last_avail_idx & 0x7fff;
1910                 val |= vq->avail_wrap_counter << 15;
1911                 msg->payload.state.num = val;
1912         } else {
1913                 msg->payload.state.num = vq->last_avail_idx;
1914         }
1915
1916         VHOST_LOG_CONFIG(INFO,
1917                 "vring base idx:%d file:%d\n", msg->payload.state.index,
1918                 msg->payload.state.num);
1919         /*
1920          * Based on the current qemu vhost-user implementation, this message
1921          * is sent, and only sent, from vhost_vring_stop.
1922          * TODO: clean up the vring; it isn't usable from this point on.
1923          */
1924         if (vq->kickfd >= 0)
1925                 close(vq->kickfd);
1926
1927         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
1928
1929         if (vq->callfd >= 0)
1930                 close(vq->callfd);
1931
1932         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
1933
1934         vq->signalled_used_valid = false;
1935
1936         if (vq_is_packed(dev)) {
1937                 rte_free(vq->shadow_used_packed);
1938                 vq->shadow_used_packed = NULL;
1939         } else {
1940                 rte_free(vq->shadow_used_split);
1941                 vq->shadow_used_split = NULL;
1942                 rte_free(vq->async_pkts_pending);
1943                 vq->async_pkts_pending = NULL;
1944                 rte_free(vq->async_pending_info);
1945                 vq->async_pending_info = NULL;
1948         }
1949
1950         rte_free(vq->batch_copy_elems);
1951         vq->batch_copy_elems = NULL;
1952
1953         msg->size = sizeof(msg->payload.state);
1954         msg->fd_num = 0;
1955
1956         vring_invalidate(dev, vq);
1957
1958         return RTE_VHOST_MSG_RESULT_REPLY;
1959 }
1960
1961 /*
1962  * When the virtio queues are ready to work, qemu sends us a message to
1963  * enable the virtio queue pair.
1964  */
1965 static int
1966 vhost_user_set_vring_enable(struct virtio_net **pdev,
1967                         struct VhostUserMsg *msg,
1968                         int main_fd __rte_unused)
1969 {
1970         struct virtio_net *dev = *pdev;
1971         int enable = (int)msg->payload.state.num;
1972         int index = (int)msg->payload.state.index;
1973
1974         if (validate_msg_fds(msg, 0) != 0)
1975                 return RTE_VHOST_MSG_RESULT_ERR;
1976
1977         VHOST_LOG_CONFIG(INFO,
1978                 "set queue enable: %d to qp idx: %d\n",
1979                 enable, index);
1980
1981         if (!enable && dev->virtqueue[index]->async_registered) {
1982                 if (dev->virtqueue[index]->async_pkts_inflight_n) {
1983                         VHOST_LOG_CONFIG(ERR, "failed to disable vring: "
1984                                 "async inflight packets must be completed first\n");
1985                         return RTE_VHOST_MSG_RESULT_ERR;
1986                 }
1987         }
1988
1989         dev->virtqueue[index]->enabled = enable;
1990
1991         return RTE_VHOST_MSG_RESULT_OK;
1992 }
1993
1994 static int
1995 vhost_user_get_protocol_features(struct virtio_net **pdev,
1996                         struct VhostUserMsg *msg,
1997                         int main_fd __rte_unused)
1998 {
1999         struct virtio_net *dev = *pdev;
2000         uint64_t protocol_features;
2001
2002         if (validate_msg_fds(msg, 0) != 0)
2003                 return RTE_VHOST_MSG_RESULT_ERR;
2004
2006         rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2007
2008         msg->payload.u64 = protocol_features;
2009         msg->size = sizeof(msg->payload.u64);
2010         msg->fd_num = 0;
2011
2012         return RTE_VHOST_MSG_RESULT_REPLY;
2013 }
2014
2015 static int
2016 vhost_user_set_protocol_features(struct virtio_net **pdev,
2017                         struct VhostUserMsg *msg,
2018                         int main_fd __rte_unused)
2019 {
2020         struct virtio_net *dev = *pdev;
2021         uint64_t protocol_features = msg->payload.u64;
2022         uint64_t slave_protocol_features = 0;
2023
2024         if (validate_msg_fds(msg, 0) != 0)
2025                 return RTE_VHOST_MSG_RESULT_ERR;
2026
2027         rte_vhost_driver_get_protocol_features(dev->ifname,
2028                         &slave_protocol_features);
2029         if (protocol_features & ~slave_protocol_features) {
2030                 VHOST_LOG_CONFIG(ERR,
2031                         "(%d) received invalid protocol features.\n",
2032                         dev->vid);
2033                 return RTE_VHOST_MSG_RESULT_ERR;
2034         }
2035
2036         dev->protocol_features = protocol_features;
2037         VHOST_LOG_CONFIG(INFO,
2038                 "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2039                 dev->protocol_features);
2040
2041         return RTE_VHOST_MSG_RESULT_OK;
2042 }
2043
2044 static int
2045 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2046                         int main_fd __rte_unused)
2047 {
2048         struct virtio_net *dev = *pdev;
2049         int fd = msg->fds[0];
2050         uint64_t size, off;
2051         void *addr;
2052
2053         if (validate_msg_fds(msg, 1) != 0)
2054                 return RTE_VHOST_MSG_RESULT_ERR;
2055
2056         if (fd < 0) {
2057                 VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2058                 return RTE_VHOST_MSG_RESULT_ERR;
2059         }
2060
2061         if (msg->size != sizeof(VhostUserLog)) {
2062                 VHOST_LOG_CONFIG(ERR,
2063                         "invalid log base msg size: %"PRIu32" != %d\n",
2064                         msg->size, (int)sizeof(VhostUserLog));
2065                 return RTE_VHOST_MSG_RESULT_ERR;
2066         }
2067
2068         size = msg->payload.log.mmap_size;
2069         off  = msg->payload.log.mmap_offset;
2070
2071         /*
              * Check for mmap size and offset overflow: in unsigned
              * arithmetic, -size equals UINT64_MAX - size + 1, so
              * off >= -size holds exactly when off + size would wrap.
              */
2072         if (off >= -size) {
2073                 VHOST_LOG_CONFIG(ERR,
2074                         "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2075                         off, size);
2076                 return RTE_VHOST_MSG_RESULT_ERR;
2077         }
2078
2079         VHOST_LOG_CONFIG(INFO,
2080                 "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
2081                 size, off);
2082
2083         /*
2084          * mmap from 0 to work around a hugepage mmap bug: mmap will
2085          * fail when the offset is not page-size aligned.
2086          */
2087         addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2088         close(fd);
2089         if (addr == MAP_FAILED) {
2090                 VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2091                 return RTE_VHOST_MSG_RESULT_ERR;
2092         }
2093
2094         /*
2095          * Free previously mapped log memory in case
2096          * VHOST_USER_SET_LOG_BASE is received multiple times.
2097          */
2098         if (dev->log_addr) {
2099                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2100         }
2101         dev->log_addr = (uint64_t)(uintptr_t)addr;
2102         dev->log_base = dev->log_addr + off;
2103         dev->log_size = size;
2104
2105         /*
2106          * The spec is not clear about it (yet), but QEMU doesn't expect
2107          * any payload in the reply.
2108          */
2109         msg->size = 0;
2110         msg->fd_num = 0;
2111
2112         return RTE_VHOST_MSG_RESULT_REPLY;
2113 }
2114
2115 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2116                         struct VhostUserMsg *msg,
2117                         int main_fd __rte_unused)
2118 {
2119         if (validate_msg_fds(msg, 1) != 0)
2120                 return RTE_VHOST_MSG_RESULT_ERR;
2121
2122         close(msg->fds[0]);
2123         VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2124
2125         return RTE_VHOST_MSG_RESULT_OK;
2126 }
2127
2128 /*
2129  * A RARP packet is constructed and broadcast to notify switches about
2130  * the new location of the migrated VM, so that packets from outside will
2131  * not be lost after migration.
2132  *
2133  * However, we don't actually "send" a RARP packet here; instead, we set
2134  * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
2135  */
2136 static int
2137 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2138                         int main_fd __rte_unused)
2139 {
2140         struct virtio_net *dev = *pdev;
2141         uint8_t *mac = (uint8_t *)&msg->payload.u64;
2142         struct rte_vdpa_device *vdpa_dev;
2143
2144         if (validate_msg_fds(msg, 0) != 0)
2145                 return RTE_VHOST_MSG_RESULT_ERR;
2146
2147         VHOST_LOG_CONFIG(DEBUG,
2148                 ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
2149                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2150         memcpy(dev->mac.addr_bytes, mac, 6);
2151
2152         /*
2153          * Set the flag to inject a RARP broadcast packet at
2154          * rte_vhost_dequeue_burst().
2155          *
2156          * __ATOMIC_RELEASE ordering is for making sure the mac is
2157          * copied before the flag is set.
2158          */
2159         __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2160         vdpa_dev = dev->vdpa_dev;
2161         if (vdpa_dev && vdpa_dev->ops->migration_done)
2162                 vdpa_dev->ops->migration_done(dev->vid);
2163
2164         return RTE_VHOST_MSG_RESULT_OK;
2165 }
2166
2167 static int
2168 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2169                         int main_fd __rte_unused)
2170 {
2171         struct virtio_net *dev = *pdev;
2172
2173         if (validate_msg_fds(msg, 0) != 0)
2174                 return RTE_VHOST_MSG_RESULT_ERR;
2175
2176         if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2177                         msg->payload.u64 > VIRTIO_MAX_MTU) {
2178                 VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2179                                 msg->payload.u64);
2180
2181                 return RTE_VHOST_MSG_RESULT_ERR;
2182         }
2183
2184         dev->mtu = msg->payload.u64;
2185
2186         return RTE_VHOST_MSG_RESULT_OK;
2187 }
2188
2189 static int
2190 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2191                         int main_fd __rte_unused)
2192 {
2193         struct virtio_net *dev = *pdev;
2194         int fd = msg->fds[0];
2195
2196         if (validate_msg_fds(msg, 1) != 0)
2197                 return RTE_VHOST_MSG_RESULT_ERR;
2198
2199         if (fd < 0) {
2200                 VHOST_LOG_CONFIG(ERR,
2201                                 "Invalid file descriptor for slave channel (%d)\n",
2202                                 fd);
2203                 return RTE_VHOST_MSG_RESULT_ERR;
2204         }
2205
2206         if (dev->slave_req_fd >= 0)
2207                 close(dev->slave_req_fd);
2208
2209         dev->slave_req_fd = fd;
2210
2211         return RTE_VHOST_MSG_RESULT_OK;
2212 }
2213
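/*
 * The helpers below use the standard half-open interval overlap test:
 * [start, end) intersects [addr, addr + len) iff
 * addr < end && addr + len > start. Any overlap with the descriptor,
 * avail, used or log areas means the IOTLB message affects this vring.
 */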
2214 static int
2215 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2216 {
2217         struct vhost_vring_addr *ra;
2218         uint64_t start, end, len;
2219
2220         start = imsg->iova;
2221         end = start + imsg->size;
2222
2223         ra = &vq->ring_addrs;
2224         len = sizeof(struct vring_desc) * vq->size;
2225         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2226                 return 1;
2227
2228         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2229         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2230                 return 1;
2231
2232         len = sizeof(struct vring_used) +
2233                sizeof(struct vring_used_elem) * vq->size;
2234         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2235                 return 1;
2236
2237         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2238                 len = sizeof(uint64_t);
2239                 if (ra->log_guest_addr < end &&
2240                     (ra->log_guest_addr + len) > start)
2241                         return 1;
2242         }
2243
2244         return 0;
2245 }
2246
2247 static int
2248 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2249 {
2250         struct vhost_vring_addr *ra;
2251         uint64_t start, end, len;
2252
2253         start = imsg->iova;
2254         end = start + imsg->size;
2255
2256         ra = &vq->ring_addrs;
2257         len = sizeof(struct vring_packed_desc) * vq->size;
2258         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2259                 return 1;
2260
2261         len = sizeof(struct vring_packed_desc_event);
2262         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2263                 return 1;
2264
2265         len = sizeof(struct vring_packed_desc_event);
2266         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2267                 return 1;
2268
2269         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2270                 len = sizeof(uint64_t);
2271                 if (ra->log_guest_addr < end &&
2272                     (ra->log_guest_addr + len) > start)
2273                         return 1;
2274         }
2275
2276         return 0;
2277 }
2278
2279 static int is_vring_iotlb(struct virtio_net *dev,
2280                           struct vhost_virtqueue *vq,
2281                           struct vhost_iotlb_msg *imsg)
2282 {
2283         if (vq_is_packed(dev))
2284                 return is_vring_iotlb_packed(vq, imsg);
2285         else
2286                 return is_vring_iotlb_split(vq, imsg);
2287 }
2288
2289 static int
2290 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2291                         int main_fd __rte_unused)
2292 {
2293         struct virtio_net *dev = *pdev;
2294         struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2295         uint16_t i;
2296         uint64_t vva, len;
2297
2298         if (validate_msg_fds(msg, 0) != 0)
2299                 return RTE_VHOST_MSG_RESULT_ERR;
2300
2301         switch (imsg->type) {
2302         case VHOST_IOTLB_UPDATE:
2303                 len = imsg->size;
2304                 vva = qva_to_vva(dev, imsg->uaddr, &len);
2305                 if (!vva)
2306                         return RTE_VHOST_MSG_RESULT_ERR;
2307
2308                 for (i = 0; i < dev->nr_vring; i++) {
2309                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2310
2311                         vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2312                                         len, imsg->perm);
2313
2314                         if (is_vring_iotlb(dev, vq, imsg))
2315                                 *pdev = dev = translate_ring_addresses(dev, i);
2316                 }
2317                 break;
2318         case VHOST_IOTLB_INVALIDATE:
2319                 for (i = 0; i < dev->nr_vring; i++) {
2320                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2321
2322                         vhost_user_iotlb_cache_remove(vq, imsg->iova,
2323                                         imsg->size);
2324
2325                         if (is_vring_iotlb(dev, vq, imsg))
2326                                 vring_invalidate(dev, vq);
2327                 }
2328                 break;
2329         default:
2330                 VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2331                                 imsg->type);
2332                 return RTE_VHOST_MSG_RESULT_ERR;
2333         }
2334
2335         return RTE_VHOST_MSG_RESULT_OK;
2336 }
2337
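/*
 * Postcopy live-migration support: open a userfaultfd, negotiate the API
 * with the UFFDIO_API ioctl, and hand the fd back to the master so page
 * faults on the guest memory can be monitored and served.
 */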
2338 static int
2339 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2340                         struct VhostUserMsg *msg,
2341                         int main_fd __rte_unused)
2342 {
2343         struct virtio_net *dev = *pdev;
2344 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2345         struct uffdio_api api_struct;
2346
2347         if (validate_msg_fds(msg, 0) != 0)
2348                 return RTE_VHOST_MSG_RESULT_ERR;
2349
2350         dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2351
2352         if (dev->postcopy_ufd == -1) {
2353                 VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2354                         strerror(errno));
2355                 return RTE_VHOST_MSG_RESULT_ERR;
2356         }
2357         api_struct.api = UFFD_API;
2358         api_struct.features = 0;
2359         if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2360                 VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2361                         strerror(errno));
2362                 close(dev->postcopy_ufd);
2363                 dev->postcopy_ufd = -1;
2364                 return RTE_VHOST_MSG_RESULT_ERR;
2365         }
2366         msg->fds[0] = dev->postcopy_ufd;
2367         msg->fd_num = 1;
2368
2369         return RTE_VHOST_MSG_RESULT_REPLY;
2370 #else
2371         dev->postcopy_ufd = -1;
2372         msg->fd_num = 0;
2373
2374         return RTE_VHOST_MSG_RESULT_ERR;
2375 #endif
2376 }
2377
2378 static int
2379 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2380                         struct VhostUserMsg *msg,
2381                         int main_fd __rte_unused)
2382 {
2383         struct virtio_net *dev = *pdev;
2384
2385         if (validate_msg_fds(msg, 0) != 0)
2386                 return RTE_VHOST_MSG_RESULT_ERR;
2387
2388         if (dev->mem && dev->mem->nregions) {
2389                 VHOST_LOG_CONFIG(ERR,
2390                         "Regions already registered at postcopy-listen\n");
2391                 return RTE_VHOST_MSG_RESULT_ERR;
2392         }
2393         dev->postcopy_listening = 1;
2394
2395         return RTE_VHOST_MSG_RESULT_OK;
2396 }
2397
2398 static int
2399 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2400                         int main_fd __rte_unused)
2401 {
2402         struct virtio_net *dev = *pdev;
2403
2404         if (validate_msg_fds(msg, 0) != 0)
2405                 return RTE_VHOST_MSG_RESULT_ERR;
2406
2407         dev->postcopy_listening = 0;
2408         if (dev->postcopy_ufd >= 0) {
2409                 close(dev->postcopy_ufd);
2410                 dev->postcopy_ufd = -1;
2411         }
2412
2413         msg->payload.u64 = 0;
2414         msg->size = sizeof(msg->payload.u64);
2415         msg->fd_num = 0;
2416
2417         return RTE_VHOST_MSG_RESULT_REPLY;
2418 }
2419
2420 static int
2421 vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2422                       int main_fd __rte_unused)
2423 {
2424         struct virtio_net *dev = *pdev;
2425
2426         if (validate_msg_fds(msg, 0) != 0)
2427                 return RTE_VHOST_MSG_RESULT_ERR;
2428
2429         msg->payload.u64 = dev->status;
2430         msg->size = sizeof(msg->payload.u64);
2431         msg->fd_num = 0;
2432
2433         return RTE_VHOST_MSG_RESULT_REPLY;
2434 }
2435
2436 static int
2437 vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2438                         int main_fd __rte_unused)
2439 {
2440         struct virtio_net *dev = *pdev;
2441
2442         if (validate_msg_fds(msg, 0) != 0)
2443                 return RTE_VHOST_MSG_RESULT_ERR;
2444
2445         /* As per the Virtio specification, the device status is 8 bits wide */
2446         if (msg->payload.u64 > UINT8_MAX) {
2447                 VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
2448                                 msg->payload.u64);
2449                 return RTE_VHOST_MSG_RESULT_ERR;
2450         }
2451
2452         dev->status = msg->payload.u64;
2453
2454         if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
2455             (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
2456                 VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
2457                 /*
2458                  * Clear the bit to let the driver know about the feature
2459                  * negotiation failure
2460                  */
2461                 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
2462         }
2463
2464         VHOST_LOG_CONFIG(INFO, "New device status (0x%08x):\n"
2465                         "\t-RESET: %u\n"
2466                         "\t-ACKNOWLEDGE: %u\n"
2467                         "\t-DRIVER: %u\n"
2468                         "\t-FEATURES_OK: %u\n"
2469                         "\t-DRIVER_OK: %u\n"
2470                         "\t-DEVICE_NEED_RESET: %u\n"
2471                         "\t-FAILED: %u\n",
2472                         dev->status,
2473                         (dev->status == VIRTIO_DEVICE_STATUS_RESET),
2474                         !!(dev->status & VIRTIO_DEVICE_STATUS_ACK),
2475                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER),
2476                         !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK),
2477                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK),
2478                         !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET),
2479                         !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
2480
2481         return RTE_VHOST_MSG_RESULT_OK;
2482 }
2483
2484 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2485                                         struct VhostUserMsg *msg,
2486                                         int main_fd);
2487 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2488         [VHOST_USER_NONE] = NULL,
2489         [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2490         [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2491         [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2492         [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2493         [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2494         [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2495         [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2496         [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2497         [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2498         [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2499         [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2500         [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2501         [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2502         [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2503         [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2504         [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2505         [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2506         [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2507         [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2508         [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2509         [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2510         [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2511         [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2512         [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2513         [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2514         [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2515         [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2516         [VHOST_USER_SET_STATUS] = vhost_user_set_status,
2517         [VHOST_USER_GET_STATUS] = vhost_user_get_status,
2518 };
2519
2520 /* Return the number of bytes read on success, or a negative value on failure. */
2521 static int
2522 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2523 {
2524         int ret;
2525
2526         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2527                 msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2528         if (ret <= 0) {
2529                 return ret;
2530         } else if (ret != VHOST_USER_HDR_SIZE) {
2531                 VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2532                 close_msg_fds(msg);
2533                 return -1;
2534         }
2535
2536         if (msg->size) {
2537                 if (msg->size > sizeof(msg->payload)) {
2538                         VHOST_LOG_CONFIG(ERR,
2539                                 "invalid msg size: %u\n", msg->size);
                        close_msg_fds(msg);
2540                         return -1;
2541                 }
2542                 ret = read(sockfd, &msg->payload, msg->size);
2543                 if (ret <= 0) {
                        close_msg_fds(msg);
2544                         return ret;
                 }
2545                 if (ret != (int)msg->size) {
2546                         VHOST_LOG_CONFIG(ERR,
2547                                 "read control message failed\n");
                        close_msg_fds(msg);
2548                         return -1;
2549                 }
2550         }
2551
2552         return ret;
2553 }
2554
2555 static int
2556 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2557 {
2558         if (!msg)
2559                 return 0;
2560
2561         return send_fd_message(sockfd, (char *)msg,
2562                 VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2563 }
2564
2565 static int
2566 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2567 {
2568         if (!msg)
2569                 return 0;
2570
2571         msg->flags &= ~VHOST_USER_VERSION_MASK;
2572         msg->flags &= ~VHOST_USER_NEED_REPLY;
2573         msg->flags |= VHOST_USER_VERSION;
2574         msg->flags |= VHOST_USER_REPLY_MASK;
2575
2576         return send_vhost_message(sockfd, msg);
2577 }
2578
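/*
 * Slave-channel requests that expect a reply hold slave_req_lock from
 * here until process_slave_message_reply() releases it, so at most one
 * such request can be outstanding at a time.
 */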
2579 static int
2580 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2581 {
2582         int ret;
2583
2584         if (msg->flags & VHOST_USER_NEED_REPLY)
2585                 rte_spinlock_lock(&dev->slave_req_lock);
2586
2587         ret = send_vhost_message(dev->slave_req_fd, msg);
2588         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2589                 rte_spinlock_unlock(&dev->slave_req_lock);
2590
2591         return ret;
2592 }
2593
2594 /*
2595  * Allocate a queue pair if it hasn't been allocated yet
2596  */
2597 static int
2598 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
2599                         struct VhostUserMsg *msg)
2600 {
2601         uint32_t vring_idx;
2602
2603         switch (msg->request.master) {
2604         case VHOST_USER_SET_VRING_KICK:
2605         case VHOST_USER_SET_VRING_CALL:
2606         case VHOST_USER_SET_VRING_ERR:
2607                 vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2608                 break;
2609         case VHOST_USER_SET_VRING_NUM:
2610         case VHOST_USER_SET_VRING_BASE:
2611         case VHOST_USER_SET_VRING_ENABLE:
2612                 vring_idx = msg->payload.state.index;
2613                 break;
2614         case VHOST_USER_SET_VRING_ADDR:
2615                 vring_idx = msg->payload.addr.index;
2616                 break;
2617         default:
2618                 return 0;
2619         }
2620
2621         if (vring_idx >= VHOST_MAX_VRING) {
2622                 VHOST_LOG_CONFIG(ERR,
2623                         "invalid vring index: %u\n", vring_idx);
2624                 return -1;
2625         }
2626
2627         if (dev->virtqueue[vring_idx])
2628                 return 0;
2629
2630         return alloc_vring_queue(dev, vring_idx);
2631 }
2632
2633 static void
2634 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
2635 {
2636         unsigned int i = 0;
2637         unsigned int vq_num = 0;
2638
2639         while (vq_num < dev->nr_vring) {
2640                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2641
2642                 if (vq) {
2643                         rte_spinlock_lock(&vq->access_lock);
2644                         vq_num++;
2645                 }
2646                 i++;
2647         }
2648 }
2649
2650 static void
2651 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
2652 {
2653         unsigned int i = 0;
2654         unsigned int vq_num = 0;
2655
2656         while (vq_num < dev->nr_vring) {
2657                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2658
2659                 if (vq) {
2660                         rte_spinlock_unlock(&vq->access_lock);
2661                         vq_num++;
2662                 }
2663                 i++;
2664         }
2665 }
2666
2667 int
2668 vhost_user_msg_handler(int vid, int fd)
2669 {
2670         struct virtio_net *dev;
2671         struct VhostUserMsg msg;
2672         struct rte_vdpa_device *vdpa_dev;
2673         int ret;
2674         int unlock_required = 0;
2675         bool handled;
2676         int request;
2677         uint32_t i;
2678
2679         dev = get_device(vid);
2680         if (dev == NULL)
2681                 return -1;
2682
2683         if (!dev->notify_ops) {
2684                 dev->notify_ops = vhost_driver_callback_get(dev->ifname);
2685                 if (!dev->notify_ops) {
2686                         VHOST_LOG_CONFIG(ERR,
2687                                 "failed to get callback ops for driver %s\n",
2688                                 dev->ifname);
2689                         return -1;
2690                 }
2691         }
2692
2693         ret = read_vhost_message(fd, &msg);
2694         if (ret <= 0) {
2695                 if (ret < 0)
2696                         VHOST_LOG_CONFIG(ERR,
2697                                 "vhost read message failed\n");
2698                 else
2699                         VHOST_LOG_CONFIG(INFO,
2700                                 "vhost peer closed\n");
2701
2702                 return -1;
2703         }
2704
2705         ret = 0;
2706         request = msg.request.master;
2707         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
2708                         vhost_message_str[request]) {
2709                 if (request != VHOST_USER_IOTLB_MSG)
2710                         VHOST_LOG_CONFIG(INFO, "read message %s\n",
2711                                 vhost_message_str[request]);
2712                 else
2713                         VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
2714                                 vhost_message_str[request]);
2715         } else {
2716                 VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
2717         }
2718
2719         ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
2720         if (ret < 0) {
2721                 VHOST_LOG_CONFIG(ERR,
2722                         "failed to alloc queue\n");
2723                 return -1;
2724         }
2725
2726         /*
2727          * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
2728          * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
2729          * and the device is destroyed. destroy_device() waits for queues to
2730          * become inactive, so it is safe. Otherwise, taking the access_lock
2731          * would cause a deadlock.
2732          */
2733         switch (request) {
2734         case VHOST_USER_SET_FEATURES:
2735         case VHOST_USER_SET_PROTOCOL_FEATURES:
2736         case VHOST_USER_SET_OWNER:
2737         case VHOST_USER_SET_MEM_TABLE:
2738         case VHOST_USER_SET_LOG_BASE:
2739         case VHOST_USER_SET_LOG_FD:
2740         case VHOST_USER_SET_VRING_NUM:
2741         case VHOST_USER_SET_VRING_ADDR:
2742         case VHOST_USER_SET_VRING_BASE:
2743         case VHOST_USER_SET_VRING_KICK:
2744         case VHOST_USER_SET_VRING_CALL:
2745         case VHOST_USER_SET_VRING_ERR:
2746         case VHOST_USER_SET_VRING_ENABLE:
2747         case VHOST_USER_SEND_RARP:
2748         case VHOST_USER_NET_SET_MTU:
2749         case VHOST_USER_SET_SLAVE_REQ_FD:
2750                 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2751                         vhost_user_lock_all_queue_pairs(dev);
2752                         unlock_required = 1;
2753                 }
2754                 break;
2755         default:
2756                 break;
2758         }
2759
2760         handled = false;
2761         if (dev->extern_ops.pre_msg_handle) {
2762                 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
2763                                 (void *)&msg);
2764                 switch (ret) {
2765                 case RTE_VHOST_MSG_RESULT_REPLY:
2766                         send_vhost_reply(fd, &msg);
2767                         /* Fall-through */
2768                 case RTE_VHOST_MSG_RESULT_ERR:
2769                 case RTE_VHOST_MSG_RESULT_OK:
2770                         handled = true;
2771                         goto skip_to_post_handle;
2772                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2773                 default:
2774                         break;
2775                 }
2776         }
2777
2778         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
2779                 if (!vhost_message_handlers[request])
2780                         goto skip_to_post_handle;
2781                 ret = vhost_message_handlers[request](&dev, &msg, fd);
2782
2783                 switch (ret) {
2784                 case RTE_VHOST_MSG_RESULT_ERR:
2785                         VHOST_LOG_CONFIG(ERR,
2786                                 "Processing %s failed.\n",
2787                                 vhost_message_str[request]);
2788                         handled = true;
2789                         break;
2790                 case RTE_VHOST_MSG_RESULT_OK:
2791                         VHOST_LOG_CONFIG(DEBUG,
2792                                 "Processing %s succeeded.\n",
2793                                 vhost_message_str[request]);
2794                         handled = true;
2795                         break;
2796                 case RTE_VHOST_MSG_RESULT_REPLY:
2797                         VHOST_LOG_CONFIG(DEBUG,
2798                                 "Processing %s succeeded and needs reply.\n",
2799                                 vhost_message_str[request]);
2800                         send_vhost_reply(fd, &msg);
2801                         handled = true;
2802                         break;
2803                 default:
2804                         break;
2805                 }
2806         }
2807
2808 skip_to_post_handle:
2809         if (ret != RTE_VHOST_MSG_RESULT_ERR &&
2810                         dev->extern_ops.post_msg_handle) {
2811                 ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
2812                                 (void *)&msg);
2813                 switch (ret) {
2814                 case RTE_VHOST_MSG_RESULT_REPLY:
2815                         send_vhost_reply(fd, &msg);
2816                         /* Fall-through */
2817                 case RTE_VHOST_MSG_RESULT_ERR:
2818                 case RTE_VHOST_MSG_RESULT_OK:
2819                         handled = true;
                             /* Fall-through */
2820                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2821                 default:
2822                         break;
2823                 }
2824         }
2825
2826         if (unlock_required)
2827                 vhost_user_unlock_all_queue_pairs(dev);
2828
2829         /* If message was not handled at this stage, treat it as an error */
2830         if (!handled) {
2831                 VHOST_LOG_CONFIG(ERR,
2832                         "vhost message (req: %d) was not handled.\n", request);
2833                 close_msg_fds(&msg);
2834                 ret = RTE_VHOST_MSG_RESULT_ERR;
2835         }
2836
2837         /*
2838          * If the request required a reply that was already sent,
2839          * this optional reply-ack won't be sent as the
2840          * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
2841          */
2842         if (msg.flags & VHOST_USER_NEED_REPLY) {
2843                 msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
2844                 msg.size = sizeof(msg.payload.u64);
2845                 msg.fd_num = 0;
2846                 send_vhost_reply(fd, &msg);
2847         } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
2848                 VHOST_LOG_CONFIG(ERR,
2849                         "vhost message handling failed.\n");
2850                 return -1;
2851         }
2852
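
        /*
         * Re-evaluate each virtqueue's readiness after the message has
         * been processed, and notify the application of any change.
         * A queue may still be unallocated at this point; vq_is_ready()
         * returns false for a NULL vq, so the branch below is never
         * taken in that case.
         */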
        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];
                bool cur_ready = vq_is_ready(dev, vq);

                if (cur_ready != (vq && vq->ready)) {
                        vq->ready = cur_ready;
                        vhost_user_notify_queue_state(dev, i, cur_ready);
                }
        }

        if (!virtio_is_ready(dev))
                goto out;

        /*
         * Virtio is now ready. If not done already, it is time
         * to notify the application it can process the rings and
         * configure the vDPA device if present.
         */

        if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                if (dev->notify_ops->new_device(dev->vid) == 0)
                        dev->flags |= VIRTIO_DEV_RUNNING;
        }

        vdpa_dev = dev->vdpa_dev;
        if (!vdpa_dev)
                goto out;

        if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
                if (vdpa_dev->ops->dev_conf(dev->vid))
                        VHOST_LOG_CONFIG(ERR,
                                         "Failed to configure vDPA device\n");
                else
                        dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
        }

out:
        return 0;
}
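
/*
 * When VHOST_USER_NEED_REPLY is set on the message, read the slave's
 * reply-ack and translate its payload into 0 (success) or -1 (failure).
 * The slave_req_lock is released on every exit path reached once a
 * reply was required.
 */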
static int
process_slave_message_reply(struct virtio_net *dev,
                const struct VhostUserMsg *msg)
{
        struct VhostUserMsg msg_reply;
        int ret;

        if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
                return 0;

        ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
        if (ret <= 0) {
                if (ret < 0)
                        VHOST_LOG_CONFIG(ERR,
                                "vhost read slave message reply failed\n");
                else
                        VHOST_LOG_CONFIG(INFO,
                                "vhost peer closed\n");
                ret = -1;
                goto out;
        }

        ret = 0;
        if (msg_reply.request.slave != msg->request.slave) {
                VHOST_LOG_CONFIG(ERR,
                        "Received unexpected msg type (%u), expected %u\n",
                        msg_reply.request.slave, msg->request.slave);
                ret = -1;
                goto out;
        }

        ret = msg_reply.payload.u64 ? -1 : 0;

out:
        rte_spinlock_unlock(&dev->slave_req_lock);
        return ret;
}
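
/*
 * Ask the vhost-user master to resolve an IOTLB miss: send a
 * VHOST_USER_SLAVE_IOTLB_MSG of type VHOST_IOTLB_MISS for the given
 * guest I/O virtual address and access permissions.
 */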
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}
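
/*
 * Notify the master that the device configuration space has changed,
 * optionally blocking until the master acknowledges the message.
 */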
static int
vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
                .flags = VHOST_USER_VERSION,
                .size = 0,
        };

        if (need_reply)
                msg.flags |= VHOST_USER_NEED_REPLY;

        ret = send_vhost_slave_message(dev, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                                "Failed to send config change (%d)\n",
                                ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}
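
/*
 * Public wrapper around vhost_user_slave_config_change(): look the
 * device up by vid and forward the request.
 */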
int
rte_vhost_slave_config_change(int vid, bool need_reply)
{
        struct virtio_net *dev;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        return vhost_user_slave_config_change(dev, need_reply);
}
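
/*
 * Map (fd >= 0) or unmap (fd < 0, signalled via VHOST_USER_VRING_NOFD_MASK)
 * a host notifier area for the given vring, then wait for the master's
 * reply-ack.
 */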
static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
                int index, int fd, uint64_t offset, uint64_t size)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
                .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
                .size = sizeof(msg.payload.area),
                .payload.area = {
                        .u64 = index & VHOST_USER_VRING_IDX_MASK,
                        .size = size,
                        .offset = offset,
                },
        };

        if (fd < 0) {
                msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
        } else {
                msg.fds[0] = fd;
                msg.fd_num = 1;
        }

        ret = send_vhost_slave_message(dev, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                        "Failed to set host notifier (%d)\n", ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}
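
/*
 * Enable or disable VFIO-based host notifier mappings for one queue or,
 * with qid == RTE_VHOST_QUEUE_ALL, for every vring of the device.
 * Typically called by vDPA drivers once the device has been configured,
 * e.g. rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true).
 */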
int
rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
        struct virtio_net *dev;
        struct rte_vdpa_device *vdpa_dev;
        int vfio_device_fd, ret = 0;
        uint64_t offset, size;
        unsigned int i, q_start, q_last;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev == NULL)
                return -ENODEV;

        if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
            !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
                return -ENOTSUP;

        if (qid == RTE_VHOST_QUEUE_ALL) {
                /* Guard against an unsigned underflow of q_last below. */
                if (dev->nr_vring == 0)
                        return -EINVAL;
                q_start = 0;
                q_last = dev->nr_vring - 1;
        } else {
                if (qid >= dev->nr_vring)
                        return -EINVAL;
                q_start = qid;
                q_last = qid;
        }

        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

        vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
        if (vfio_device_fd < 0)
                return -ENOTSUP;

        if (enable) {
                for (i = q_start; i <= q_last; i++) {
                        if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
                                        &size) < 0) {
                                ret = -ENOTSUP;
                                goto disable;
                        }

                        if (vhost_user_slave_set_vring_host_notifier(dev, i,
                                        vfio_device_fd, offset, size) < 0) {
                                ret = -EFAULT;
                                goto disable;
                        }
                }
        } else {
disable:
                for (i = q_start; i <= q_last; i++) {
                        vhost_user_slave_set_vring_host_notifier(dev, i, -1,
                                        0, 0);
                }
        }

        return ret;
}