vhost: add log when setting vring base
[dpdk.git] lib/vhost/vhost_user.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2018 Intel Corporation
3  */
4
5 /* Security model
6  * --------------
7  * The vhost-user protocol connection is an external interface, so it must be
8  * robust against invalid inputs.
9  *
10  * This is important because the vhost-user master is only one step removed
11  * from the guest.  A malicious guest that has escaped could then launch
12  * further attacks through the vhost-user master.
13  *
14  * Even in deployments where guests are trusted, a bug in the vhost-user master
15  * can still cause invalid messages to be sent.  Such messages must not
16  * compromise the stability of the DPDK application by causing crashes, memory
17  * corruption, or other problematic behavior.
18  *
19  * Do not assume received VhostUserMsg fields contain sensible values!
20  */
21
22 #include <stdint.h>
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <string.h>
26 #include <unistd.h>
27 #include <fcntl.h>
28 #include <sys/ioctl.h>
29 #include <sys/mman.h>
30 #include <sys/types.h>
31 #include <sys/stat.h>
32 #include <sys/syscall.h>
33 #include <assert.h>
34 #ifdef RTE_LIBRTE_VHOST_NUMA
35 #include <numaif.h>
36 #endif
37 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
38 #include <linux/userfaultfd.h>
39 #endif
40 #ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
41 #include <linux/memfd.h>
42 #define MEMFD_SUPPORTED
43 #endif
44
45 #include <rte_common.h>
46 #include <rte_malloc.h>
47 #include <rte_log.h>
48 #include <rte_vfio.h>
49 #include <rte_errno.h>
50
51 #include "iotlb.h"
52 #include "vhost.h"
53 #include "vhost_user.h"
54
55 #define VIRTIO_MIN_MTU 68
56 #define VIRTIO_MAX_MTU 65535
57
58 #define INFLIGHT_ALIGNMENT      64
59 #define INFLIGHT_VERSION        0x1
60
61 static const char *vhost_message_str[VHOST_USER_MAX] = {
62         [VHOST_USER_NONE] = "VHOST_USER_NONE",
63         [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
64         [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
65         [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
66         [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
67         [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
68         [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
69         [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
70         [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
71         [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
72         [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
73         [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
74         [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
75         [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
76         [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
77         [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
78         [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
79         [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
80         [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
81         [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
82         [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
83         [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
84         [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
85         [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
86         [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
87         [VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
88         [VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
89         [VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
90         [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
91         [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
92         [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
93         [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
94 };
95
96 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
97 static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);
98
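/*
 * Close every file descriptor attached to the message, marking each slot
 * as -1 so it cannot be closed twice.
 */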
99 static void
100 close_msg_fds(struct VhostUserMsg *msg)
101 {
102         int i;
103
104         for (i = 0; i < msg->fd_num; i++) {
105                 int fd = msg->fds[i];
106
107                 if (fd == -1)
108                         continue;
109
110                 msg->fds[i] = -1;
111                 close(fd);
112         }
113 }
114
115 /*
116  * Ensure the expected number of FDs was received;
117  * if not, close all FDs and return an error.
118  */
119 static int
120 validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
121 {
122         if (msg->fd_num == expected_fds)
123                 return 0;
124
125         VHOST_LOG_CONFIG(ERR,
126                 "expected %d FDs for request %s, received %d\n",
127                 expected_fds,
128                 vhost_message_str[msg->request.master],
129                 msg->fd_num);
130
131         close_msg_fds(msg);
132
133         return -1;
134 }
135
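/*
 * Return the filesystem block size for the given fd; for a hugetlbfs-backed
 * fd this is the hugepage size. Returns (uint64_t)-1 if fstat() fails.
 */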
136 static uint64_t
137 get_blk_size(int fd)
138 {
139         struct stat stat;
140         int ret;
141
142         ret = fstat(fd, &stat);
143         return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
144 }
145
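/*
 * Map or unmap a guest memory region in DPDK's default VFIO container,
 * so that DMA engines used by the async data path can reach it.
 */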
146 static int
147 async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
148 {
149         uint64_t host_iova;
150         int ret = 0;
151
152         host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
153         if (do_map) {
154                 /* Add mapped region into the default container of DPDK. */
155                 ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
156                                                  region->host_user_addr,
157                                                  host_iova,
158                                                  region->size);
159                 if (ret) {
160                         /*
161                          * The DMA device may be bound to a kernel driver, in which
162                          * case we don't need to program the IOMMU manually. However,
163                          * if no device is bound with vfio/uio in DPDK, and the vfio
164                          * kernel module is loaded, the API will still be called and
165                          * return ENODEV/ENOTSUP.
166                          *
167                          * DPDK vfio only returns ENODEV/ENOTSUP in very similar
168                          * situations (vfio either unsupported, or supported
169                          * but no devices found). Either way, no mappings could be
170                          * performed. We treat it as a normal case in the async path.
171                          */
172                         if (rte_errno == ENODEV || rte_errno == ENOTSUP)
173                                 return 0;
174
175                         VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
176                         /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
177                         return 0;
178                 }
179
180         } else {
181                 /* Remove mapped region from the default container of DPDK. */
182                 ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
183                                                    region->host_user_addr,
184                                                    host_iova,
185                                                    region->size);
186                 if (ret) {
187                         /* Like DMA map, ignore the kernel driver case when unmapping. */
188                         if (rte_errno == EINVAL)
189                                 return 0;
190
191                         VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
192                         return ret;
193                 }
194         }
195
196         return ret;
197 }
198
199 static void
200 free_mem_region(struct virtio_net *dev)
201 {
202         uint32_t i;
203         struct rte_vhost_mem_region *reg;
204
205         if (!dev || !dev->mem)
206                 return;
207
208         for (i = 0; i < dev->mem->nregions; i++) {
209                 reg = &dev->mem->regions[i];
210                 if (reg->host_user_addr) {
211                         if (dev->async_copy && rte_vfio_is_enabled("vfio"))
212                                 async_dma_map(reg, false);
213
214                         munmap(reg->mmap_addr, reg->mmap_size);
215                         close(reg->fd);
216                 }
217         }
218 }
219
220 void
221 vhost_backend_cleanup(struct virtio_net *dev)
222 {
223         if (dev->mem) {
224                 free_mem_region(dev);
225                 rte_free(dev->mem);
226                 dev->mem = NULL;
227         }
228
229         rte_free(dev->guest_pages);
230         dev->guest_pages = NULL;
231
232         if (dev->log_addr) {
233                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
234                 dev->log_addr = 0;
235         }
236
237         if (dev->inflight_info) {
238                 if (dev->inflight_info->addr) {
239                         munmap(dev->inflight_info->addr,
240                                dev->inflight_info->size);
241                         dev->inflight_info->addr = NULL;
242                 }
243
244                 if (dev->inflight_info->fd >= 0) {
245                         close(dev->inflight_info->fd);
246                         dev->inflight_info->fd = -1;
247                 }
248
249                 rte_free(dev->inflight_info);
250                 dev->inflight_info = NULL;
251         }
252
253         if (dev->slave_req_fd >= 0) {
254                 close(dev->slave_req_fd);
255                 dev->slave_req_fd = -1;
256         }
257
258         if (dev->postcopy_ufd >= 0) {
259                 close(dev->postcopy_ufd);
260                 dev->postcopy_ufd = -1;
261         }
262
263         dev->postcopy_listening = 0;
264 }
265
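/*
 * Propagate a virtqueue state change to the guest notification setup,
 * the vDPA driver (if any) and the application callback.
 */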
266 static void
267 vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
268                               int enable)
269 {
270         struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
271         struct vhost_virtqueue *vq = dev->virtqueue[index];
272
273         /* Configure guest notifications on enable */
274         if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
275                 vhost_enable_guest_notification(dev, vq, vq->notif_enable);
276
277         if (vdpa_dev && vdpa_dev->ops->set_vring_state)
278                 vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
279
280         if (dev->notify_ops->vring_state_changed)
281                 dev->notify_ops->vring_state_changed(dev->vid,
282                                 index, enable);
283 }
284
285 /*
286  * This function currently does nothing beyond validating the message,
287  * so it just returns success.
288  */
289 static int
290 vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
291                         struct VhostUserMsg *msg,
292                         int main_fd __rte_unused)
293 {
294         if (validate_msg_fds(msg, 0) != 0)
295                 return RTE_VHOST_MSG_RESULT_ERR;
296
297         return RTE_VHOST_MSG_RESULT_OK;
298 }
299
300 static int
301 vhost_user_reset_owner(struct virtio_net **pdev,
302                         struct VhostUserMsg *msg,
303                         int main_fd __rte_unused)
304 {
305         struct virtio_net *dev = *pdev;
306
307         if (validate_msg_fds(msg, 0) != 0)
308                 return RTE_VHOST_MSG_RESULT_ERR;
309
310         vhost_destroy_device_notify(dev);
311
312         cleanup_device(dev, 0);
313         reset_device(dev);
314         return RTE_VHOST_MSG_RESULT_OK;
315 }
316
317 /*
318  * The master requests the features we support.
319  */
320 static int
321 vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
322                         int main_fd __rte_unused)
323 {
324         struct virtio_net *dev = *pdev;
325         uint64_t features = 0;
326
327         if (validate_msg_fds(msg, 0) != 0)
328                 return RTE_VHOST_MSG_RESULT_ERR;
329
330         rte_vhost_driver_get_features(dev->ifname, &features);
331
332         msg->payload.u64 = features;
333         msg->size = sizeof(msg->payload.u64);
334         msg->fd_num = 0;
335
336         return RTE_VHOST_MSG_RESULT_REPLY;
337 }
338
339 /*
340  * The master requests the number of queues we support.
341  */
342 static int
343 vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
344                         int main_fd __rte_unused)
345 {
346         struct virtio_net *dev = *pdev;
347         uint32_t queue_num = 0;
348
349         if (validate_msg_fds(msg, 0) != 0)
350                 return RTE_VHOST_MSG_RESULT_ERR;
351
352         rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
353
354         msg->payload.u64 = (uint64_t)queue_num;
355         msg->size = sizeof(msg->payload.u64);
356         msg->fd_num = 0;
357
358         return RTE_VHOST_MSG_RESULT_REPLY;
359 }
360
361 /*
362  * We receive the features negotiated between us and the virtio driver.
363  */
364 static int
365 vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
366                         int main_fd __rte_unused)
367 {
368         struct virtio_net *dev = *pdev;
369         uint64_t features = msg->payload.u64;
370         uint64_t vhost_features = 0;
371         struct rte_vdpa_device *vdpa_dev;
372
373         if (validate_msg_fds(msg, 0) != 0)
374                 return RTE_VHOST_MSG_RESULT_ERR;
375
376         rte_vhost_driver_get_features(dev->ifname, &vhost_features);
377         if (features & ~vhost_features) {
378                 VHOST_LOG_CONFIG(ERR,
379                         "(%d) received invalid negotiated features.\n",
380                         dev->vid);
381                 dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
382                 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
383
384                 return RTE_VHOST_MSG_RESULT_ERR;
385         }
386
387         if (dev->flags & VIRTIO_DEV_RUNNING) {
388                 if (dev->features == features)
389                         return RTE_VHOST_MSG_RESULT_OK;
390
391                 /*
392                  * Error out if master tries to change features while device is
393                  * in running state. The exception being VHOST_F_LOG_ALL, which
394                  * is enabled when the live-migration starts.
395                  */
396                 if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
397                         VHOST_LOG_CONFIG(ERR,
398                                 "(%d) features changed while device is running.\n",
399                                 dev->vid);
400                         return RTE_VHOST_MSG_RESULT_ERR;
401                 }
402
403                 if (dev->notify_ops->features_changed)
404                         dev->notify_ops->features_changed(dev->vid, features);
405         }
406
407         dev->features = features;
408         if (dev->features &
409                 ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
410                  (1ULL << VIRTIO_F_VERSION_1) |
411                  (1ULL << VIRTIO_F_RING_PACKED))) {
412                 dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
413         } else {
414                 dev->vhost_hlen = sizeof(struct virtio_net_hdr);
415         }
416         VHOST_LOG_CONFIG(INFO,
417                 "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
418         VHOST_LOG_CONFIG(DEBUG,
419                 "(%d) mergeable RX buffers %s, virtio 1 %s\n",
420                 dev->vid,
421                 (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
422                 (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
423
424         if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
425             !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
426                 /*
427                  * Remove all but first queue pair if MQ hasn't been
428                  * negotiated. This is safe because the device is not
429                  * running at this stage.
430                  */
431                 while (dev->nr_vring > 2) {
432                         struct vhost_virtqueue *vq;
433
434                         vq = dev->virtqueue[--dev->nr_vring];
435                         if (!vq)
436                                 continue;
437
438                         dev->virtqueue[dev->nr_vring] = NULL;
439                         cleanup_vq(vq, 1);
440                         cleanup_vq_inflight(dev, vq);
441                         free_vq(dev, vq);
442                 }
443         }
444
445         vdpa_dev = dev->vdpa_dev;
446         if (vdpa_dev)
447                 vdpa_dev->ops->set_features(dev->vid);
448
449         dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
450         return RTE_VHOST_MSG_RESULT_OK;
451 }
452
453 /*
454  * The virtio device sends us the size of the descriptor ring.
455  */
456 static int
457 vhost_user_set_vring_num(struct virtio_net **pdev,
458                         struct VhostUserMsg *msg,
459                         int main_fd __rte_unused)
460 {
461         struct virtio_net *dev = *pdev;
462         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
463
464         if (validate_msg_fds(msg, 0) != 0)
465                 return RTE_VHOST_MSG_RESULT_ERR;
466
467         if (msg->payload.state.num > 32768) {
468                 VHOST_LOG_CONFIG(ERR, "invalid virtqueue size %u\n", msg->payload.state.num);
469                 return RTE_VHOST_MSG_RESULT_ERR;
470         }
471
472         vq->size = msg->payload.state.num;
473
474         /* VIRTIO 1.0, 2.4 Virtqueues says:
475          *
476          *   Queue Size value is always a power of 2. The maximum Queue Size
477          *   value is 32768.
478          *
479          * VIRTIO 1.1 2.7 Virtqueues says:
480          *
481          *   Packed virtqueues support up to 2^15 entries each.
482          */
483         if (!vq_is_packed(dev)) {
484                 if (vq->size & (vq->size - 1)) {
485                         VHOST_LOG_CONFIG(ERR,
486                                 "invalid virtqueue size %u\n", vq->size);
487                         return RTE_VHOST_MSG_RESULT_ERR;
488                 }
489         }
490
491         if (vq_is_packed(dev)) {
492                 if (vq->shadow_used_packed)
493                         rte_free(vq->shadow_used_packed);
494                 vq->shadow_used_packed = rte_malloc_socket(NULL,
495                                 vq->size *
496                                 sizeof(struct vring_used_elem_packed),
497                                 RTE_CACHE_LINE_SIZE, vq->numa_node);
498                 if (!vq->shadow_used_packed) {
499                         VHOST_LOG_CONFIG(ERR,
500                                         "failed to allocate memory for shadow used ring.\n");
501                         return RTE_VHOST_MSG_RESULT_ERR;
502                 }
503
504         } else {
505                 if (vq->shadow_used_split)
506                         rte_free(vq->shadow_used_split);
507
508                 vq->shadow_used_split = rte_malloc_socket(NULL,
509                                 vq->size * sizeof(struct vring_used_elem),
510                                 RTE_CACHE_LINE_SIZE, vq->numa_node);
511
512                 if (!vq->shadow_used_split) {
513                         VHOST_LOG_CONFIG(ERR,
514                                         "failed to allocate memory for vq internal data.\n");
515                         return RTE_VHOST_MSG_RESULT_ERR;
516                 }
517         }
518
519         if (vq->batch_copy_elems)
520                 rte_free(vq->batch_copy_elems);
521         vq->batch_copy_elems = rte_malloc_socket(NULL,
522                                 vq->size * sizeof(struct batch_copy_elem),
523                                 RTE_CACHE_LINE_SIZE, vq->numa_node);
524         if (!vq->batch_copy_elems) {
525                 VHOST_LOG_CONFIG(ERR,
526                         "failed to allocate memory for batching copy.\n");
527                 return RTE_VHOST_MSG_RESULT_ERR;
528         }
529
530         return RTE_VHOST_MSG_RESULT_OK;
531 }
532
533 /*
534  * Reallocate virtio_dev, vhost_virtqueue and related data structures so
535  * that they reside on the same NUMA node as the vring descriptor memory.
536  */
537 #ifdef RTE_LIBRTE_VHOST_NUMA
538 static struct virtio_net*
539 numa_realloc(struct virtio_net *dev, int index)
540 {
541         int node, dev_node;
542         struct virtio_net *old_dev;
543         struct vhost_virtqueue *vq;
544         struct batch_copy_elem *bce;
545         struct guest_page *gp;
546         struct rte_vhost_memory *mem;
547         size_t mem_size;
548         int ret;
549
550         old_dev = dev;
551         vq = dev->virtqueue[index];
552
553         /*
554          * If the VQ is ready, it is too late to reallocate; it certainly
555          * already happened on VHOST_USER_SET_VRING_ADDR anyway.
556          */
557         if (vq->ready)
558                 return dev;
559
560         ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
561         if (ret) {
562                 VHOST_LOG_CONFIG(ERR, "Unable to get virtqueue %d numa information.\n", index);
563                 return dev;
564         }
565
566         if (node == vq->numa_node)
567                 goto out_dev_realloc;
568
569         vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
570         if (!vq) {
571                 VHOST_LOG_CONFIG(ERR, "Failed to realloc virtqueue %d on node %d\n",
572                                 index, node);
573                 return dev;
574         }
575
576         if (vq != dev->virtqueue[index]) {
577                 VHOST_LOG_CONFIG(INFO, "reallocated virtqueue on node %d\n", node);
578                 dev->virtqueue[index] = vq;
579                 vhost_user_iotlb_init(dev, index);
580         }
581
582         if (vq_is_packed(dev)) {
583                 struct vring_used_elem_packed *sup;
584
585                 sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
586                                 RTE_CACHE_LINE_SIZE, node);
587                 if (!sup) {
588                         VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow packed on node %d\n", node);
589                         return dev;
590                 }
591                 vq->shadow_used_packed = sup;
592         } else {
593                 struct vring_used_elem *sus;
594
595                 sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
596                                 RTE_CACHE_LINE_SIZE, node);
597                 if (!sus) {
598                         VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow split on node %d\n", node);
599                         return dev;
600                 }
601                 vq->shadow_used_split = sus;
602         }
603
604         bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
605                         RTE_CACHE_LINE_SIZE, node);
606         if (!bce) {
607                 VHOST_LOG_CONFIG(ERR, "Failed to realloc batch copy elem on node %d\n", node);
608                 return dev;
609         }
610         vq->batch_copy_elems = bce;
611
612         if (vq->log_cache) {
613                 struct log_cache_entry *lc;
614
615                 lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
616                 if (!lc) {
617                         VHOST_LOG_CONFIG(ERR, "Failed to realloc log cache on node %d\n", node);
618                         return dev;
619                 }
620                 vq->log_cache = lc;
621         }
622
623         if (vq->resubmit_inflight) {
624                 struct rte_vhost_resubmit_info *ri;
625
626                 ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
627                 if (!ri) {
628                         VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit inflight on node %d\n",
629                                         node);
630                         return dev;
631                 }
632                 vq->resubmit_inflight = ri;
633
634                 if (ri->resubmit_list) {
635                         struct rte_vhost_resubmit_desc *rd;
636
637                         rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,
638                                         0, node);
639                         if (!rd) {
640                                 VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit list on node %d\n",
641                                                 node);
642                                 return dev;
643                         }
644                         ri->resubmit_list = rd;
645                 }
646         }
647
648         vq->numa_node = node;
649
650 out_dev_realloc:
651
652         if (dev->flags & VIRTIO_DEV_RUNNING)
653                 return dev;
654
655         ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
656         if (ret) {
657                 VHOST_LOG_CONFIG(ERR, "Unable to get Virtio dev %d numa information.\n", dev->vid);
658                 return dev;
659         }
660
661         if (dev_node == node)
662                 return dev;
663
664         dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
665         if (!dev) {
666                 VHOST_LOG_CONFIG(ERR, "Failed to realloc dev on node %d\n", node);
667                 return old_dev;
668         }
669
670         VHOST_LOG_CONFIG(INFO, "reallocated device on node %d\n", node);
671         vhost_devices[dev->vid] = dev;
672
673         mem_size = sizeof(struct rte_vhost_memory) +
674                 sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
675         mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
676         if (!mem) {
677                 VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
678                 return dev;
679         }
680         dev->mem = mem;
681
682         gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
683                         RTE_CACHE_LINE_SIZE, node);
684         if (!gp) {
685                 VHOST_LOG_CONFIG(ERR, "Failed to realloc guest pages on node %d\n", node);
686                 return dev;
687         }
688         dev->guest_pages = gp;
689
690         return dev;
691 }
692 #else
693 static struct virtio_net*
694 numa_realloc(struct virtio_net *dev, int index __rte_unused)
695 {
696         return dev;
697 }
698 #endif
699
700 /* Converts QEMU virtual address to Vhost virtual address. */
701 static uint64_t
702 qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
703 {
704         struct rte_vhost_mem_region *r;
705         uint32_t i;
706
707         if (unlikely(!dev || !dev->mem))
708                 goto out_error;
709
710         /* Find the region where the address lives. */
711         for (i = 0; i < dev->mem->nregions; i++) {
712                 r = &dev->mem->regions[i];
713
714                 if (qva >= r->guest_user_addr &&
715                     qva <  r->guest_user_addr + r->size) {
716
717                         if (unlikely(*len > r->guest_user_addr + r->size - qva))
718                                 *len = r->guest_user_addr + r->size - qva;
719
720                         return qva - r->guest_user_addr +
721                                r->host_user_addr;
722                 }
723         }
724 out_error:
725         *len = 0;
726
727         return 0;
728 }
729
730
731 /*
732  * Converts ring address to Vhost virtual address.
733  * If IOMMU is enabled, the ring address is a guest IO virtual address,
734  * else it is a QEMU virtual address.
735  */
736 static uint64_t
737 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
738                 uint64_t ra, uint64_t *size)
739 {
740         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
741                 uint64_t vva;
742
743                 vhost_user_iotlb_rd_lock(vq);
744                 vva = vhost_iova_to_vva(dev, vq, ra,
745                                         size, VHOST_ACCESS_RW);
746                 vhost_user_iotlb_rd_unlock(vq);
747
748                 return vva;
749         }
750
751         return qva_to_vva(dev, ra, size);
752 }
753
754 static uint64_t
755 log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
756 {
757         uint64_t log_gpa;
758
759         vhost_user_iotlb_rd_lock(vq);
760         log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
761         vhost_user_iotlb_rd_unlock(vq);
762
763         return log_gpa;
764 }
765
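/*
 * Translate the ring addresses stored in vq->ring_addrs into host virtual
 * addresses. May reallocate the device and virtqueue onto another NUMA
 * node, so callers must use the returned device pointer.
 */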
766 static struct virtio_net *
767 translate_ring_addresses(struct virtio_net *dev, int vq_index)
768 {
769         struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
770         struct vhost_vring_addr *addr = &vq->ring_addrs;
771         uint64_t len, expected_len;
772
773         if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
774                 vq->log_guest_addr =
775                         log_addr_to_gpa(dev, vq);
776                 if (vq->log_guest_addr == 0) {
777                         VHOST_LOG_CONFIG(DEBUG,
778                                 "(%d) failed to map log_guest_addr.\n",
779                                 dev->vid);
780                         return dev;
781                 }
782         }
783
784         if (vq_is_packed(dev)) {
785                 len = sizeof(struct vring_packed_desc) * vq->size;
786                 vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
787                         ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
788                 if (vq->desc_packed == NULL ||
789                                 len != sizeof(struct vring_packed_desc) *
790                                 vq->size) {
791                         VHOST_LOG_CONFIG(DEBUG,
792                                 "(%d) failed to map desc_packed ring.\n",
793                                 dev->vid);
794                         return dev;
795                 }
796
797                 dev = numa_realloc(dev, vq_index);
798                 vq = dev->virtqueue[vq_index];
799                 addr = &vq->ring_addrs;
800
801                 len = sizeof(struct vring_packed_desc_event);
802                 vq->driver_event = (struct vring_packed_desc_event *)
803                                         (uintptr_t)ring_addr_to_vva(dev,
804                                         vq, addr->avail_user_addr, &len);
805                 if (vq->driver_event == NULL ||
806                                 len != sizeof(struct vring_packed_desc_event)) {
807                         VHOST_LOG_CONFIG(DEBUG,
808                                 "(%d) failed to find driver area address.\n",
809                                 dev->vid);
810                         return dev;
811                 }
812
813                 len = sizeof(struct vring_packed_desc_event);
814                 vq->device_event = (struct vring_packed_desc_event *)
815                                         (uintptr_t)ring_addr_to_vva(dev,
816                                         vq, addr->used_user_addr, &len);
817                 if (vq->device_event == NULL ||
818                                 len != sizeof(struct vring_packed_desc_event)) {
819                         VHOST_LOG_CONFIG(DEBUG,
820                                 "(%d) failed to find device area address.\n",
821                                 dev->vid);
822                         return dev;
823                 }
824
825                 vq->access_ok = true;
826                 return dev;
827         }
828
829         /* The addresses are converted from QEMU virtual to Vhost virtual. */
830         if (vq->desc && vq->avail && vq->used)
831                 return dev;
832
833         len = sizeof(struct vring_desc) * vq->size;
834         vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
835                         vq, addr->desc_user_addr, &len);
836         if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
837                 VHOST_LOG_CONFIG(DEBUG,
838                         "(%d) failed to map desc ring.\n",
839                         dev->vid);
840                 return dev;
841         }
842
843         dev = numa_realloc(dev, vq_index);
844         vq = dev->virtqueue[vq_index];
845         addr = &vq->ring_addrs;
846
847         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
848         if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
849                 len += sizeof(uint16_t);
850         expected_len = len;
851         vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
852                         vq, addr->avail_user_addr, &len);
853         if (vq->avail == 0 || len != expected_len) {
854                 VHOST_LOG_CONFIG(DEBUG,
855                         "(%d) failed to map avail ring.\n",
856                         dev->vid);
857                 return dev;
858         }
859
860         len = sizeof(struct vring_used) +
861                 sizeof(struct vring_used_elem) * vq->size;
862         if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
863                 len += sizeof(uint16_t);
864         expected_len = len;
865         vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
866                         vq, addr->used_user_addr, &len);
867         if (vq->used == 0 || len != expected_len) {
868                 VHOST_LOG_CONFIG(DEBUG,
869                         "(%d) failed to map used ring.\n",
870                         dev->vid);
871                 return dev;
872         }
873
874         if (vq->last_used_idx != vq->used->idx) {
875                 VHOST_LOG_CONFIG(WARNING,
876                         "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
877                         "some packets may be resent for Tx and dropped for Rx\n",
878                         vq->last_used_idx, vq->used->idx);
879                 vq->last_used_idx  = vq->used->idx;
880                 vq->last_avail_idx = vq->used->idx;
881         }
882
883         vq->access_ok = true;
884
885         VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
886                         dev->vid, vq->desc);
887         VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
888                         dev->vid, vq->avail);
889         VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
890                         dev->vid, vq->used);
891         VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
892                         dev->vid, vq->log_guest_addr);
893
894         return dev;
895 }
896
897 /*
898  * The virtio device sends us the desc, used and avail ring addresses.
899  * This function then converts these to our address space.
900  */
901 static int
902 vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
903                         int main_fd __rte_unused)
904 {
905         struct virtio_net *dev = *pdev;
906         struct vhost_virtqueue *vq;
907         struct vhost_vring_addr *addr = &msg->payload.addr;
908         bool access_ok;
909
910         if (validate_msg_fds(msg, 0) != 0)
911                 return RTE_VHOST_MSG_RESULT_ERR;
912
913         if (dev->mem == NULL)
914                 return RTE_VHOST_MSG_RESULT_ERR;
915
916         /* addr->index refers to the queue index: txq is 1, rxq is 0. */
917         vq = dev->virtqueue[msg->payload.addr.index];
918
919         access_ok = vq->access_ok;
920
921         /*
922          * Ring addresses should not be interpreted as long as the ring is not
923          * started and enabled.
924          */
925         memcpy(&vq->ring_addrs, addr, sizeof(*addr));
926
927         vring_invalidate(dev, vq);
928
929         if ((vq->enabled && (dev->features &
930                                 (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
931                         access_ok) {
932                 dev = translate_ring_addresses(dev, msg->payload.addr.index);
933                 if (!dev)
934                         return RTE_VHOST_MSG_RESULT_ERR;
935
936                 *pdev = dev;
937         }
938
939         return RTE_VHOST_MSG_RESULT_OK;
940 }
941
942 /*
943  * The virtio device sends us the base index of the available ring.
944  */
945 static int
946 vhost_user_set_vring_base(struct virtio_net **pdev,
947                         struct VhostUserMsg *msg,
948                         int main_fd __rte_unused)
949 {
950         struct virtio_net *dev = *pdev;
951         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
952         uint64_t val = msg->payload.state.num;
953
954         if (validate_msg_fds(msg, 0) != 0)
955                 return RTE_VHOST_MSG_RESULT_ERR;
956
957         if (vq_is_packed(dev)) {
958                 /*
959                  * Bit[0:14]: avail index
960                  * Bit[15]: avail wrap counter
961                  */
962                 vq->last_avail_idx = val & 0x7fff;
963                 vq->avail_wrap_counter = !!(val & (0x1 << 15));
964                 /*
965                  * Set the used index to the same value as the available one;
966                  * they should be equal since ring processing was stopped
967                  * at GET_VRING_BASE time.
968                  */
969                 vq->last_used_idx = vq->last_avail_idx;
970                 vq->used_wrap_counter = vq->avail_wrap_counter;
971         } else {
972                 vq->last_used_idx = msg->payload.state.num;
973                 vq->last_avail_idx = msg->payload.state.num;
974         }
975
976         VHOST_LOG_CONFIG(INFO,
977                 "(%s) vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n",
978                 dev->ifname, msg->payload.state.index, vq->last_used_idx,
979                 vq->last_avail_idx);
980
981         return RTE_VHOST_MSG_RESULT_OK;
982 }
983
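/*
 * Record one guest-physical to host-physical page mapping, doubling the
 * array when full and merging with the previous entry when the host
 * physical ranges are contiguous.
 */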
984 static int
985 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
986                    uint64_t host_phys_addr, uint64_t size)
987 {
988         struct guest_page *page, *last_page;
989         struct guest_page *old_pages;
990
991         if (dev->nr_guest_pages == dev->max_guest_pages) {
992                 dev->max_guest_pages *= 2;
993                 old_pages = dev->guest_pages;
994                 dev->guest_pages = rte_realloc(dev->guest_pages,
995                                         dev->max_guest_pages * sizeof(*page),
996                                         RTE_CACHE_LINE_SIZE);
997                 if (dev->guest_pages == NULL) {
998                         VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
999                         rte_free(old_pages);
1000                         return -1;
1001                 }
1002         }
1003
1004         if (dev->nr_guest_pages > 0) {
1005                 last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
1006                 /* merge if the two pages are contiguous */
1007                 if (host_phys_addr == last_page->host_phys_addr +
1008                                       last_page->size) {
1009                         last_page->size += size;
1010                         return 0;
1011                 }
1012         }
1013
1014         page = &dev->guest_pages[dev->nr_guest_pages++];
1015         page->guest_phys_addr = guest_phys_addr;
1016         page->host_phys_addr  = host_phys_addr;
1017         page->size = size;
1018
1019         return 0;
1020 }
1021
1022 static int
1023 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
1024                 uint64_t page_size)
1025 {
1026         uint64_t reg_size = reg->size;
1027         uint64_t host_user_addr  = reg->host_user_addr;
1028         uint64_t guest_phys_addr = reg->guest_phys_addr;
1029         uint64_t host_phys_addr;
1030         uint64_t size;
1031
1032         host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
1033         size = page_size - (guest_phys_addr & (page_size - 1));
1034         size = RTE_MIN(size, reg_size);
1035
1036         if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
1037                 return -1;
1038
1039         host_user_addr  += size;
1040         guest_phys_addr += size;
1041         reg_size -= size;
1042
1043         while (reg_size > 0) {
1044                 size = RTE_MIN(reg_size, page_size);
1045                 host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
1046                                                   host_user_addr);
1047                 if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
1048                                 size) < 0)
1049                         return -1;
1050
1051                 host_user_addr  += size;
1052                 guest_phys_addr += size;
1053                 reg_size -= size;
1054         }
1055
1056         /* sort guest page array if over binary search threshold */
1057         if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
1058                 qsort((void *)dev->guest_pages, dev->nr_guest_pages,
1059                         sizeof(struct guest_page), guest_page_addrcmp);
1060         }
1061
1062         return 0;
1063 }
1064
1065 #ifdef RTE_LIBRTE_VHOST_DEBUG
1066 /* TODO: enable it only in debug mode? */
1067 static void
1068 dump_guest_pages(struct virtio_net *dev)
1069 {
1070         uint32_t i;
1071         struct guest_page *page;
1072
1073         for (i = 0; i < dev->nr_guest_pages; i++) {
1074                 page = &dev->guest_pages[i];
1075
1076                 VHOST_LOG_CONFIG(INFO,
1077                         "guest physical page region %u\n"
1078                         "\t guest_phys_addr: %" PRIx64 "\n"
1079                         "\t host_phys_addr : %" PRIx64 "\n"
1080                         "\t size           : %" PRIx64 "\n",
1081                         i,
1082                         page->guest_phys_addr,
1083                         page->host_phys_addr,
1084                         page->size);
1085         }
1086 }
1087 #else
1088 #define dump_guest_pages(dev)
1089 #endif
1090
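/*
 * Return true if the memory layout described by the new message differs,
 * region by region, from the one currently mapped.
 */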
1091 static bool
1092 vhost_memory_changed(struct VhostUserMemory *new,
1093                      struct rte_vhost_memory *old)
1094 {
1095         uint32_t i;
1096
1097         if (new->nregions != old->nregions)
1098                 return true;
1099
1100         for (i = 0; i < new->nregions; ++i) {
1101                 VhostUserMemoryRegion *new_r = &new->regions[i];
1102                 struct rte_vhost_mem_region *old_r = &old->regions[i];
1103
1104                 if (new_r->guest_phys_addr != old_r->guest_phys_addr)
1105                         return true;
1106                 if (new_r->memory_size != old_r->size)
1107                         return true;
1108                 if (new_r->userspace_addr != old_r->guest_user_addr)
1109                         return true;
1110         }
1111
1112         return false;
1113 }
1114
1115 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
1116 static int
1117 vhost_user_postcopy_region_register(struct virtio_net *dev,
1118                 struct rte_vhost_mem_region *reg)
1119 {
1120         struct uffdio_register reg_struct;
1121
1122         /*
1123          * Register the whole mmapped area to ensure
1124          * alignment on page boundaries.
1125          */
1126         reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
1127         reg_struct.range.len = reg->mmap_size;
1128         reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
1129
1130         if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
1131                                 &reg_struct)) {
1132                 VHOST_LOG_CONFIG(ERR, "Failed to register ufd for region "
1133                                 "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
1134                                 (uint64_t)reg_struct.range.start,
1135                                 (uint64_t)reg_struct.range.start +
1136                                 (uint64_t)reg_struct.range.len - 1,
1137                                 dev->postcopy_ufd,
1138                                 strerror(errno));
1139                 return -1;
1140         }
1141
1142         VHOST_LOG_CONFIG(INFO, "\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
1143                         (uint64_t)reg_struct.range.start,
1144                         (uint64_t)reg_struct.range.start +
1145                         (uint64_t)reg_struct.range.len - 1);
1146
1147         return 0;
1148 }
1149 #else
1150 static int
1151 vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused,
1152                 struct rte_vhost_mem_region *reg __rte_unused)
1153 {
1154         return -1;
1155 }
1156 #endif
1157
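/*
 * For postcopy live-migration, share each region's host virtual address
 * back with QEMU, then register the mmapped areas with userfaultfd once
 * QEMU has acknowledged the addresses.
 */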
1158 static int
1159 vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
1160                 struct VhostUserMsg *msg)
1161 {
1162         struct VhostUserMemory *memory;
1163         struct rte_vhost_mem_region *reg;
1164         VhostUserMsg ack_msg;
1165         uint32_t i;
1166
1167         if (!dev->postcopy_listening)
1168                 return 0;
1169
1170         /*
1171          * We have no better way right now than sharing
1172          * DPDK's virtual addresses with QEMU, so that QEMU can
1173          * retrieve the region offset when handling userfaults.
1174          */
1175         memory = &msg->payload.memory;
1176         for (i = 0; i < memory->nregions; i++) {
1177                 reg = &dev->mem->regions[i];
1178                 memory->regions[i].userspace_addr = reg->host_user_addr;
1179         }
1180
1181         /* Send the addresses back to qemu */
1182         msg->fd_num = 0;
1183         send_vhost_reply(main_fd, msg);
1184
1185         /* Wait for QEMU to acknowledge it got the addresses;
1186          * we must wait before we're allowed to generate faults.
1187          */
1188         if (read_vhost_message(main_fd, &ack_msg) <= 0) {
1189                 VHOST_LOG_CONFIG(ERR,
1190                                 "Failed to read qemu ack on postcopy set-mem-table\n");
1191                 return -1;
1192         }
1193
1194         if (validate_msg_fds(&ack_msg, 0) != 0)
1195                 return -1;
1196
1197         if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
1198                 VHOST_LOG_CONFIG(ERR,
1199                                 "Bad qemu ack on postcopy set-mem-table (%d)\n",
1200                                 ack_msg.request.master);
1201                 return -1;
1202         }
1203
1204         /* Now register the regions with userfaultfd so we can use the memory */
1205         for (i = 0; i < memory->nregions; i++) {
1206                 reg = &dev->mem->regions[i];
1207                 if (vhost_user_postcopy_region_register(dev, reg) < 0)
1208                         return -1;
1209         }
1210
1211         return 0;
1212 }
1213
1214 static int
1215 vhost_user_mmap_region(struct virtio_net *dev,
1216                 struct rte_vhost_mem_region *region,
1217                 uint64_t mmap_offset)
1218 {
1219         void *mmap_addr;
1220         uint64_t mmap_size;
1221         uint64_t alignment;
1222         int populate;
1223         int ret;
1224
1225         /* Check for memory_size + mmap_offset overflow */
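        /* In unsigned arithmetic -region->size equals UINT64_MAX - region->size + 1,
         * so the test below fires when mmap_offset + region->size would wrap
         * around a uint64_t.
         */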
1226         if (mmap_offset >= -region->size) {
1227                 VHOST_LOG_CONFIG(ERR,
1228                                 "mmap_offset (%#"PRIx64") and memory_size "
1229                                 "(%#"PRIx64") overflow\n",
1230                                 mmap_offset, region->size);
1231                 return -1;
1232         }
1233
1234         mmap_size = region->size + mmap_offset;
1235
1236         /* On older longterm Linux versions, such as 2.6.32 and 3.2.72,
1237          * mmap() without MAP_ANONYMOUS must be called with a length
1238          * argument aligned to the hugepage size, or it fails with EINVAL.
1239          *
1240          * To avoid that failure, keep the length aligned here.
1241          */
1242         alignment = get_blk_size(region->fd);
1243         if (alignment == (uint64_t)-1) {
1244                 VHOST_LOG_CONFIG(ERR,
1245                                 "couldn't get hugepage size through fstat\n");
1246                 return -1;
1247         }
1248         mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
1249         if (mmap_size == 0) {
1250                 /*
1251                  * This can happen if the initial mmap_size + alignment overflows
1252                  * a uint64_t, which means either the mmap_size or the alignment
1253                  * value is wrong.
1254                  *
1255                  * The kernel's mmap() implementation would return an error, but
1256                  * it is better to catch it here and provide useful info in the logs.
1257                  */
1258                 VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
1259                                 "or alignment (0x%" PRIx64 ") is invalid\n",
1260                                 region->size + mmap_offset, alignment);
1261                 return -1;
1262         }
1263
1264         populate = dev->async_copy ? MAP_POPULATE : 0;
1265         mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
1266                         MAP_SHARED | populate, region->fd, 0);
1267
1268         if (mmap_addr == MAP_FAILED) {
1269                 VHOST_LOG_CONFIG(ERR, "mmap failed (%s).\n", strerror(errno));
1270                 return -1;
1271         }
1272
1273         region->mmap_addr = mmap_addr;
1274         region->mmap_size = mmap_size;
1275         region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
1276
1277         if (dev->async_copy) {
1278                 if (add_guest_pages(dev, region, alignment) < 0) {
1279                         VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
1280                         return -1;
1281                 }
1282
1283                 if (rte_vfio_is_enabled("vfio")) {
1284                         ret = async_dma_map(region, true);
1285                         if (ret) {
1286                                 VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
1287                                 return -1;
1288                         }
1289                 }
1290         }
1291
1292         VHOST_LOG_CONFIG(INFO,
1293                         "guest memory region size: 0x%" PRIx64 "\n"
1294                         "\t guest physical addr: 0x%" PRIx64 "\n"
1295                         "\t guest virtual  addr: 0x%" PRIx64 "\n"
1296                         "\t host  virtual  addr: 0x%" PRIx64 "\n"
1297                         "\t mmap addr : 0x%" PRIx64 "\n"
1298                         "\t mmap size : 0x%" PRIx64 "\n"
1299                         "\t mmap align: 0x%" PRIx64 "\n"
1300                         "\t mmap off  : 0x%" PRIx64 "\n",
1301                         region->size,
1302                         region->guest_phys_addr,
1303                         region->guest_user_addr,
1304                         region->host_user_addr,
1305                         (uint64_t)(uintptr_t)mmap_addr,
1306                         mmap_size,
1307                         alignment,
1308                         mmap_offset);
1309
1310         return 0;
1311 }
1312
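/*
 * Handle VHOST_USER_SET_MEM_TABLE: tear down any previous mappings, mmap
 * each region received from the master, rebuild the guest page array used
 * for address translation, and re-translate rings that were already set.
 */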
1313 static int
1314 vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
1315                         int main_fd)
1316 {
1317         struct virtio_net *dev = *pdev;
1318         struct VhostUserMemory *memory = &msg->payload.memory;
1319         struct rte_vhost_mem_region *reg;
1320         int numa_node = SOCKET_ID_ANY;
1321         uint64_t mmap_offset;
1322         uint32_t i;
1323         bool async_notify = false;
1324
1325         if (validate_msg_fds(msg, memory->nregions) != 0)
1326                 return RTE_VHOST_MSG_RESULT_ERR;
1327
1328         if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
1329                 VHOST_LOG_CONFIG(ERR,
1330                         "too many memory regions (%u)\n", memory->nregions);
1331                 goto close_msg_fds;
1332         }
1333
1334         if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
1335                 VHOST_LOG_CONFIG(INFO,
1336                         "(%d) memory regions not changed\n", dev->vid);
1337
1338                 close_msg_fds(msg);
1339
1340                 return RTE_VHOST_MSG_RESULT_OK;
1341         }
1342
1343         if (dev->mem) {
1344                 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
1345                         struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
1346
1347                         if (vdpa_dev && vdpa_dev->ops->dev_close)
1348                                 vdpa_dev->ops->dev_close(dev->vid);
1349                         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1350                 }
1351
1352                 /* notify the vhost application to stop DMA transfers */
1353                 if (dev->async_copy && dev->notify_ops->vring_state_changed) {
1354                         for (i = 0; i < dev->nr_vring; i++) {
1355                                 dev->notify_ops->vring_state_changed(dev->vid,
1356                                                 i, 0);
1357                         }
1358                         async_notify = true;
1359                 }
1360
1361                 free_mem_region(dev);
1362                 rte_free(dev->mem);
1363                 dev->mem = NULL;
1364         }
1365
1366         /* Flush IOTLB cache as previous HVAs are now invalid */
1367         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1368                 for (i = 0; i < dev->nr_vring; i++)
1369                         vhost_user_iotlb_flush_all(dev->virtqueue[i]);
1370
1371         /*
1372          * If VQ 0 has already been allocated, try to allocate on the same
1373          * NUMA node. It can be reallocated later in numa_realloc().
1374          */
1375         if (dev->nr_vring > 0)
1376                 numa_node = dev->virtqueue[0]->numa_node;
1377
1378         dev->nr_guest_pages = 0;
1379         if (dev->guest_pages == NULL) {
1380                 dev->max_guest_pages = 8;
1381                 dev->guest_pages = rte_zmalloc_socket(NULL,
1382                                         dev->max_guest_pages *
1383                                         sizeof(struct guest_page),
1384                                         RTE_CACHE_LINE_SIZE,
1385                                         numa_node);
1386                 if (dev->guest_pages == NULL) {
1387                         VHOST_LOG_CONFIG(ERR,
1388                                 "(%d) failed to allocate memory "
1389                                 "for dev->guest_pages\n",
1390                                 dev->vid);
1391                         goto close_msg_fds;
1392                 }
1393         }
1394
1395         dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
1396                 sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
1397         if (dev->mem == NULL) {
1398                 VHOST_LOG_CONFIG(ERR,
1399                         "(%d) failed to allocate memory for dev->mem\n",
1400                         dev->vid);
1401                 goto free_guest_pages;
1402         }
1403
1404         for (i = 0; i < memory->nregions; i++) {
1405                 reg = &dev->mem->regions[i];
1406
1407                 reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
1408                 reg->guest_user_addr = memory->regions[i].userspace_addr;
1409                 reg->size            = memory->regions[i].memory_size;
1410                 reg->fd              = msg->fds[i];
1411
1412                 /*
1413                  * Assign an invalid file descriptor value to avoid double
1414                  * closing on the error path.
1415                  */
1416                 msg->fds[i] = -1;
1417
1418                 mmap_offset = memory->regions[i].mmap_offset;
1419
1420                 if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
1421                         VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
1422                         goto free_mem_table;
1423                 }
1424
1425                 dev->mem->nregions++;
1426         }
1427
1428         if (vhost_user_postcopy_register(dev, main_fd, msg) < 0)
1429                 goto free_mem_table;
1430
1431         for (i = 0; i < dev->nr_vring; i++) {
1432                 struct vhost_virtqueue *vq = dev->virtqueue[i];
1433
1434                 if (!vq)
1435                         continue;
1436
1437                 if (vq->desc || vq->avail || vq->used) {
1438                         /*
1439                          * If the memory table got updated, the ring addresses
1440                          * need to be translated again as virtual addresses have
1441                          * changed.
1442                          */
1443                         vring_invalidate(dev, vq);
1444
1445                         dev = translate_ring_addresses(dev, i);
1446                         if (!dev) {
1447                                 dev = *pdev;
1448                                 goto free_mem_table;
1449                         }
1450
1451                         *pdev = dev;
1452                 }
1453         }
1454
1455         dump_guest_pages(dev);
1456
1457         if (async_notify) {
1458                 for (i = 0; i < dev->nr_vring; i++)
1459                         dev->notify_ops->vring_state_changed(dev->vid, i, 1);
1460         }
1461
1462         return RTE_VHOST_MSG_RESULT_OK;
1463
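        /*
         * The error labels below intentionally fall through: each label
         * frees what was allocated after the previous label's resources,
         * so a failure at any step unwinds everything acquired so far.
         */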
1464 free_mem_table:
1465         free_mem_region(dev);
1466         rte_free(dev->mem);
1467         dev->mem = NULL;
1468
1469 free_guest_pages:
1470         rte_free(dev->guest_pages);
1471         dev->guest_pages = NULL;
1472 close_msg_fds:
1473         close_msg_fds(msg);
1474         return RTE_VHOST_MSG_RESULT_ERR;
1475 }
1476
1477 static bool
1478 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
1479 {
1480         bool rings_ok;
1481
1482         if (!vq)
1483                 return false;
1484
1485         if (vq_is_packed(dev))
1486                 rings_ok = vq->desc_packed && vq->driver_event &&
1487                         vq->device_event;
1488         else
1489                 rings_ok = vq->desc && vq->avail && vq->used;
1490
1491         return rings_ok &&
1492                vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1493                vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1494                vq->enabled;
1495 }
1496
1497 #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u
1498
1499 static int
1500 virtio_is_ready(struct virtio_net *dev)
1501 {
1502         struct vhost_virtqueue *vq;
1503         uint32_t i, nr_vring = dev->nr_vring;
1504
1505         if (dev->flags & VIRTIO_DEV_READY)
1506                 return 1;
1507
1508         if (!dev->nr_vring)
1509                 return 0;
1510
1511         if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
1512                 nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;
1513
1514                 if (dev->nr_vring < nr_vring)
1515                         return 0;
1516         }
1517
1518         for (i = 0; i < nr_vring; i++) {
1519                 vq = dev->virtqueue[i];
1520
1521                 if (!vq_is_ready(dev, vq))
1522                         return 0;
1523         }
1524
1525         /* If supported, ensure the frontend is really done with config */
1526         if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
1527                 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
1528                         return 0;
1529
1530         dev->flags |= VIRTIO_DEV_READY;
1531
1532         if (!(dev->flags & VIRTIO_DEV_RUNNING))
1533                 VHOST_LOG_CONFIG(INFO,
1534                         "virtio is now ready for processing.\n");
1535         return 1;
1536 }
1537
1538 static void *
1539 inflight_mem_alloc(const char *name, size_t size, int *fd)
1540 {
1541         void *ptr;
1542         int mfd = -1;
1543         char fname[20] = "/tmp/memfd-XXXXXX";
1544
1545         *fd = -1;
1546 #ifdef MEMFD_SUPPORTED
1547         mfd = memfd_create(name, MFD_CLOEXEC);
1548 #else
1549         RTE_SET_USED(name);
1550 #endif
1551         if (mfd == -1) {
1552                 mfd = mkstemp(fname);
1553                 if (mfd == -1) {
1554                         VHOST_LOG_CONFIG(ERR,
1555                                 "failed to get inflight buffer fd\n");
1556                         return NULL;
1557                 }
1558
1559                 unlink(fname);
1560         }
1561
1562         if (ftruncate(mfd, size) == -1) {
1563                 VHOST_LOG_CONFIG(ERR,
1564                         "failed to alloc inflight buffer\n");
1565                 close(mfd);
1566                 return NULL;
1567         }
1568
1569         ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
1570         if (ptr == MAP_FAILED) {
1571                 VHOST_LOG_CONFIG(ERR,
1572                         "failed to mmap inflight buffer\n");
1573                 close(mfd);
1574                 return NULL;
1575         }
1576
1577         *fd = mfd;
1578         return ptr;
1579 }
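
/*
 * Illustrative sketch (not upstream code): how inflight_mem_alloc() is
 * consumed by the GET_INFLIGHT_FD handler below. The buffer is backed by
 * an anonymous fd (memfd when available, otherwise an unlinked temp file)
 * so it can be shared with the frontend over the vhost-user socket:
 *
 *	int fd;
 *	void *buf = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
 *	if (buf == NULL)
 *		return RTE_VHOST_MSG_RESULT_ERR;
 *	memset(buf, 0, mmap_size);
 *	msg->fds[0] = fd;
 *
 * The fd then travels to the frontend as SCM_RIGHTS ancillary data.
 */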
1580
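/*
 * The per-queue inflight area sizes below mirror the fixed headers of
 * rte_vhost_inflight_info_split and rte_vhost_inflight_info_packed in
 * rte_vhost.h (a uint64_t features field plus four uint16_t fields for
 * the split layout; six uint16_t plus nine uint8_t, padding included,
 * for the packed layout), followed by one inflight descriptor per ring
 * entry and rounded up to INFLIGHT_ALIGNMENT.
 */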
1581 static uint32_t
1582 get_pervq_shm_size_split(uint16_t queue_size)
1583 {
1584         return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
1585                                   queue_size + sizeof(uint64_t) +
1586                                   sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
1587 }
1588
1589 static uint32_t
1590 get_pervq_shm_size_packed(uint16_t queue_size)
1591 {
1592         return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
1593                                   * queue_size + sizeof(uint64_t) +
1594                                   sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
1595                                   INFLIGHT_ALIGNMENT);
1596 }
1597
1598 static int
1599 vhost_user_get_inflight_fd(struct virtio_net **pdev,
1600                            VhostUserMsg *msg,
1601                            int main_fd __rte_unused)
1602 {
1603         struct rte_vhost_inflight_info_packed *inflight_packed;
1604         uint64_t pervq_inflight_size, mmap_size;
1605         uint16_t num_queues, queue_size;
1606         struct virtio_net *dev = *pdev;
1607         int fd, i, j;
1608         int numa_node = SOCKET_ID_ANY;
1609         void *addr;
1610
1611         if (msg->size != sizeof(msg->payload.inflight)) {
1612                 VHOST_LOG_CONFIG(ERR,
1613                         "invalid get_inflight_fd message size: %d\n",
1614                         msg->size);
1615                 return RTE_VHOST_MSG_RESULT_ERR;
1616         }
1617
1618         /*
1619          * If VQ 0 has already been allocated, try to allocate on the same
1620          * NUMA node. It can be reallocated later in numa_realloc().
1621          */
1622         if (dev->nr_vring > 0)
1623                 numa_node = dev->virtqueue[0]->numa_node;
1624
1625         if (dev->inflight_info == NULL) {
1626                 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1627                                 sizeof(struct inflight_mem_info), 0, numa_node);
1628                 if (!dev->inflight_info) {
1629                         VHOST_LOG_CONFIG(ERR,
1630                                 "failed to alloc dev inflight area\n");
1631                         return RTE_VHOST_MSG_RESULT_ERR;
1632                 }
1633                 dev->inflight_info->fd = -1;
1634         }
1635
1636         num_queues = msg->payload.inflight.num_queues;
1637         queue_size = msg->payload.inflight.queue_size;
1638
1639         VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
1640                 num_queues);
1641         VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
1642                 queue_size);
1643
1644         if (vq_is_packed(dev))
1645                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1646         else
1647                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1648
1649         mmap_size = num_queues * pervq_inflight_size;
1650         addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1651         if (!addr) {
1652                 VHOST_LOG_CONFIG(ERR,
1653                         "failed to alloc vhost inflight area\n");
1654                 msg->payload.inflight.mmap_size = 0;
1655                 return RTE_VHOST_MSG_RESULT_ERR;
1656         }
1657         memset(addr, 0, mmap_size);
1658
1659         if (dev->inflight_info->addr) {
1660                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1661                 dev->inflight_info->addr = NULL;
1662         }
1663
1664         if (dev->inflight_info->fd >= 0) {
1665                 close(dev->inflight_info->fd);
1666                 dev->inflight_info->fd = -1;
1667         }
1668
1669         dev->inflight_info->addr = addr;
1670         dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1671         dev->inflight_info->fd = msg->fds[0] = fd;
1672         msg->payload.inflight.mmap_offset = 0;
1673         msg->fd_num = 1;
1674
1675         if (vq_is_packed(dev)) {
1676                 for (i = 0; i < num_queues; i++) {
1677                         inflight_packed =
1678                                 (struct rte_vhost_inflight_info_packed *)addr;
1679                         inflight_packed->used_wrap_counter = 1;
1680                         inflight_packed->old_used_wrap_counter = 1;
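                        /* Chain the descriptors into the initial free list. */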
1681                         for (j = 0; j < queue_size; j++)
1682                                 inflight_packed->desc[j].next = j + 1;
1683                         addr = (void *)((char *)addr + pervq_inflight_size);
1684                 }
1685         }
1686
1687         VHOST_LOG_CONFIG(INFO,
1688                 "send inflight mmap_size: %"PRIu64"\n",
1689                 msg->payload.inflight.mmap_size);
1690         VHOST_LOG_CONFIG(INFO,
1691                 "send inflight mmap_offset: %"PRIu64"\n",
1692                 msg->payload.inflight.mmap_offset);
1693         VHOST_LOG_CONFIG(INFO,
1694                 "send inflight fd: %d\n", msg->fds[0]);
1695
1696         return RTE_VHOST_MSG_RESULT_REPLY;
1697 }
1698
1699 static int
1700 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1701                            int main_fd __rte_unused)
1702 {
1703         uint64_t mmap_size, mmap_offset;
1704         uint16_t num_queues, queue_size;
1705         struct virtio_net *dev = *pdev;
1706         uint32_t pervq_inflight_size;
1707         struct vhost_virtqueue *vq;
1708         void *addr;
1709         int fd, i;
1710         int numa_node = SOCKET_ID_ANY;
1711
1712         fd = msg->fds[0];
1713         if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1714                 VHOST_LOG_CONFIG(ERR,
1715                         "invalid set_inflight_fd message size: %d, fd: %d\n",
1716                         msg->size, fd);
1717                 return RTE_VHOST_MSG_RESULT_ERR;
1718         }
1719
1720         mmap_size = msg->payload.inflight.mmap_size;
1721         mmap_offset = msg->payload.inflight.mmap_offset;
1722         num_queues = msg->payload.inflight.num_queues;
1723         queue_size = msg->payload.inflight.queue_size;
1724
1725         if (vq_is_packed(dev))
1726                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1727         else
1728                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1729
1730         VHOST_LOG_CONFIG(INFO,
1731                 "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1732         VHOST_LOG_CONFIG(INFO,
1733                 "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1734         VHOST_LOG_CONFIG(INFO,
1735                 "set_inflight_fd num_queues: %u\n", num_queues);
1736         VHOST_LOG_CONFIG(INFO,
1737                 "set_inflight_fd queue_size: %u\n", queue_size);
1738         VHOST_LOG_CONFIG(INFO,
1739                 "set_inflight_fd fd: %d\n", fd);
1740         VHOST_LOG_CONFIG(INFO,
1741                 "set_inflight_fd pervq_inflight_size: %d\n",
1742                 pervq_inflight_size);
1743
1744         /*
1745          * If VQ 0 has already been allocated, try to allocate on the same
1746          * NUMA node. It can be reallocated later in numa_realloc().
1747          */
1748         if (dev->nr_vring > 0)
1749                 numa_node = dev->virtqueue[0]->numa_node;
1750
1751         if (!dev->inflight_info) {
1752                 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1753                                 sizeof(struct inflight_mem_info), 0, numa_node);
1754                 if (dev->inflight_info == NULL) {
1755                         VHOST_LOG_CONFIG(ERR,
1756                                 "failed to alloc dev inflight area\n");
1757                         return RTE_VHOST_MSG_RESULT_ERR;
1758                 }
1759                 dev->inflight_info->fd = -1;
1760         }
1761
1762         if (dev->inflight_info->addr) {
1763                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1764                 dev->inflight_info->addr = NULL;
1765         }
1766
1767         addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1768                     fd, mmap_offset);
1769         if (addr == MAP_FAILED) {
1770                 VHOST_LOG_CONFIG(ERR, "failed to mmap shared memory.\n");
1771                 return RTE_VHOST_MSG_RESULT_ERR;
1772         }
1773
1774         if (dev->inflight_info->fd >= 0) {
1775                 close(dev->inflight_info->fd);
1776                 dev->inflight_info->fd = -1;
1777         }
1778
1779         dev->inflight_info->fd = fd;
1780         dev->inflight_info->addr = addr;
1781         dev->inflight_info->size = mmap_size;
1782
1783         for (i = 0; i < num_queues; i++) {
1784                 vq = dev->virtqueue[i];
1785                 if (!vq)
1786                         continue;
1787
1788                 if (vq_is_packed(dev)) {
1789                         vq->inflight_packed = addr;
1790                         vq->inflight_packed->desc_num = queue_size;
1791                 } else {
1792                         vq->inflight_split = addr;
1793                         vq->inflight_split->desc_num = queue_size;
1794                 }
1795                 addr = (void *)((char *)addr + pervq_inflight_size);
1796         }
1797
1798         return RTE_VHOST_MSG_RESULT_OK;
1799 }
1800
1801 static int
1802 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1803                         int main_fd __rte_unused)
1804 {
1805         struct virtio_net *dev = *pdev;
1806         struct vhost_vring_file file;
1807         struct vhost_virtqueue *vq;
1808         int expected_fds;
1809
1810         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1811         if (validate_msg_fds(msg, expected_fds) != 0)
1812                 return RTE_VHOST_MSG_RESULT_ERR;
1813
1814         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1815         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1816                 file.fd = VIRTIO_INVALID_EVENTFD;
1817         else
1818                 file.fd = msg->fds[0];
1819         VHOST_LOG_CONFIG(INFO,
1820                 "vring call idx:%d file:%d\n", file.index, file.fd);
1821
1822         vq = dev->virtqueue[file.index];
1823
1824         if (vq->ready) {
1825                 vq->ready = false;
1826                 vhost_user_notify_queue_state(dev, file.index, 0);
1827         }
1828
1829         if (vq->callfd >= 0)
1830                 close(vq->callfd);
1831
1832         vq->callfd = file.fd;
1833
1834         return RTE_VHOST_MSG_RESULT_OK;
1835 }
1836
1837 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1838                         struct VhostUserMsg *msg,
1839                         int main_fd __rte_unused)
1840 {
1841         int expected_fds;
1842
1843         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1844         if (validate_msg_fds(msg, expected_fds) != 0)
1845                 return RTE_VHOST_MSG_RESULT_ERR;
1846
1847         if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1848                 close(msg->fds[0]);
1849         VHOST_LOG_CONFIG(INFO, "not implemented\n");
1850
1851         return RTE_VHOST_MSG_RESULT_OK;
1852 }
1853
1854 static int
1855 resubmit_desc_compare(const void *a, const void *b)
1856 {
1857         const struct rte_vhost_resubmit_desc *desc0 = a;
1858         const struct rte_vhost_resubmit_desc *desc1 = b;
1859
1860         if (desc1->counter > desc0->counter)
1861                 return 1;
1862
1863         return -1;
1864 }
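
/*
 * Note: resubmit_desc_compare() sorts in descending counter order, so
 * after qsort() resubmit_list[0] holds the most recently submitted
 * descriptor and the global counter resumes from that value + 1.
 */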
1865
1866 static int
1867 vhost_check_queue_inflights_split(struct virtio_net *dev,
1868                                   struct vhost_virtqueue *vq)
1869 {
1870         uint16_t i;
1871         uint16_t resubmit_num = 0, last_io, num;
1872         struct vring_used *used = vq->used;
1873         struct rte_vhost_resubmit_info *resubmit;
1874         struct rte_vhost_inflight_info_split *inflight_split;
1875
1876         if (!(dev->protocol_features &
1877             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1878                 return RTE_VHOST_MSG_RESULT_OK;
1879
1880         /* The frontend may still not support the inflight feature
1881          * even though the protocol feature was negotiated.
1882          */
1883         if (!vq->inflight_split)
1884                 return RTE_VHOST_MSG_RESULT_OK;
1885
1886         if (!vq->inflight_split->version) {
1887                 vq->inflight_split->version = INFLIGHT_VERSION;
1888                 return RTE_VHOST_MSG_RESULT_OK;
1889         }
1890
1891         if (vq->resubmit_inflight)
1892                 return RTE_VHOST_MSG_RESULT_OK;
1893
1894         inflight_split = vq->inflight_split;
1895         vq->global_counter = 0;
1896         last_io = inflight_split->last_inflight_io;
1897
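        /*
         * A mismatch means the last inflight I/O updated the used ring but
         * crashed before its inflight flag was cleared: clear the flag and
         * resync the shadow used index.
         */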
1898         if (inflight_split->used_idx != used->idx) {
1899                 inflight_split->desc[last_io].inflight = 0;
1900                 rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1901                 inflight_split->used_idx = used->idx;
1902         }
1903
1904         for (i = 0; i < inflight_split->desc_num; i++) {
1905                 if (inflight_split->desc[i].inflight == 1)
1906                         resubmit_num++;
1907         }
1908
1909         vq->last_avail_idx += resubmit_num;
1910
1911         if (resubmit_num) {
1912                 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
1913                                 0, vq->numa_node);
1914                 if (!resubmit) {
1915                         VHOST_LOG_CONFIG(ERR,
1916                                 "failed to allocate memory for resubmit info.\n");
1917                         return RTE_VHOST_MSG_RESULT_ERR;
1918                 }
1919
1920                 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
1921                                 resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
1922                                 0, vq->numa_node);
1923                 if (!resubmit->resubmit_list) {
1924                         VHOST_LOG_CONFIG(ERR,
1925                                 "failed to allocate memory for resubmit desc.\n");
1926                         rte_free(resubmit);
1927                         return RTE_VHOST_MSG_RESULT_ERR;
1928                 }
1929
1930                 num = 0;
1931                 for (i = 0; i < inflight_split->desc_num; i++) {
1932                         if (inflight_split->desc[i].inflight == 1) {
1933                                 resubmit->resubmit_list[num].index = i;
1934                                 resubmit->resubmit_list[num].counter =
1935                                         inflight_split->desc[i].counter;
1936                                 num++;
1937                         }
1938                 }
1939                 resubmit->resubmit_num = num;
1940
1941                 if (resubmit->resubmit_num > 1)
1942                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1943                               sizeof(struct rte_vhost_resubmit_desc),
1944                               resubmit_desc_compare);
1945
1946                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1947                 vq->resubmit_inflight = resubmit;
1948         }
1949
1950         return RTE_VHOST_MSG_RESULT_OK;
1951 }
1952
1953 static int
1954 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1955                                    struct vhost_virtqueue *vq)
1956 {
1957         uint16_t i;
1958         uint16_t resubmit_num = 0, old_used_idx, num;
1959         struct rte_vhost_resubmit_info *resubmit;
1960         struct rte_vhost_inflight_info_packed *inflight_packed;
1961
1962         if (!(dev->protocol_features &
1963             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1964                 return RTE_VHOST_MSG_RESULT_OK;
1965
1966         /* The frontend may still not support the inflight feature
1967          * even though the protocol feature was negotiated.
1968          */
1969         if (!vq->inflight_packed)
1970                 return RTE_VHOST_MSG_RESULT_OK;
1971
1972         if (!vq->inflight_packed->version) {
1973                 vq->inflight_packed->version = INFLIGHT_VERSION;
1974                 return RTE_VHOST_MSG_RESULT_OK;
1975         }
1976
1977         if (vq->resubmit_inflight)
1978                 return RTE_VHOST_MSG_RESULT_OK;
1979
1980         inflight_packed = vq->inflight_packed;
1981         vq->global_counter = 0;
1982         old_used_idx = inflight_packed->old_used_idx;
1983
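        /*
         * A used_idx/old_used_idx mismatch means a used-ring update was
         * interrupted. If the entry at old_used_idx is no longer inflight,
         * the update completed, so commit the new snapshot; otherwise roll
         * back to the old one.
         */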
1984         if (inflight_packed->used_idx != old_used_idx) {
1985                 if (inflight_packed->desc[old_used_idx].inflight == 0) {
1986                         inflight_packed->old_used_idx =
1987                                 inflight_packed->used_idx;
1988                         inflight_packed->old_used_wrap_counter =
1989                                 inflight_packed->used_wrap_counter;
1990                         inflight_packed->old_free_head =
1991                                 inflight_packed->free_head;
1992                 } else {
1993                         inflight_packed->used_idx =
1994                                 inflight_packed->old_used_idx;
1995                         inflight_packed->used_wrap_counter =
1996                                 inflight_packed->old_used_wrap_counter;
1997                         inflight_packed->free_head =
1998                                 inflight_packed->old_free_head;
1999                 }
2000         }
2001
2002         for (i = 0; i < inflight_packed->desc_num; i++) {
2003                 if (inflight_packed->desc[i].inflight == 1)
2004                         resubmit_num++;
2005         }
2006
2007         if (resubmit_num) {
2008                 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
2009                                 0, vq->numa_node);
2010                 if (resubmit == NULL) {
2011                         VHOST_LOG_CONFIG(ERR,
2012                                 "failed to allocate memory for resubmit info.\n");
2013                         return RTE_VHOST_MSG_RESULT_ERR;
2014                 }
2015
2016                 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
2017                                 resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
2018                                 0, vq->numa_node);
2019                 if (resubmit->resubmit_list == NULL) {
2020                         VHOST_LOG_CONFIG(ERR,
2021                                 "failed to allocate memory for resubmit desc.\n");
2022                         rte_free(resubmit);
2023                         return RTE_VHOST_MSG_RESULT_ERR;
2024                 }
2025
2026                 num = 0;
2027                 for (i = 0; i < inflight_packed->desc_num; i++) {
2028                         if (inflight_packed->desc[i].inflight == 1) {
2029                                 resubmit->resubmit_list[num].index = i;
2030                                 resubmit->resubmit_list[num].counter =
2031                                         inflight_packed->desc[i].counter;
2032                                 num++;
2033                         }
2034                 }
2035                 resubmit->resubmit_num = num;
2036
2037                 if (resubmit->resubmit_num > 1)
2038                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
2039                               sizeof(struct rte_vhost_resubmit_desc),
2040                               resubmit_desc_compare);
2041
2042                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
2043                 vq->resubmit_inflight = resubmit;
2044         }
2045
2046         return RTE_VHOST_MSG_RESULT_OK;
2047 }
2048
2049 static int
2050 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
2051                         int main_fd __rte_unused)
2052 {
2053         struct virtio_net *dev = *pdev;
2054         struct vhost_vring_file file;
2055         struct vhost_virtqueue *vq;
2056         int expected_fds;
2057
2058         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
2059         if (validate_msg_fds(msg, expected_fds) != 0)
2060                 return RTE_VHOST_MSG_RESULT_ERR;
2061
2062         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2063         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
2064                 file.fd = VIRTIO_INVALID_EVENTFD;
2065         else
2066                 file.fd = msg->fds[0];
2067         VHOST_LOG_CONFIG(INFO,
2068                 "vring kick idx:%d file:%d\n", file.index, file.fd);
2069
2070         /* Interpret ring addresses only when ring is started. */
2071         dev = translate_ring_addresses(dev, file.index);
2072         if (!dev) {
2073                 if (file.fd != VIRTIO_INVALID_EVENTFD)
2074                         close(file.fd);
2075
2076                 return RTE_VHOST_MSG_RESULT_ERR;
2077         }
2078
2079         *pdev = dev;
2080
2081         vq = dev->virtqueue[file.index];
2082
2083         /*
2084          * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
2085          * the ring starts already enabled. Otherwise, it is enabled via
2086          * the SET_VRING_ENABLE message.
2087          */
2088         if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
2089                 vq->enabled = true;
2090         }
2091
2092         if (vq->ready) {
2093                 vq->ready = false;
2094                 vhost_user_notify_queue_state(dev, file.index, 0);
2095         }
2096
2097         if (vq->kickfd >= 0)
2098                 close(vq->kickfd);
2099         vq->kickfd = file.fd;
2100
2101         if (vq_is_packed(dev)) {
2102                 if (vhost_check_queue_inflights_packed(dev, vq)) {
2103                         VHOST_LOG_CONFIG(ERR,
2104                                 "failed to check inflights for vq: %d\n", file.index);
2105                         return RTE_VHOST_MSG_RESULT_ERR;
2106                 }
2107         } else {
2108                 if (vhost_check_queue_inflights_split(dev, vq)) {
2109                         VHOST_LOG_CONFIG(ERR,
2110                                 "failed to check inflights for vq: %d\n", file.index);
2111                         return RTE_VHOST_MSG_RESULT_ERR;
2112                 }
2113         }
2114
2115         return RTE_VHOST_MSG_RESULT_OK;
2116 }
2117
2118 /*
2119  * when virtio is stopped, qemu will send us the GET_VRING_BASE message.
2120  */
2121 static int
2122 vhost_user_get_vring_base(struct virtio_net **pdev,
2123                         struct VhostUserMsg *msg,
2124                         int main_fd __rte_unused)
2125 {
2126         struct virtio_net *dev = *pdev;
2127         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
2128         uint64_t val;
2129
2130         if (validate_msg_fds(msg, 0) != 0)
2131                 return RTE_VHOST_MSG_RESULT_ERR;
2132
2133         /* We have to stop the queue (virtio) if it is running. */
2134         vhost_destroy_device_notify(dev);
2135
2136         dev->flags &= ~VIRTIO_DEV_READY;
2137         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
2138
2139         /* Here we are safe to get the indexes */
2140         if (vq_is_packed(dev)) {
2141                 /*
2142                  * Bit[0:14]: avail index
2143                  * Bit[15]: avail wrap counter
2144                  */
2145                 val = vq->last_avail_idx & 0x7fff;
2146                 val |= vq->avail_wrap_counter << 15;
2147                 msg->payload.state.num = val;
2148         } else {
2149                 msg->payload.state.num = vq->last_avail_idx;
2150         }
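        /*
         * Illustrative sketch (not upstream code): the frontend decodes the
         * packed-ring reply symmetrically to the encoding above:
         *
         *	uint16_t avail_idx = num & 0x7fff;
         *	bool avail_wrap_counter = (num >> 15) & 0x1;
         */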
2151
2152         VHOST_LOG_CONFIG(INFO,
2153                 "vring base idx:%d num:%d\n", msg->payload.state.index,
2154                 msg->payload.state.num);
2155         /*
2156          * Based on the current QEMU vhost-user implementation, this message
2157          * is sent, and only sent, in vhost_vring_stop.
2158          * TODO: clean up the vring; it isn't usable from here on.
2159          */
2160         if (vq->kickfd >= 0)
2161                 close(vq->kickfd);
2162
2163         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
2164
2165         if (vq->callfd >= 0)
2166                 close(vq->callfd);
2167
2168         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
2169
2170         vq->signalled_used_valid = false;
2171
2172         if (vq_is_packed(dev)) {
2173                 rte_free(vq->shadow_used_packed);
2174                 vq->shadow_used_packed = NULL;
2175         } else {
2176                 rte_free(vq->shadow_used_split);
2177                 vq->shadow_used_split = NULL;
2178         }
2179
2180         rte_free(vq->batch_copy_elems);
2181         vq->batch_copy_elems = NULL;
2182
2183         rte_free(vq->log_cache);
2184         vq->log_cache = NULL;
2185
2186         msg->size = sizeof(msg->payload.state);
2187         msg->fd_num = 0;
2188
2189         vhost_user_iotlb_flush_all(vq);
2190
2191         vring_invalidate(dev, vq);
2192
2193         return RTE_VHOST_MSG_RESULT_REPLY;
2194 }
2195
2196 /*
2197  * When the virtio queues are ready to work, QEMU sends us this message
2198  * to enable or disable the virtio queue pair.
2199  */
2200 static int
2201 vhost_user_set_vring_enable(struct virtio_net **pdev,
2202                         struct VhostUserMsg *msg,
2203                         int main_fd __rte_unused)
2204 {
2205         struct virtio_net *dev = *pdev;
2206         bool enable = !!msg->payload.state.num;
2207         int index = (int)msg->payload.state.index;
2208
2209         if (validate_msg_fds(msg, 0) != 0)
2210                 return RTE_VHOST_MSG_RESULT_ERR;
2211
2212         VHOST_LOG_CONFIG(INFO,
2213                 "set queue enable: %d to qp idx: %d\n",
2214                 enable, index);
2215
2216         if (enable && dev->virtqueue[index]->async) {
2217                 if (dev->virtqueue[index]->async->pkts_inflight_n) {
2218                         VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
2219                                 "async inflight packets must be completed first\n");
2220                         return RTE_VHOST_MSG_RESULT_ERR;
2221                 }
2222         }
2223
2224         dev->virtqueue[index]->enabled = enable;
2225
2226         return RTE_VHOST_MSG_RESULT_OK;
2227 }
2228
2229 static int
2230 vhost_user_get_protocol_features(struct virtio_net **pdev,
2231                         struct VhostUserMsg *msg,
2232                         int main_fd __rte_unused)
2233 {
2234         struct virtio_net *dev = *pdev;
2235         uint64_t features, protocol_features;
2236
2237         if (validate_msg_fds(msg, 0) != 0)
2238                 return RTE_VHOST_MSG_RESULT_ERR;
2239
2240         rte_vhost_driver_get_features(dev->ifname, &features);
2241         rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2242
2243         msg->payload.u64 = protocol_features;
2244         msg->size = sizeof(msg->payload.u64);
2245         msg->fd_num = 0;
2246
2247         return RTE_VHOST_MSG_RESULT_REPLY;
2248 }
2249
2250 static int
2251 vhost_user_set_protocol_features(struct virtio_net **pdev,
2252                         struct VhostUserMsg *msg,
2253                         int main_fd __rte_unused)
2254 {
2255         struct virtio_net *dev = *pdev;
2256         uint64_t protocol_features = msg->payload.u64;
2257         uint64_t slave_protocol_features = 0;
2258
2259         if (validate_msg_fds(msg, 0) != 0)
2260                 return RTE_VHOST_MSG_RESULT_ERR;
2261
2262         rte_vhost_driver_get_protocol_features(dev->ifname,
2263                         &slave_protocol_features);
2264         if (protocol_features & ~slave_protocol_features) {
2265                 VHOST_LOG_CONFIG(ERR,
2266                         "(%d) received invalid protocol features.\n",
2267                         dev->vid);
2268                 return RTE_VHOST_MSG_RESULT_ERR;
2269         }
2270
2271         dev->protocol_features = protocol_features;
2272         VHOST_LOG_CONFIG(INFO,
2273                 "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2274                 dev->protocol_features);
2275
2276         return RTE_VHOST_MSG_RESULT_OK;
2277 }
2278
2279 static int
2280 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2281                         int main_fd __rte_unused)
2282 {
2283         struct virtio_net *dev = *pdev;
2284         int fd = msg->fds[0];
2285         uint64_t size, off;
2286         void *addr;
2287         uint32_t i;
2288
2289         if (validate_msg_fds(msg, 1) != 0)
2290                 return RTE_VHOST_MSG_RESULT_ERR;
2291
2292         if (fd < 0) {
2293                 VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2294                 return RTE_VHOST_MSG_RESULT_ERR;
2295         }
2296
2297         if (msg->size != sizeof(VhostUserLog)) {
2298                 VHOST_LOG_CONFIG(ERR,
2299                         "invalid log base msg size: %"PRId32" != %d\n",
2300                         msg->size, (int)sizeof(VhostUserLog));
2301                 goto close_msg_fds;
2302         }
2303
2304         size = msg->payload.log.mmap_size;
2305         off  = msg->payload.log.mmap_offset;
2306
2307         /* Check for mmap size and offset overflow, i.e. off + size wrapping around. */
2308         if (off >= -size) {
2309                 VHOST_LOG_CONFIG(ERR,
2310                         "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2311                         off, size);
2312                 goto close_msg_fds;
2313         }
2314
2315         VHOST_LOG_CONFIG(INFO,
2316                 "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
2317                 size, off);
2318
2319         /*
2320          * mmap from 0 to workaround a hugepage mmap bug: mmap will
2321          * fail when offset is not page size aligned.
2322          */
2323         addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2324         close(fd);
2325         if (addr == MAP_FAILED) {
2326                 VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2327                 return RTE_VHOST_MSG_RESULT_ERR;
2328         }
2329
2330         /*
2331          * Free previously mapped log memory, as VHOST_USER_SET_LOG_BASE
2332          * may occasionally be received multiple times.
2333          */
2334         if (dev->log_addr) {
2335                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2336         }
2337         dev->log_addr = (uint64_t)(uintptr_t)addr;
2338         dev->log_base = dev->log_addr + off;
2339         dev->log_size = size;
2340
2341         for (i = 0; i < dev->nr_vring; i++) {
2342                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2343
2344                 rte_free(vq->log_cache);
2345                 vq->log_cache = NULL;
2346                 vq->log_cache_nb_elem = 0;
2347                 vq->log_cache = rte_malloc_socket("vq log cache",
2348                                 sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
2349                                 0, vq->numa_node);
2350                 /*
2351                  * If log cache allocation fails, don't fail migration; no
2352                  * caching will be done, which will impact performance.
2353                  */
2354                 if (!vq->log_cache)
2355                         VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
2356         }
2357
2358         /*
2359          * The spec is not clear about it (yet), but QEMU doesn't expect
2360          * any payload in the reply.
2361          */
2362         msg->size = 0;
2363         msg->fd_num = 0;
2364
2365         return RTE_VHOST_MSG_RESULT_REPLY;
2366
2367 close_msg_fds:
2368         close_msg_fds(msg);
2369         return RTE_VHOST_MSG_RESULT_ERR;
2370 }
2371
2372 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2373                         struct VhostUserMsg *msg,
2374                         int main_fd __rte_unused)
2375 {
2376         if (validate_msg_fds(msg, 1) != 0)
2377                 return RTE_VHOST_MSG_RESULT_ERR;
2378
2379         close(msg->fds[0]);
2380         VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2381
2382         return RTE_VHOST_MSG_RESULT_OK;
2383 }
2384
2385 /*
2386  * An RARP packet is constructed and broadcast to notify switches about
2387  * the new location of the migrated VM, so that packets from outside will
2388  * not be lost after migration.
2389  *
2390  * However, we don't actually "send" an RARP packet here; instead, we set
2391  * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
2392  */
2393 static int
2394 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2395                         int main_fd __rte_unused)
2396 {
2397         struct virtio_net *dev = *pdev;
2398         uint8_t *mac = (uint8_t *)&msg->payload.u64;
2399         struct rte_vdpa_device *vdpa_dev;
2400
2401         if (validate_msg_fds(msg, 0) != 0)
2402                 return RTE_VHOST_MSG_RESULT_ERR;
2403
2404         VHOST_LOG_CONFIG(DEBUG,
2405                 "MAC: " RTE_ETHER_ADDR_PRT_FMT "\n",
2406                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2407         memcpy(dev->mac.addr_bytes, mac, RTE_ETHER_ADDR_LEN);
2408
2409         /*
2410          * Set the flag to inject a RARP broadcast packet at
2411          * rte_vhost_dequeue_burst().
2412          *
2413          * __ATOMIC_RELEASE ordering is for making sure the mac is
2414          * copied before the flag is set.
2415          */
2416         __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2417         vdpa_dev = dev->vdpa_dev;
2418         if (vdpa_dev && vdpa_dev->ops->migration_done)
2419                 vdpa_dev->ops->migration_done(dev->vid);
2420
2421         return RTE_VHOST_MSG_RESULT_OK;
2422 }
2423
2424 static int
2425 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2426                         int main_fd __rte_unused)
2427 {
2428         struct virtio_net *dev = *pdev;
2429
2430         if (validate_msg_fds(msg, 0) != 0)
2431                 return RTE_VHOST_MSG_RESULT_ERR;
2432
2433         if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2434                         msg->payload.u64 > VIRTIO_MAX_MTU) {
2435                 VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2436                                 msg->payload.u64);
2437
2438                 return RTE_VHOST_MSG_RESULT_ERR;
2439         }
2440
2441         dev->mtu = msg->payload.u64;
2442
2443         return RTE_VHOST_MSG_RESULT_OK;
2444 }
2445
2446 static int
2447 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2448                         int main_fd __rte_unused)
2449 {
2450         struct virtio_net *dev = *pdev;
2451         int fd = msg->fds[0];
2452
2453         if (validate_msg_fds(msg, 1) != 0)
2454                 return RTE_VHOST_MSG_RESULT_ERR;
2455
2456         if (fd < 0) {
2457                 VHOST_LOG_CONFIG(ERR,
2458                                 "Invalid file descriptor for slave channel (%d)\n",
2459                                 fd);
2460                 return RTE_VHOST_MSG_RESULT_ERR;
2461         }
2462
2463         if (dev->slave_req_fd >= 0)
2464                 close(dev->slave_req_fd);
2465
2466         dev->slave_req_fd = fd;
2467
2468         return RTE_VHOST_MSG_RESULT_OK;
2469 }
2470
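/*
 * The two helpers below return 1 when the IOTLB message range
 * [iova, iova + size) overlaps any of the vring structures, using the
 * usual half-open interval test: addr < end && addr + len > start.
 */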
2471 static int
2472 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2473 {
2474         struct vhost_vring_addr *ra;
2475         uint64_t start, end, len;
2476
2477         start = imsg->iova;
2478         end = start + imsg->size;
2479
2480         ra = &vq->ring_addrs;
2481         len = sizeof(struct vring_desc) * vq->size;
2482         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2483                 return 1;
2484
2485         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2486         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2487                 return 1;
2488
2489         len = sizeof(struct vring_used) +
2490                sizeof(struct vring_used_elem) * vq->size;
2491         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2492                 return 1;
2493
2494         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2495                 len = sizeof(uint64_t);
2496                 if (ra->log_guest_addr < end &&
2497                     (ra->log_guest_addr + len) > start)
2498                         return 1;
2499         }
2500
2501         return 0;
2502 }
2503
2504 static int
2505 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2506 {
2507         struct vhost_vring_addr *ra;
2508         uint64_t start, end, len;
2509
2510         start = imsg->iova;
2511         end = start + imsg->size;
2512
2513         ra = &vq->ring_addrs;
2514         len = sizeof(struct vring_packed_desc) * vq->size;
2515         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2516                 return 1;
2517
2518         len = sizeof(struct vring_packed_desc_event);
2519         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2520                 return 1;
2521
2522         len = sizeof(struct vring_packed_desc_event);
2523         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2524                 return 1;
2525
2526         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2527                 len = sizeof(uint64_t);
2528                 if (ra->log_guest_addr < end &&
2529                     (ra->log_guest_addr + len) > start)
2530                         return 1;
2531         }
2532
2533         return 0;
2534 }
2535
2536 static int is_vring_iotlb(struct virtio_net *dev,
2537                           struct vhost_virtqueue *vq,
2538                           struct vhost_iotlb_msg *imsg)
2539 {
2540         if (vq_is_packed(dev))
2541                 return is_vring_iotlb_packed(vq, imsg);
2542         else
2543                 return is_vring_iotlb_split(vq, imsg);
2544 }
2545
2546 static int
2547 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2548                         int main_fd __rte_unused)
2549 {
2550         struct virtio_net *dev = *pdev;
2551         struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2552         uint16_t i;
2553         uint64_t vva, len;
2554
2555         if (validate_msg_fds(msg, 0) != 0)
2556                 return RTE_VHOST_MSG_RESULT_ERR;
2557
2558         switch (imsg->type) {
2559         case VHOST_IOTLB_UPDATE:
2560                 len = imsg->size;
2561                 vva = qva_to_vva(dev, imsg->uaddr, &len);
2562                 if (!vva)
2563                         return RTE_VHOST_MSG_RESULT_ERR;
2564
2565                 for (i = 0; i < dev->nr_vring; i++) {
2566                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2567
2568                         if (!vq)
2569                                 continue;
2570
2571                         vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2572                                         len, imsg->perm);
2573
2574                         if (is_vring_iotlb(dev, vq, imsg))
2575                                 *pdev = dev = translate_ring_addresses(dev, i);
2576                 }
2577                 break;
2578         case VHOST_IOTLB_INVALIDATE:
2579                 for (i = 0; i < dev->nr_vring; i++) {
2580                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2581
2582                         if (!vq)
2583                                 continue;
2584
2585                         vhost_user_iotlb_cache_remove(vq, imsg->iova,
2586                                         imsg->size);
2587
2588                         if (is_vring_iotlb(dev, vq, imsg))
2589                                 vring_invalidate(dev, vq);
2590                 }
2591                 break;
2592         default:
2593                 VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2594                                 imsg->type);
2595                 return RTE_VHOST_MSG_RESULT_ERR;
2596         }
2597
2598         return RTE_VHOST_MSG_RESULT_OK;
2599 }
2600
2601 static int
2602 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2603                         struct VhostUserMsg *msg,
2604                         int main_fd __rte_unused)
2605 {
2606         struct virtio_net *dev = *pdev;
2607 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2608         struct uffdio_api api_struct;
2609
2610         if (validate_msg_fds(msg, 0) != 0)
2611                 return RTE_VHOST_MSG_RESULT_ERR;
2612
2613         dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2614
2615         if (dev->postcopy_ufd == -1) {
2616                 VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2617                         strerror(errno));
2618                 return RTE_VHOST_MSG_RESULT_ERR;
2619         }
2620         api_struct.api = UFFD_API;
2621         api_struct.features = 0;
2622         if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2623                 VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2624                         strerror(errno));
2625                 close(dev->postcopy_ufd);
2626                 dev->postcopy_ufd = -1;
2627                 return RTE_VHOST_MSG_RESULT_ERR;
2628         }
2629         msg->fds[0] = dev->postcopy_ufd;
2630         msg->fd_num = 1;
2631
2632         return RTE_VHOST_MSG_RESULT_REPLY;
2633 #else
2634         dev->postcopy_ufd = -1;
2635         msg->fd_num = 0;
2636
2637         return RTE_VHOST_MSG_RESULT_ERR;
2638 #endif
2639 }
2640
2641 static int
2642 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2643                         struct VhostUserMsg *msg __rte_unused,
2644                         int main_fd __rte_unused)
2645 {
2646         struct virtio_net *dev = *pdev;
2647
2648         if (validate_msg_fds(msg, 0) != 0)
2649                 return RTE_VHOST_MSG_RESULT_ERR;
2650
2651         if (dev->mem && dev->mem->nregions) {
2652                 VHOST_LOG_CONFIG(ERR,
2653                         "Regions already registered at postcopy-listen\n");
2654                 return RTE_VHOST_MSG_RESULT_ERR;
2655         }
2656         dev->postcopy_listening = 1;
2657
2658         return RTE_VHOST_MSG_RESULT_OK;
2659 }
2660
2661 static int
2662 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2663                         int main_fd __rte_unused)
2664 {
2665         struct virtio_net *dev = *pdev;
2666
2667         if (validate_msg_fds(msg, 0) != 0)
2668                 return RTE_VHOST_MSG_RESULT_ERR;
2669
2670         dev->postcopy_listening = 0;
2671         if (dev->postcopy_ufd >= 0) {
2672                 close(dev->postcopy_ufd);
2673                 dev->postcopy_ufd = -1;
2674         }
2675
2676         msg->payload.u64 = 0;
2677         msg->size = sizeof(msg->payload.u64);
2678         msg->fd_num = 0;
2679
2680         return RTE_VHOST_MSG_RESULT_REPLY;
2681 }
2682
2683 static int
2684 vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2685                       int main_fd __rte_unused)
2686 {
2687         struct virtio_net *dev = *pdev;
2688
2689         if (validate_msg_fds(msg, 0) != 0)
2690                 return RTE_VHOST_MSG_RESULT_ERR;
2691
2692         msg->payload.u64 = dev->status;
2693         msg->size = sizeof(msg->payload.u64);
2694         msg->fd_num = 0;
2695
2696         return RTE_VHOST_MSG_RESULT_REPLY;
2697 }
2698
2699 static int
2700 vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2701                         int main_fd __rte_unused)
2702 {
2703         struct virtio_net *dev = *pdev;
2704
2705         if (validate_msg_fds(msg, 0) != 0)
2706                 return RTE_VHOST_MSG_RESULT_ERR;
2707
2708         /* As per the Virtio specification, the device status is 8 bits long */
2709         if (msg->payload.u64 > UINT8_MAX) {
2710                 VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
2711                                 msg->payload.u64);
2712                 return RTE_VHOST_MSG_RESULT_ERR;
2713         }
2714
2715         dev->status = msg->payload.u64;
2716
2717         if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
2718             (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
2719                 VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
2720                 /*
2721                  * Clear the bit to let the driver know about the feature
2722                  * negotiation failure
2723                  */
2724                 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
2725         }
2726
2727         VHOST_LOG_CONFIG(INFO, "New device status (0x%08x):\n"
2728                         "\t-RESET: %u\n"
2729                         "\t-ACKNOWLEDGE: %u\n"
2730                         "\t-DRIVER: %u\n"
2731                         "\t-FEATURES_OK: %u\n"
2732                         "\t-DRIVER_OK: %u\n"
2733                         "\t-DEVICE_NEED_RESET: %u\n"
2734                         "\t-FAILED: %u\n",
2735                         dev->status,
2736                         (dev->status == VIRTIO_DEVICE_STATUS_RESET),
2737                         !!(dev->status & VIRTIO_DEVICE_STATUS_ACK),
2738                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER),
2739                         !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK),
2740                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK),
2741                         !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET),
2742                         !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
2743
2744         return RTE_VHOST_MSG_RESULT_OK;
2745 }
2746
2747 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2748                                         struct VhostUserMsg *msg,
2749                                         int main_fd);
2750 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2751         [VHOST_USER_NONE] = NULL,
2752         [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2753         [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2754         [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2755         [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2756         [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2757         [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2758         [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2759         [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2760         [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2761         [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2762         [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2763         [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2764         [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2765         [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2766         [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2767         [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2768         [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2769         [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2770         [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2771         [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2772         [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2773         [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2774         [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2775         [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2776         [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2777         [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2778         [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2779         [VHOST_USER_SET_STATUS] = vhost_user_set_status,
2780         [VHOST_USER_GET_STATUS] = vhost_user_get_status,
2781 };
2782
2783 /* Return the number of bytes read on success, or a negative value on failure. */
2784 static int
2785 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2786 {
2787         int ret;
2788
2789         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2790                 msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2791         if (ret <= 0) {
2792                 return ret;
2793         } else if (ret != VHOST_USER_HDR_SIZE) {
2794                 VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2795                 close_msg_fds(msg);
2796                 return -1;
2797         }
2798
2799         if (msg->size) {
2800                 if (msg->size > sizeof(msg->payload)) {
2801                         VHOST_LOG_CONFIG(ERR,
2802                                 "invalid msg size: %d\n", msg->size);
2803                         return -1;
2804                 }
2805                 ret = read(sockfd, &msg->payload, msg->size);
2806                 if (ret <= 0)
2807                         return ret;
2808                 if (ret != (int)msg->size) {
2809                         VHOST_LOG_CONFIG(ERR,
2810                                 "read control message failed\n");
2811                         return -1;
2812                 }
2813         }
2814
2815         return ret;
2816 }
2817
2818 static int
2819 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2820 {
2821         if (!msg)
2822                 return 0;
2823
2824         return send_fd_message(sockfd, (char *)msg,
2825                 VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2826 }
2827
2828 static int
2829 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2830 {
2831         if (!msg)
2832                 return 0;
2833
2834         msg->flags &= ~VHOST_USER_VERSION_MASK;
2835         msg->flags &= ~VHOST_USER_NEED_REPLY;
2836         msg->flags |= VHOST_USER_VERSION;
2837         msg->flags |= VHOST_USER_REPLY_MASK;
2838
2839         return send_vhost_message(sockfd, msg);
2840 }
2841
2842 static int
2843 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2844 {
2845         int ret;
2846
2847         if (msg->flags & VHOST_USER_NEED_REPLY)
2848                 rte_spinlock_lock(&dev->slave_req_lock);
2849
2850         ret = send_vhost_message(dev->slave_req_fd, msg);
2851         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2852                 rte_spinlock_unlock(&dev->slave_req_lock);
2853
2854         return ret;
2855 }
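
/*
 * Note: when VHOST_USER_NEED_REPLY is set and the send succeeds, the
 * slave request lock stays held; it is released once the slave's reply
 * has been processed by the reply-handling path elsewhere in this file.
 */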

/*
 * Allocate the vring referenced by the message, if it hasn't been
 * allocated yet.
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
			struct VhostUserMsg *msg)
{
	uint32_t vring_idx;

	switch (msg->request.master) {
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
		vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
		break;
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_GET_VRING_BASE:
	case VHOST_USER_SET_VRING_ENABLE:
		vring_idx = msg->payload.state.index;
		break;
	case VHOST_USER_SET_VRING_ADDR:
		vring_idx = msg->payload.addr.index;
		break;
	default:
		return 0;
	}

	if (vring_idx >= VHOST_MAX_VRING) {
		VHOST_LOG_CONFIG(ERR,
			"invalid vring index: %u\n", vring_idx);
		return -1;
	}

	if (dev->virtqueue[vring_idx])
		return 0;

	return alloc_vring_queue(dev, vring_idx);
}
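
/*
 * Illustrative sketch (editor-added, not upstream code): for the
 * KICK/CALL/ERR requests handled above, payload.u64 packs the vring index
 * in its low bits next to VHOST_USER_VRING_NOFD_MASK, which signals that
 * no file descriptor accompanies the message. The helper name is
 * hypothetical.
 */
static __rte_unused void
example_parse_vring_fd_payload(uint64_t u64, uint32_t *idx, bool *has_fd)
{
	*idx = u64 & VHOST_USER_VRING_IDX_MASK;
	*has_fd = (u64 & VHOST_USER_VRING_NOFD_MASK) == 0;
}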

static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	/* The virtqueue array may be sparse, so walk it until all allocated
	 * rings have been locked, rather than indexing up to nr_vring.
	 */
	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_lock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_unlock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}
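
/*
 * Illustrative sketch (editor-added, not upstream code): the datapath
 * side of the access_lock protocol quiesced by the two helpers above.
 * Enqueue/dequeue paths hold the per-virtqueue lock for the duration of
 * one burst, so once vhost_user_lock_all_queue_pairs() returns, no worker
 * thread is inside a ring. The helper name is hypothetical.
 */
static __rte_unused void
example_datapath_burst(struct vhost_virtqueue *vq)
{
	rte_spinlock_lock(&vq->access_lock);
	/* ... process one burst of descriptors here ... */
	rte_spinlock_unlock(&vq->access_lock);
}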

int
vhost_user_msg_handler(int vid, int fd)
{
	struct virtio_net *dev;
	struct VhostUserMsg msg;
	struct rte_vdpa_device *vdpa_dev;
	int ret;
	int unlock_required = 0;
	bool handled;
	int request;
	uint32_t i;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	if (!dev->notify_ops) {
		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
		if (!dev->notify_ops) {
			VHOST_LOG_CONFIG(ERR,
				"failed to get callback ops for driver %s\n",
				dev->ifname);
			return -1;
		}
	}

	ret = read_vhost_message(fd, &msg);
	if (ret <= 0) {
		if (ret < 0)
			VHOST_LOG_CONFIG(ERR,
				"vhost read message failed\n");
		else
			VHOST_LOG_CONFIG(INFO,
				"vhost peer closed\n");

		return -1;
	}

	ret = 0;
	request = msg.request.master;
	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
			vhost_message_str[request]) {
		if (request != VHOST_USER_IOTLB_MSG)
			VHOST_LOG_CONFIG(INFO, "read message %s\n",
				vhost_message_str[request]);
		else
			VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
				vhost_message_str[request]);
	} else {
		VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
	}

	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
			"failed to alloc queue\n");
		return -1;
	}

	/*
	 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
	 * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
	 * and the device is destroyed. destroy_device() waits for the queues
	 * to become inactive, so it is safe. Otherwise, taking the
	 * access_lock here would cause a deadlock.
	 */
	switch (request) {
	case VHOST_USER_SET_FEATURES:
	case VHOST_USER_SET_PROTOCOL_FEATURES:
	case VHOST_USER_SET_OWNER:
	case VHOST_USER_SET_MEM_TABLE:
	case VHOST_USER_SET_LOG_BASE:
	case VHOST_USER_SET_LOG_FD:
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
	case VHOST_USER_SET_VRING_ENABLE:
	case VHOST_USER_SEND_RARP:
	case VHOST_USER_NET_SET_MTU:
	case VHOST_USER_SET_SLAVE_REQ_FD:
		if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
			vhost_user_lock_all_queue_pairs(dev);
			unlock_required = 1;
		}
		break;
	default:
		break;
	}

	handled = false;
	if (dev->extern_ops.pre_msg_handle) {
		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
				(void *)&msg);
		switch (ret) {
		case RTE_VHOST_MSG_RESULT_REPLY:
			send_vhost_reply(fd, &msg);
			/* Fall-through */
		case RTE_VHOST_MSG_RESULT_ERR:
		case RTE_VHOST_MSG_RESULT_OK:
			handled = true;
			goto skip_to_post_handle;
		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
		default:
			break;
		}
	}

	if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
		if (!vhost_message_handlers[request])
			goto skip_to_post_handle;
		ret = vhost_message_handlers[request](&dev, &msg, fd);

		switch (ret) {
		case RTE_VHOST_MSG_RESULT_ERR:
			VHOST_LOG_CONFIG(ERR,
				"Processing %s failed.\n",
				vhost_message_str[request]);
			handled = true;
			break;
		case RTE_VHOST_MSG_RESULT_OK:
			VHOST_LOG_CONFIG(DEBUG,
				"Processing %s succeeded.\n",
				vhost_message_str[request]);
			handled = true;
			break;
		case RTE_VHOST_MSG_RESULT_REPLY:
			VHOST_LOG_CONFIG(DEBUG,
				"Processing %s succeeded and needs reply.\n",
				vhost_message_str[request]);
			send_vhost_reply(fd, &msg);
			handled = true;
			break;
		default:
			break;
		}
	}

skip_to_post_handle:
	if (ret != RTE_VHOST_MSG_RESULT_ERR &&
			dev->extern_ops.post_msg_handle) {
		ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
				(void *)&msg);
		switch (ret) {
		case RTE_VHOST_MSG_RESULT_REPLY:
			send_vhost_reply(fd, &msg);
			/* Fall-through */
		case RTE_VHOST_MSG_RESULT_ERR:
		case RTE_VHOST_MSG_RESULT_OK:
			handled = true;
			break;
		case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
		default:
			break;
		}
	}

	/* If the message was not handled at this stage, treat it as an error. */
	if (!handled) {
		VHOST_LOG_CONFIG(ERR,
			"vhost message (req: %d) was not handled.\n", request);
		close_msg_fds(&msg);
		ret = RTE_VHOST_MSG_RESULT_ERR;
	}

	/*
	 * If the request required a reply that was already sent,
	 * this optional reply-ack won't be sent as the
	 * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
	 */
	if (msg.flags & VHOST_USER_NEED_REPLY) {
		msg.payload.u64 = (ret == RTE_VHOST_MSG_RESULT_ERR);
		msg.size = sizeof(msg.payload.u64);
		msg.fd_num = 0;
		send_vhost_reply(fd, &msg);
	} else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
		VHOST_LOG_CONFIG(ERR,
			"vhost message handling failed.\n");
		return -1;
	}

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];
		bool cur_ready = vq_is_ready(dev, vq);

		if (cur_ready != (vq && vq->ready)) {
			vq->ready = cur_ready;
			vhost_user_notify_queue_state(dev, i, cur_ready);
		}
	}

	if (unlock_required)
		vhost_user_unlock_all_queue_pairs(dev);

	if (!virtio_is_ready(dev))
		goto out;

	/*
	 * Virtio is now ready. If not done already, it is time
	 * to notify the application it can process the rings and
	 * configure the vDPA device if present.
	 */

	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (dev->notify_ops->new_device(dev->vid) == 0)
			dev->flags |= VIRTIO_DEV_RUNNING;
	}

	vdpa_dev = dev->vdpa_dev;
	if (!vdpa_dev)
		goto out;

	if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
		if (vdpa_dev->ops->dev_conf(dev->vid))
			VHOST_LOG_CONFIG(ERR,
				"Failed to configure vDPA device\n");
		else
			dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
	}

out:
	return 0;
}
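
/*
 * Illustrative sketch (editor-added, not upstream code): the shape of an
 * external handler as dispatched above through
 * dev->extern_ops.pre_msg_handle (an rte_vhost_msg_handle callback).
 * Returning RTE_VHOST_MSG_RESULT_NOT_HANDLED hands the message on to the
 * built-in handlers. The function name and filtering logic are
 * hypothetical.
 */
static __rte_unused enum rte_vhost_msg_result
example_pre_msg_handle(int vid, void *m)
{
	struct VhostUserMsg *msg = m;

	RTE_SET_USED(vid);

	/* Claim nothing; let the built-in handlers process the message. */
	if (msg->request.master != VHOST_USER_NONE)
		return RTE_VHOST_MSG_RESULT_NOT_HANDLED;

	return RTE_VHOST_MSG_RESULT_ERR;
}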

static int
process_slave_message_reply(struct virtio_net *dev,
		const struct VhostUserMsg *msg)
{
	struct VhostUserMsg msg_reply;
	int ret;

	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;

	ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
	if (ret <= 0) {
		if (ret < 0)
			VHOST_LOG_CONFIG(ERR,
				"vhost read slave message reply failed\n");
		else
			VHOST_LOG_CONFIG(INFO,
				"vhost peer closed\n");
		ret = -1;
		goto out;
	}

	ret = 0;
	if (msg_reply.request.slave != msg->request.slave) {
		VHOST_LOG_CONFIG(ERR,
			"Received unexpected msg type (%u), expected %u\n",
			msg_reply.request.slave, msg->request.slave);
		ret = -1;
		goto out;
	}

	ret = msg_reply.payload.u64 ? -1 : 0;

out:
	rte_spinlock_unlock(&dev->slave_req_lock);
	return ret;
}

int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.iotlb),
		.payload.iotlb = {
			.iova = iova,
			.perm = perm,
			.type = VHOST_IOTLB_MISS,
		},
	};

	ret = send_vhost_message(dev->slave_req_fd, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to send IOTLB miss message (%d)\n",
				ret);
		return ret;
	}

	return 0;
}
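
/*
 * Illustrative sketch (editor-added, not upstream code): the IOTLB entry
 * the master is expected to return (in a VHOST_USER_IOTLB_MSG) once it
 * resolves the miss reported above. All field values here are
 * placeholders, and the helper name is hypothetical.
 */
static __rte_unused void
example_fill_iotlb_update(struct vhost_iotlb_msg *reply, uint64_t iova)
{
	reply->iova = iova;
	reply->uaddr = 0;	/* master-chosen user address */
	reply->size = 4096;	/* placeholder: one page */
	reply->perm = VHOST_ACCESS_RW;
	reply->type = VHOST_IOTLB_UPDATE;
}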

static int
vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
		.flags = VHOST_USER_VERSION,
		.size = 0,
	};

	if (need_reply)
		msg.flags |= VHOST_USER_NEED_REPLY;

	ret = send_vhost_slave_message(dev, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to send config change (%d)\n",
				ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_slave_config_change(int vid, bool need_reply)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	return vhost_user_slave_config_change(dev, need_reply);
}
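
/*
 * Illustrative sketch (editor-added, not upstream code): a backend using
 * the API above to tell the master its config space changed, blocking
 * until the master acknowledges. The function name is hypothetical.
 */
static __rte_unused int
example_signal_config_change(int vid)
{
	return rte_vhost_slave_config_change(vid, true);
}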

static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
		int index, int fd, uint64_t offset, uint64_t size)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
		.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
		.size = sizeof(msg.payload.area),
		.payload.area = {
			.u64 = index & VHOST_USER_VRING_IDX_MASK,
			.size = size,
			.offset = offset,
		},
	};

	if (fd < 0) {
		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
	} else {
		msg.fds[0] = fd;
		msg.fd_num = 1;
	}

	ret = send_vhost_slave_message(dev, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
			"Failed to set host notifier (%d)\n", ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
	struct virtio_net *dev;
	struct rte_vdpa_device *vdpa_dev;
	int vfio_device_fd, ret = 0;
	uint64_t offset, size;
	unsigned int i, q_start, q_last;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	vdpa_dev = dev->vdpa_dev;
	if (vdpa_dev == NULL)
		return -ENODEV;

	if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
	    !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
		return -ENOTSUP;

	if (qid == RTE_VHOST_QUEUE_ALL) {
		q_start = 0;
		q_last = dev->nr_vring - 1;
	} else {
		if (qid >= dev->nr_vring)
			return -EINVAL;
		q_start = qid;
		q_last = qid;
	}

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

	vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
	if (vfio_device_fd < 0)
		return -ENOTSUP;

	if (enable) {
		for (i = q_start; i <= q_last; i++) {
			if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
					&size) < 0) {
				ret = -ENOTSUP;
				goto disable;
			}

			if (vhost_user_slave_set_vring_host_notifier(dev, i,
					vfio_device_fd, offset, size) < 0) {
				ret = -EFAULT;
				goto disable;
			}
		}
	} else {
disable:
		for (i = q_start; i <= q_last; i++) {
			vhost_user_slave_set_vring_host_notifier(dev, i, -1,
					0, 0);
		}
	}

	return ret;
}
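
/*
 * Illustrative sketch (editor-added, not upstream code): a vDPA driver
 * enabling host notifiers for all queues once the device is configured,
 * falling back to software kicks when the master lacks the required
 * protocol features. The function name is hypothetical.
 */
static __rte_unused void
example_enable_host_notifiers(int vid)
{
	if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true))
		VHOST_LOG_CONFIG(INFO,
			"host notifier unavailable for vid %d\n", vid);
}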