/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  A malicious guest that has escaped can then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
#include <linux/userfaultfd.h>
#endif
#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
#include <linux/memfd.h>
#define MEMFD_SUPPORTED
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>
#include <rte_vfio.h>
#include <rte_errno.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

#define INFLIGHT_ALIGNMENT      64
#define INFLIGHT_VERSION        0x1

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
        [VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
        [VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
        [VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
        [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
        [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
        [VHOST_USER_SET_STATUS] = "VHOST_USER_SET_STATUS",
        [VHOST_USER_GET_STATUS] = "VHOST_USER_GET_STATUS",
};

static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);

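/*
 * Close every file descriptor attached to the message and mark each slot
 * as consumed (-1) so a later cleanup pass cannot close it twice.
 */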
static void
close_msg_fds(struct VhostUserMsg *msg)
{
        int i;

        for (i = 0; i < msg->fd_num; i++) {
                int fd = msg->fds[i];

                if (fd == -1)
                        continue;

                msg->fds[i] = -1;
                close(fd);
        }
}

/*
 * Ensure the expected number of FDs is received,
 * close all FDs and return an error if this is not the case.
 */
static int
validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
{
        if (msg->fd_num == expected_fds)
                return 0;

        VHOST_LOG_CONFIG(ERR,
                " Expect %d FDs for request %s, received %d\n",
                expected_fds,
                vhost_message_str[msg->request.master],
                msg->fd_num);

        close_msg_fds(msg);

        return -1;
}

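/*
 * Return the block size reported by fstat() for the given fd (for a
 * hugetlbfs-backed fd this is the hugepage size), or (uint64_t)-1 on error.
 */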
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

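/*
 * Map or unmap a guest memory region in DPDK's default VFIO container so
 * that DMA devices used by the async data path can access it.
 */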
static int
async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
{
        uint64_t host_iova;
        int ret = 0;

        host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
        if (do_map) {
                /* Add mapped region into the default container of DPDK. */
                ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
                                                 region->host_user_addr,
                                                 host_iova,
                                                 region->size);
                if (ret) {
                        /*
                         * The DMA device may be bound to a kernel driver, in
                         * which case we don't need to program the IOMMU
                         * manually. However, if no device is bound with
                         * vfio/uio in DPDK and the vfio kernel module is
                         * loaded, the API will still be called and return
                         * ENODEV/ENOTSUP.
                         *
                         * DPDK vfio only returns ENODEV/ENOTSUP in very similar
                         * situations (vfio either unsupported, or supported
                         * but no devices found). Either way, no mappings could be
                         * performed. We treat it as the normal case in the async path.
                         */
                        if (rte_errno == ENODEV || rte_errno == ENOTSUP)
                                return 0;

                        VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
                        /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
                        return 0;
                }

        } else {
                /* Remove mapped region from the default container of DPDK. */
                ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
                                                   region->host_user_addr,
                                                   host_iova,
                                                   region->size);
                if (ret) {
                        /* Like DMA map, ignore the kernel driver case when unmapping. */
                        if (rte_errno == EINVAL)
                                return 0;

                        VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
                        return ret;
                }
        }

        return ret;
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        if (dev->async_copy && rte_vfio_is_enabled("vfio"))
                                async_dma_map(reg, false);

                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->inflight_info) {
                if (dev->inflight_info->addr) {
                        munmap(dev->inflight_info->addr,
                               dev->inflight_info->size);
                        dev->inflight_info->addr = NULL;
                }

                if (dev->inflight_info->fd >= 0) {
                        close(dev->inflight_info->fd);
                        dev->inflight_info->fd = -1;
                }

                rte_free(dev->inflight_info);
                dev->inflight_info = NULL;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }

        if (dev->postcopy_ufd >= 0) {
                close(dev->postcopy_ufd);
                dev->postcopy_ufd = -1;
        }

        dev->postcopy_listening = 0;
}

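/*
 * Propagate a vring state change to the guest notification area, to the
 * vDPA driver if one is attached, and to the application callbacks.
 */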
static void
vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
                              int enable)
{
        struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
        struct vhost_virtqueue *vq = dev->virtqueue[index];

        /* Configure guest notifications on enable */
        if (enable && vq->notif_enable != VIRTIO_UNINITIALIZED_NOTIF)
                vhost_enable_guest_notification(dev, vq, vq->notif_enable);

        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);
}

/*
 * This function just returns success at the moment; it only validates
 * that the message carries no file descriptors.
 */
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        return RTE_VHOST_MSG_RESULT_OK;
}

static int
vhost_user_reset_owner(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The features that we support are requested.
 */
static int
vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &features);

        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * The number of queues that we support is requested.
 */
static int
vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * We receive the set of features negotiated between us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                dev->flags |= VIRTIO_DEV_FEATURES_FAILED;
                dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;

                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return RTE_VHOST_MSG_RESULT_OK;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
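        /*
         * Mergeable Rx buffers, Virtio 1.0 and packed rings all use the
         * larger virtio-net header that carries a num_buffers field.
         */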
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
                 (1ULL << VIRTIO_F_VERSION_1) |
                 (1ULL << VIRTIO_F_RING_PACKED))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_CONFIG(INFO,
                "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
        VHOST_LOG_CONFIG(DEBUG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        cleanup_vq_inflight(dev, vq);
                        free_vq(dev, vq);
                }
        }

        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev)
                vdpa_dev->ops->set_features(dev->vid);

        dev->flags &= ~VIRTIO_DEV_FEATURES_FAILED;
        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (msg->payload.state.num > 32768) {
                VHOST_LOG_CONFIG(ERR, "invalid virtqueue size %u\n", msg->payload.state.num);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         *
         * VIRTIO 1.1 2.7 Virtqueues says:
         *
         *   Packed virtqueues support up to 2^15 entries each.
         */
        if (!vq_is_packed(dev)) {
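                /*
                 * A power of two has exactly one bit set, so
                 * size & (size - 1) is zero iff size is a power of two.
                 */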
                if (vq->size & (vq->size - 1)) {
                        VHOST_LOG_CONFIG(ERR,
                                "invalid virtqueue size %u\n", vq->size);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq_is_packed(dev)) {
                if (vq->shadow_used_packed)
                        rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = rte_malloc_socket(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE, vq->numa_node);
                if (!vq->shadow_used_packed) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for shadow used ring.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

        } else {
                if (vq->shadow_used_split)
                        rte_free(vq->shadow_used_split);

                vq->shadow_used_split = rte_malloc_socket(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE, vq->numa_node);

                if (!vq->shadow_used_split) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for vq internal data.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq->batch_copy_elems)
                rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = rte_malloc_socket(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE, vq->numa_node);
        if (!vq->batch_copy_elems) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to allocate memory for batching copy.\n");
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * Reallocate virtio_dev, vhost_virtqueue and related data structures so
 * they reside on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int node, dev_node;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *vq;
        struct batch_copy_elem *bce;
        struct guest_page *gp;
        struct rte_vhost_memory *mem;
        size_t mem_size;
        int ret;

        old_dev = dev;
        vq = dev->virtqueue[index];

        /*
         * If the VQ is ready, it is too late to reallocate; it certainly
         * already happened on VHOST_USER_SET_VRING_ADDR anyway.
         */
        if (vq->ready)
                return dev;

        ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR, "Unable to get virtqueue %d numa information.\n", index);
                return dev;
        }

        if (node == vq->numa_node)
                goto out_dev_realloc;

        vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
        if (!vq) {
                VHOST_LOG_CONFIG(ERR, "Failed to realloc virtqueue %d on node %d\n",
                                index, node);
                return dev;
        }

        if (vq != dev->virtqueue[index]) {
                VHOST_LOG_CONFIG(INFO, "reallocated virtqueue on node %d\n", node);
                dev->virtqueue[index] = vq;
                vhost_user_iotlb_init(dev, index);
        }

        if (vq_is_packed(dev)) {
                struct vring_used_elem_packed *sup;

                sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
                                RTE_CACHE_LINE_SIZE, node);
                if (!sup) {
                        VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow packed on node %d\n", node);
                        return dev;
                }
                vq->shadow_used_packed = sup;
        } else {
                struct vring_used_elem *sus;

                sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
                                RTE_CACHE_LINE_SIZE, node);
                if (!sus) {
                        VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow split on node %d\n", node);
                        return dev;
                }
                vq->shadow_used_split = sus;
        }

        bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
                        RTE_CACHE_LINE_SIZE, node);
        if (!bce) {
                VHOST_LOG_CONFIG(ERR, "Failed to realloc batch copy elem on node %d\n", node);
                return dev;
        }
        vq->batch_copy_elems = bce;

        if (vq->log_cache) {
                struct log_cache_entry *lc;

                lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
                if (!lc) {
                        VHOST_LOG_CONFIG(ERR, "Failed to realloc log cache on node %d\n", node);
                        return dev;
                }
                vq->log_cache = lc;
        }

        if (vq->resubmit_inflight) {
                struct rte_vhost_resubmit_info *ri;

                ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
                if (!ri) {
                        VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit inflight on node %d\n",
                                        node);
                        return dev;
                }
                vq->resubmit_inflight = ri;

                if (ri->resubmit_list) {
                        struct rte_vhost_resubmit_desc *rd;

                        rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,
                                        0, node);
                        if (!rd) {
                                VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit list on node %d\n",
                                                node);
                                return dev;
                        }
                        ri->resubmit_list = rd;
                }
        }

        vq->numa_node = node;

out_dev_realloc:

        if (dev->flags & VIRTIO_DEV_RUNNING)
                return dev;

        ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR, "Unable to get Virtio dev %d numa information.\n", dev->vid);
                return dev;
        }

        if (dev_node == node)
                return dev;

        dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
        if (!dev) {
                VHOST_LOG_CONFIG(ERR, "Failed to realloc dev on node %d\n", node);
                return old_dev;
        }

        VHOST_LOG_CONFIG(INFO, "reallocated device on node %d\n", node);
        vhost_devices[dev->vid] = dev;

        mem_size = sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
        mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
        if (!mem) {
                VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
                return dev;
        }
        dev->mem = mem;

        gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
                        RTE_CACHE_LINE_SIZE, node);
        if (!gp) {
                VHOST_LOG_CONFIG(ERR, "Failed to realloc guest pages on node %d\n", node);
                return dev;
        }
        dev->guest_pages = gp;

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        if (unlikely(!dev || !dev->mem))
                goto out_error;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

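                        /* Truncate the returned length to the end of this region. */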
                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
out_error:
        *len = 0;

        return 0;
}


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vhost_user_iotlb_rd_lock(vq);
                vva = vhost_iova_to_vva(dev, vq, ra,
                                        size, VHOST_ACCESS_RW);
                vhost_user_iotlb_rd_unlock(vq);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

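/*
 * Translate the ring's log address to a guest physical address, taking the
 * IOTLB read lock around the translation.
 */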
static uint64_t
log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t log_gpa;

        vhost_user_iotlb_rd_lock(vq);
        log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
        vhost_user_iotlb_rd_unlock(vq);

        return log_gpa;
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len, expected_len;

        if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
                vq->log_guest_addr =
                        log_addr_to_gpa(dev, vq);
                if (vq->log_guest_addr == 0) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map log_guest_addr.\n",
                                dev->vid);
                        return dev;
                }
        }

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

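                /* numa_realloc() may move dev and the vq; refresh local pointers. */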
                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                vq->access_ok = true;
                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

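        /* numa_realloc() may move dev and the vq; refresh local pointers. */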
        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                VHOST_LOG_CONFIG(WARNING,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->access_ok = true;

        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        bool access_ok;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

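        /* Snapshot access_ok before vring_invalidate() resets the ring mappings. */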
        access_ok = vq->access_ok;

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if ((vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
                        access_ok) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return RTE_VHOST_MSG_RESULT_ERR;

                *pdev = dev;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val = msg->payload.state.num;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (vq_is_packed(dev)) {
                /*
                 * Bit[0:14]: avail index
                 * Bit[15]: avail wrap counter
                 */
                vq->last_avail_idx = val & 0x7fff;
                vq->avail_wrap_counter = !!(val & (0x1 << 15));
                /*
                 * Set the used index to the same value as the available one;
                 * they should be equal since ring processing was stopped
                 * when the base was retrieved.
                 */
                vq->last_used_idx = vq->last_avail_idx;
                vq->used_wrap_counter = vq->avail_wrap_counter;
        } else {
                vq->last_used_idx = msg->payload.state.num;
                vq->last_avail_idx = msg->payload.state.num;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;
        struct guest_page *old_pages;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                old_pages = dev->guest_pages;
                dev->guest_pages = rte_realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page),
                                        RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
                        rte_free(old_pages);
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
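        /* The first chunk spans from guest_phys_addr to the end of its page. */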
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        /* sort guest page array if over binary search threshold */
        if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
                qsort((void *)dev->guest_pages, dev->nr_guest_pages,
                        sizeof(struct guest_page), guest_page_addrcmp);
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                VHOST_LOG_CONFIG(INFO,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

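/*
 * Return true if the memory table sent by the frontend differs, region by
 * region, from the one currently mapped.
 */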
static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

#ifdef RTE_LIBRTE_VHOST_POSTCOPY
static int
vhost_user_postcopy_region_register(struct virtio_net *dev,
                struct rte_vhost_mem_region *reg)
{
        struct uffdio_register reg_struct;

        /*
         * Let's register the whole mmap'ed area to ensure
         * alignment on page boundaries.
         */
        reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
        reg_struct.range.len = reg->mmap_size;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
                                &reg_struct)) {
                VHOST_LOG_CONFIG(ERR, "Failed to register ufd for region "
                                "%" PRIx64 " - %" PRIx64 " (ufd = %d) %s\n",
                                (uint64_t)reg_struct.range.start,
                                (uint64_t)reg_struct.range.start +
                                (uint64_t)reg_struct.range.len - 1,
                                dev->postcopy_ufd,
                                strerror(errno));
                return -1;
        }

        VHOST_LOG_CONFIG(INFO, "\t userfaultfd registered for range : %" PRIx64 " - %" PRIx64 "\n",
                        (uint64_t)reg_struct.range.start,
                        (uint64_t)reg_struct.range.start +
                        (uint64_t)reg_struct.range.len - 1);

        return 0;
}
#else
static int
vhost_user_postcopy_region_register(struct virtio_net *dev __rte_unused,
                struct rte_vhost_mem_region *reg __rte_unused)
{
        return -1;
}
#endif

static int
vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
                struct VhostUserMsg *msg)
{
        struct VhostUserMemory *memory;
        struct rte_vhost_mem_region *reg;
        VhostUserMsg ack_msg;
        uint32_t i;

        if (!dev->postcopy_listening)
                return 0;

        /*
         * We don't have a better way right now than sharing
         * DPDK's virtual addresses with QEMU, so that QEMU can
         * retrieve the region offset when handling userfaults.
         */
        memory = &msg->payload.memory;
        for (i = 0; i < memory->nregions; i++) {
                reg = &dev->mem->regions[i];
                memory->regions[i].userspace_addr = reg->host_user_addr;
        }

        /* Send the addresses back to qemu */
        msg->fd_num = 0;
        send_vhost_reply(main_fd, msg);

        /* Wait for QEMU to acknowledge it got the addresses;
         * we have to wait before we're allowed to generate faults.
         */
        if (read_vhost_message(main_fd, &ack_msg) <= 0) {
                VHOST_LOG_CONFIG(ERR,
                                "Failed to read qemu ack on postcopy set-mem-table\n");
                return -1;
        }

        if (validate_msg_fds(&ack_msg, 0) != 0)
                return -1;

        if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
                VHOST_LOG_CONFIG(ERR,
                                "Bad qemu ack on postcopy set-mem-table (%d)\n",
                                ack_msg.request.master);
                return -1;
        }

        /* Now register with userfaultfd so we can use the memory */
        for (i = 0; i < memory->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (vhost_user_postcopy_region_register(dev, reg) < 0)
                        return -1;
        }

        return 0;
}

static int
vhost_user_mmap_region(struct virtio_net *dev,
                struct rte_vhost_mem_region *region,
                uint64_t mmap_offset)
{
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t alignment;
        int populate;
        int ret;

        /* Check for memory_size + mmap_offset overflow */
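        /*
         * In unsigned arithmetic -size wraps to 2^64 - size, so the test
         * below catches any size + mmap_offset that would wrap around.
         */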
        if (mmap_offset >= -region->size) {
                VHOST_LOG_CONFIG(ERR,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, region->size);
                return -1;
        }

        mmap_size = region->size + mmap_offset;

        /* On older long-term Linux kernels (e.g. 2.6.32 and 3.2.72), mmap()
         * without MAP_ANONYMOUS must be called with a length aligned to the
         * hugepage size, or it fails with EINVAL.
         *
         * To avoid failure, the caller must keep the length aligned.
         */
        alignment = get_blk_size(region->fd);
        if (alignment == (uint64_t)-1) {
                VHOST_LOG_CONFIG(ERR,
                                "couldn't get hugepage size through fstat\n");
                return -1;
        }
        mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
        if (mmap_size == 0) {
                /*
                 * This can happen if the initial mmap_size + alignment
                 * overflows uint64_t, which means either mmap_size or the
                 * alignment value is wrong.
                 *
                 * mmap()'s kernel implementation would return an error, but
                 * better to catch it here and provide useful info in the logs.
                 */
1253                 VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
1254                                 "or alignment (0x%" PRIx64 ") is invalid\n",
1255                                 region->size + mmap_offset, alignment);
1256                 return -1;
1257         }
1258
1259         populate = dev->async_copy ? MAP_POPULATE : 0;
1260         mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
1261                         MAP_SHARED | populate, region->fd, 0);
1262
1263         if (mmap_addr == MAP_FAILED) {
1264                 VHOST_LOG_CONFIG(ERR, "mmap failed (%s).\n", strerror(errno));
1265                 return -1;
1266         }
1267
1268         region->mmap_addr = mmap_addr;
1269         region->mmap_size = mmap_size;
1270         region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
1271
1272         if (dev->async_copy) {
1273                 if (add_guest_pages(dev, region, alignment) < 0) {
1274                         VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
1275                         return -1;
1276                 }
1277
1278                 if (rte_vfio_is_enabled("vfio")) {
1279                         ret = async_dma_map(region, true);
1280                         if (ret) {
1281                                 VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
1282                                 return -1;
1283                         }
1284                 }
1285         }
1286
1287         VHOST_LOG_CONFIG(INFO,
1288                         "guest memory region size: 0x%" PRIx64 "\n"
1289                         "\t guest physical addr: 0x%" PRIx64 "\n"
1290                         "\t guest virtual  addr: 0x%" PRIx64 "\n"
1291                         "\t host  virtual  addr: 0x%" PRIx64 "\n"
1292                         "\t mmap addr : 0x%" PRIx64 "\n"
1293                         "\t mmap size : 0x%" PRIx64 "\n"
1294                         "\t mmap align: 0x%" PRIx64 "\n"
1295                         "\t mmap off  : 0x%" PRIx64 "\n",
1296                         region->size,
1297                         region->guest_phys_addr,
1298                         region->guest_user_addr,
1299                         region->host_user_addr,
1300                         (uint64_t)(uintptr_t)mmap_addr,
1301                         mmap_size,
1302                         alignment,
1303                         mmap_offset);
1304
1305         return 0;
1306 }
1307
1308 static int
1309 vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
1310                         int main_fd)
1311 {
1312         struct virtio_net *dev = *pdev;
1313         struct VhostUserMemory *memory = &msg->payload.memory;
1314         struct rte_vhost_mem_region *reg;
1315         int numa_node = SOCKET_ID_ANY;
1316         uint64_t mmap_offset;
1317         uint32_t i;
1318         bool async_notify = false;
1319
1320         if (validate_msg_fds(msg, memory->nregions) != 0)
1321                 return RTE_VHOST_MSG_RESULT_ERR;
1322
1323         if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
1324                 VHOST_LOG_CONFIG(ERR,
1325                         "too many memory regions (%u)\n", memory->nregions);
1326                 goto close_msg_fds;
1327         }
1328
1329         if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
1330                 VHOST_LOG_CONFIG(INFO,
1331                         "(%d) memory regions not changed\n", dev->vid);
1332
1333                 close_msg_fds(msg);
1334
1335                 return RTE_VHOST_MSG_RESULT_OK;
1336         }
1337
1338         if (dev->mem) {
1339                 if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
1340                         struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;
1341
1342                         if (vdpa_dev && vdpa_dev->ops->dev_close)
1343                                 vdpa_dev->ops->dev_close(dev->vid);
1344                         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1345                 }
1346
1347                 /* notify the vhost application to stop DMA transfers */
1348                 if (dev->async_copy && dev->notify_ops->vring_state_changed) {
1349                         for (i = 0; i < dev->nr_vring; i++) {
1350                                 dev->notify_ops->vring_state_changed(dev->vid,
1351                                                 i, 0);
1352                         }
1353                         async_notify = true;
1354                 }
1355
1356                 free_mem_region(dev);
1357                 rte_free(dev->mem);
1358                 dev->mem = NULL;
1359         }
1360
1361         /* Flush IOTLB cache as previous HVAs are now invalid */
1362         if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
1363                 for (i = 0; i < dev->nr_vring; i++)
1364                         vhost_user_iotlb_flush_all(dev->virtqueue[i]);
1365
1366         /*
1367          * If VQ 0 has already been allocated, try to allocate on the same
1368          * NUMA node. It can be reallocated later in numa_realloc().
1369          */
1370         if (dev->nr_vring > 0)
1371                 numa_node = dev->virtqueue[0]->numa_node;
1372
1373         dev->nr_guest_pages = 0;
1374         if (dev->guest_pages == NULL) {
1375                 dev->max_guest_pages = 8;
1376                 dev->guest_pages = rte_zmalloc_socket(NULL,
1377                                         dev->max_guest_pages *
1378                                         sizeof(struct guest_page),
1379                                         RTE_CACHE_LINE_SIZE,
1380                                         numa_node);
1381                 if (dev->guest_pages == NULL) {
1382                         VHOST_LOG_CONFIG(ERR,
1383                                 "(%d) failed to allocate memory "
1384                                 "for dev->guest_pages\n",
1385                                 dev->vid);
1386                         goto close_msg_fds;
1387                 }
1388         }
1389
1390         dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
1391                 sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
1392         if (dev->mem == NULL) {
1393                 VHOST_LOG_CONFIG(ERR,
1394                         "(%d) failed to allocate memory for dev->mem\n",
1395                         dev->vid);
1396                 goto free_guest_pages;
1397         }
1398
1399         for (i = 0; i < memory->nregions; i++) {
1400                 reg = &dev->mem->regions[i];
1401
1402                 reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
1403                 reg->guest_user_addr = memory->regions[i].userspace_addr;
1404                 reg->size            = memory->regions[i].memory_size;
1405                 reg->fd              = msg->fds[i];
1406
1407                 /*
1408                  * Assign invalid file descriptor value to avoid double
1409                  * closing on error path.
1410                  */
1411                 msg->fds[i] = -1;
1412
1413                 mmap_offset = memory->regions[i].mmap_offset;
1414
1415                 if (vhost_user_mmap_region(dev, reg, mmap_offset) < 0) {
1416                         VHOST_LOG_CONFIG(ERR, "Failed to mmap region %u\n", i);
1417                         goto free_mem_table;
1418                 }
1419
1420                 dev->mem->nregions++;
1421         }
1422
1423         if (vhost_user_postcopy_register(dev, main_fd, msg) < 0)
1424                 goto free_mem_table;
1425
1426         for (i = 0; i < dev->nr_vring; i++) {
1427                 struct vhost_virtqueue *vq = dev->virtqueue[i];
1428
1429                 if (!vq)
1430                         continue;
1431
1432                 if (vq->desc || vq->avail || vq->used) {
1433                         /*
1434                          * If the memory table got updated, the ring addresses
1435                          * need to be translated again as virtual addresses have
1436                          * changed.
1437                          */
1438                         vring_invalidate(dev, vq);
1439
1440                         dev = translate_ring_addresses(dev, i);
1441                         if (!dev) {
1442                                 dev = *pdev;
1443                                 goto free_mem_table;
1444                         }
1445
1446                         *pdev = dev;
1447                 }
1448         }
1449
1450         dump_guest_pages(dev);
1451
1452         if (async_notify) {
1453                 for (i = 0; i < dev->nr_vring; i++)
1454                         dev->notify_ops->vring_state_changed(dev->vid, i, 1);
1455         }
1456
1457         return RTE_VHOST_MSG_RESULT_OK;
1458
1459 free_mem_table:
1460         free_mem_region(dev);
1461         rte_free(dev->mem);
1462         dev->mem = NULL;
1463
1464 free_guest_pages:
1465         rte_free(dev->guest_pages);
1466         dev->guest_pages = NULL;
1467 close_msg_fds:
1468         close_msg_fds(msg);
1469         return RTE_VHOST_MSG_RESULT_ERR;
1470 }
1471
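/*
 * A virtqueue is only considered ready once its rings are mapped, both
 * the kick and call eventfds have been received, and it is enabled.
 */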
1472 static bool
1473 vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
1474 {
1475         bool rings_ok;
1476
1477         if (!vq)
1478                 return false;
1479
1480         if (vq_is_packed(dev))
1481                 rings_ok = vq->desc_packed && vq->driver_event &&
1482                         vq->device_event;
1483         else
1484                 rings_ok = vq->desc && vq->avail && vq->used;
1485
1486         return rings_ok &&
1487                vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1488                vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
1489                vq->enabled;
1490 }
1491
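/*
 * The builtin virtio-net backend only needs the first queue pair (one RX
 * and one TX virtqueue) to be ready before it can start processing.
 */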
1492 #define VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY 2u
1493
1494 static int
1495 virtio_is_ready(struct virtio_net *dev)
1496 {
1497         struct vhost_virtqueue *vq;
1498         uint32_t i, nr_vring = dev->nr_vring;
1499
1500         if (dev->flags & VIRTIO_DEV_READY)
1501                 return 1;
1502
1503         if (!dev->nr_vring)
1504                 return 0;
1505
1506         if (dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) {
1507                 nr_vring = VIRTIO_BUILTIN_NUM_VQS_TO_BE_READY;
1508
1509                 if (dev->nr_vring < nr_vring)
1510                         return 0;
1511         }
1512
1513         for (i = 0; i < nr_vring; i++) {
1514                 vq = dev->virtqueue[i];
1515
1516                 if (!vq_is_ready(dev, vq))
1517                         return 0;
1518         }
1519
1520         /* If supported, ensure the frontend is really done with config */
1521         if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_STATUS))
1522                 if (!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK))
1523                         return 0;
1524
1525         dev->flags |= VIRTIO_DEV_READY;
1526
1527         if (!(dev->flags & VIRTIO_DEV_RUNNING))
1528                 VHOST_LOG_CONFIG(INFO,
1529                         "virtio is now ready for processing.\n");
1530         return 1;
1531 }
1532
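/*
 * Allocate an fd-backed, shareable buffer for inflight descriptor
 * tracking: prefer a memfd when the kernel supports it, otherwise fall
 * back to an immediately-unlinked temporary file under /tmp.
 */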
1533 static void *
1534 inflight_mem_alloc(const char *name, size_t size, int *fd)
1535 {
1536         void *ptr;
1537         int mfd = -1;
1538         char fname[20] = "/tmp/memfd-XXXXXX";
1539
1540         *fd = -1;
1541 #ifdef MEMFD_SUPPORTED
1542         mfd = memfd_create(name, MFD_CLOEXEC);
1543 #else
1544         RTE_SET_USED(name);
1545 #endif
1546         if (mfd == -1) {
1547                 mfd = mkstemp(fname);
1548                 if (mfd == -1) {
1549                         VHOST_LOG_CONFIG(ERR,
1550                                 "failed to get inflight buffer fd\n");
1551                         return NULL;
1552                 }
1553
1554                 unlink(fname);
1555         }
1556
1557         if (ftruncate(mfd, size) == -1) {
1558                 VHOST_LOG_CONFIG(ERR,
1559                         "failed to alloc inflight buffer\n");
1560                 close(mfd);
1561                 return NULL;
1562         }
1563
1564         ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
1565         if (ptr == MAP_FAILED) {
1566                 VHOST_LOG_CONFIG(ERR,
1567                         "failed to mmap inflight buffer\n");
1568                 close(mfd);
1569                 return NULL;
1570         }
1571
1572         *fd = mfd;
1573         return ptr;
1574 }
1575
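/*
 * Size of one virtqueue's inflight tracking region, e.g. for a split ring:
 *   align_up(queue_size * sizeof(desc) + sizeof(uint64_t) +
 *            4 * sizeof(uint16_t), INFLIGHT_ALIGNMENT)
 * INFLIGHT_ALIGNMENT is 64 bytes, presumably to keep each region
 * cache-line aligned.
 */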
1576 static uint32_t
1577 get_pervq_shm_size_split(uint16_t queue_size)
1578 {
1579         return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
1580                                   queue_size + sizeof(uint64_t) +
1581                                   sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
1582 }
1583
1584 static uint32_t
1585 get_pervq_shm_size_packed(uint16_t queue_size)
1586 {
1587         return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
1588                                   * queue_size + sizeof(uint64_t) +
1589                                   sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
1590                                   INFLIGHT_ALIGNMENT);
1591 }
1592
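/*
 * VHOST_USER_GET_INFLIGHT_FD handler: allocate one shared buffer covering
 * all queues and reply with its fd, size and offset, so the frontend can
 * map the same memory and inflight descriptors survive a backend restart.
 */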
1593 static int
1594 vhost_user_get_inflight_fd(struct virtio_net **pdev,
1595                            VhostUserMsg *msg,
1596                            int main_fd __rte_unused)
1597 {
1598         struct rte_vhost_inflight_info_packed *inflight_packed;
1599         uint64_t pervq_inflight_size, mmap_size;
1600         uint16_t num_queues, queue_size;
1601         struct virtio_net *dev = *pdev;
1602         int fd, i, j;
1603         int numa_node = SOCKET_ID_ANY;
1604         void *addr;
1605
1606         if (msg->size != sizeof(msg->payload.inflight)) {
1607                 VHOST_LOG_CONFIG(ERR,
1608                         "invalid get_inflight_fd message size: %d\n",
1609                         msg->size);
1610                 return RTE_VHOST_MSG_RESULT_ERR;
1611         }
1612
1613         /*
1614          * If VQ 0 has already been allocated, try to allocate on the same
1615          * NUMA node. It can be reallocated later in numa_realloc().
1616          */
1617         if (dev->nr_vring > 0)
1618                 numa_node = dev->virtqueue[0]->numa_node;
1619
1620         if (dev->inflight_info == NULL) {
1621                 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1622                                 sizeof(struct inflight_mem_info), 0, numa_node);
1623                 if (!dev->inflight_info) {
1624                         VHOST_LOG_CONFIG(ERR,
1625                                 "failed to alloc dev inflight area\n");
1626                         return RTE_VHOST_MSG_RESULT_ERR;
1627                 }
1628                 dev->inflight_info->fd = -1;
1629         }
1630
1631         num_queues = msg->payload.inflight.num_queues;
1632         queue_size = msg->payload.inflight.queue_size;
1633
1634         VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
1635                 msg->payload.inflight.num_queues);
1636         VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
1637                 msg->payload.inflight.queue_size);
1638
1639         if (vq_is_packed(dev))
1640                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1641         else
1642                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1643
1644         mmap_size = num_queues * pervq_inflight_size;
1645         addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1646         if (!addr) {
1647                 VHOST_LOG_CONFIG(ERR,
1648                         "failed to alloc vhost inflight area\n");
1649                 msg->payload.inflight.mmap_size = 0;
1650                 return RTE_VHOST_MSG_RESULT_ERR;
1651         }
1652         memset(addr, 0, mmap_size);
1653
1654         if (dev->inflight_info->addr) {
1655                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1656                 dev->inflight_info->addr = NULL;
1657         }
1658
1659         if (dev->inflight_info->fd >= 0) {
1660                 close(dev->inflight_info->fd);
1661                 dev->inflight_info->fd = -1;
1662         }
1663
1664         dev->inflight_info->addr = addr;
1665         dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1666         dev->inflight_info->fd = msg->fds[0] = fd;
1667         msg->payload.inflight.mmap_offset = 0;
1668         msg->fd_num = 1;
1669
1670         if (vq_is_packed(dev)) {
1671                 for (i = 0; i < num_queues; i++) {
1672                         inflight_packed =
1673                                 (struct rte_vhost_inflight_info_packed *)addr;
1674                         inflight_packed->used_wrap_counter = 1;
1675                         inflight_packed->old_used_wrap_counter = 1;
1676                         for (j = 0; j < queue_size; j++)
1677                                 inflight_packed->desc[j].next = j + 1;
1678                         addr = (void *)((char *)addr + pervq_inflight_size);
1679                 }
1680         }
1681
1682         VHOST_LOG_CONFIG(INFO,
1683                 "send inflight mmap_size: %"PRIu64"\n",
1684                 msg->payload.inflight.mmap_size);
1685         VHOST_LOG_CONFIG(INFO,
1686                 "send inflight mmap_offset: %"PRIu64"\n",
1687                 msg->payload.inflight.mmap_offset);
1688         VHOST_LOG_CONFIG(INFO,
1689                 "send inflight fd: %d\n", msg->fds[0]);
1690
1691         return RTE_VHOST_MSG_RESULT_REPLY;
1692 }
1693
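/*
 * VHOST_USER_SET_INFLIGHT_FD handler: map the inflight buffer handed back
 * by the frontend and carve it into per-virtqueue regions of
 * pervq_inflight_size bytes each.
 */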
1694 static int
1695 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1696                            int main_fd __rte_unused)
1697 {
1698         uint64_t mmap_size, mmap_offset;
1699         uint16_t num_queues, queue_size;
1700         struct virtio_net *dev = *pdev;
1701         uint32_t pervq_inflight_size;
1702         struct vhost_virtqueue *vq;
1703         void *addr;
1704         int fd, i;
1705         int numa_node = SOCKET_ID_ANY;
1706
1707         fd = msg->fds[0];
1708         if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1709                 VHOST_LOG_CONFIG(ERR,
1710                         "invalid set_inflight_fd message size: %d, fd: %d\n",
1711                         msg->size, fd);
1712                 return RTE_VHOST_MSG_RESULT_ERR;
1713         }
1714
1715         mmap_size = msg->payload.inflight.mmap_size;
1716         mmap_offset = msg->payload.inflight.mmap_offset;
1717         num_queues = msg->payload.inflight.num_queues;
1718         queue_size = msg->payload.inflight.queue_size;
1719
1720         if (vq_is_packed(dev))
1721                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1722         else
1723                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1724
1725         VHOST_LOG_CONFIG(INFO,
1726                 "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1727         VHOST_LOG_CONFIG(INFO,
1728                 "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1729         VHOST_LOG_CONFIG(INFO,
1730                 "set_inflight_fd num_queues: %u\n", num_queues);
1731         VHOST_LOG_CONFIG(INFO,
1732                 "set_inflight_fd queue_size: %u\n", queue_size);
1733         VHOST_LOG_CONFIG(INFO,
1734                 "set_inflight_fd fd: %d\n", fd);
1735         VHOST_LOG_CONFIG(INFO,
1736                 "set_inflight_fd pervq_inflight_size: %d\n",
1737                 pervq_inflight_size);
1738
1739         /*
1740          * If VQ 0 has already been allocated, try to allocate on the same
1741          * NUMA node. It can be reallocated later in numa_realloc().
1742          */
1743         if (dev->nr_vring > 0)
1744                 numa_node = dev->virtqueue[0]->numa_node;
1745
1746         if (!dev->inflight_info) {
1747                 dev->inflight_info = rte_zmalloc_socket("inflight_info",
1748                                 sizeof(struct inflight_mem_info), 0, numa_node);
1749                 if (dev->inflight_info == NULL) {
1750                         VHOST_LOG_CONFIG(ERR,
1751                                 "failed to alloc dev inflight area\n");
1752                         return RTE_VHOST_MSG_RESULT_ERR;
1753                 }
1754                 dev->inflight_info->fd = -1;
1755         }
1756
1757         if (dev->inflight_info->addr) {
1758                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1759                 dev->inflight_info->addr = NULL;
1760         }
1761
1762         addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1763                     fd, mmap_offset);
1764         if (addr == MAP_FAILED) {
1765                 VHOST_LOG_CONFIG(ERR, "failed to mmap shared memory.\n");
1766                 return RTE_VHOST_MSG_RESULT_ERR;
1767         }
1768
1769         if (dev->inflight_info->fd >= 0) {
1770                 close(dev->inflight_info->fd);
1771                 dev->inflight_info->fd = -1;
1772         }
1773
1774         dev->inflight_info->fd = fd;
1775         dev->inflight_info->addr = addr;
1776         dev->inflight_info->size = mmap_size;
1777
1778         for (i = 0; i < num_queues; i++) {
1779                 vq = dev->virtqueue[i];
1780                 if (!vq)
1781                         continue;
1782
1783                 if (vq_is_packed(dev)) {
1784                         vq->inflight_packed = addr;
1785                         vq->inflight_packed->desc_num = queue_size;
1786                 } else {
1787                         vq->inflight_split = addr;
1788                         vq->inflight_split->desc_num = queue_size;
1789                 }
1790                 addr = (void *)((char *)addr + pervq_inflight_size);
1791         }
1792
1793         return RTE_VHOST_MSG_RESULT_OK;
1794 }
1795
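/*
 * VHOST_USER_SET_VRING_CALL handler: install the eventfd used to signal
 * the frontend, closing any previously installed one.
 */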
1796 static int
1797 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1798                         int main_fd __rte_unused)
1799 {
1800         struct virtio_net *dev = *pdev;
1801         struct vhost_vring_file file;
1802         struct vhost_virtqueue *vq;
1803         int expected_fds;
1804
1805         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1806         if (validate_msg_fds(msg, expected_fds) != 0)
1807                 return RTE_VHOST_MSG_RESULT_ERR;
1808
1809         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1810         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1811                 file.fd = VIRTIO_INVALID_EVENTFD;
1812         else
1813                 file.fd = msg->fds[0];
1814         VHOST_LOG_CONFIG(INFO,
1815                 "vring call idx:%d file:%d\n", file.index, file.fd);
1816
1817         vq = dev->virtqueue[file.index];
1818
1819         if (vq->ready) {
1820                 vq->ready = false;
1821                 vhost_user_notify_queue_state(dev, file.index, 0);
1822         }
1823
1824         if (vq->callfd >= 0)
1825                 close(vq->callfd);
1826
1827         vq->callfd = file.fd;
1828
1829         return RTE_VHOST_MSG_RESULT_OK;
1830 }
1831
1832 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1833                         struct VhostUserMsg *msg,
1834                         int main_fd __rte_unused)
1835 {
1836         int expected_fds;
1837
1838         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1839         if (validate_msg_fds(msg, expected_fds) != 0)
1840                 return RTE_VHOST_MSG_RESULT_ERR;
1841
1842         if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1843                 close(msg->fds[0]);
1844         VHOST_LOG_CONFIG(INFO, "not implemented\n");
1845
1846         return RTE_VHOST_MSG_RESULT_OK;
1847 }
1848
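/*
 * qsort() comparator ordering inflight descriptors by decreasing
 * submission counter, so entry 0 holds the most recently submitted
 * descriptor and the global counter can resume from its value + 1.
 */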
1849 static int
1850 resubmit_desc_compare(const void *a, const void *b)
1851 {
1852         const struct rte_vhost_resubmit_desc *desc0 = a;
1853         const struct rte_vhost_resubmit_desc *desc1 = b;
1854
1855         if (desc1->counter > desc0->counter)
1856                 return 1;
1857
1858         return -1;
1859 }
1860
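/*
 * On reconnection, scan the split ring's inflight region: descriptors
 * still marked inflight are gathered into a resubmit list so the datapath
 * can replay them before serving new requests.
 */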
1861 static int
1862 vhost_check_queue_inflights_split(struct virtio_net *dev,
1863                                   struct vhost_virtqueue *vq)
1864 {
1865         uint16_t i;
1866         uint16_t resubmit_num = 0, last_io, num;
1867         struct vring_used *used = vq->used;
1868         struct rte_vhost_resubmit_info *resubmit;
1869         struct rte_vhost_inflight_info_split *inflight_split;
1870
1871         if (!(dev->protocol_features &
1872             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1873                 return RTE_VHOST_MSG_RESULT_OK;
1874
1875         /* The frontend may still not support the inflight feature even
1876          * though we have negotiated the protocol feature.
1877          */
1878         if (!vq->inflight_split)
1879                 return RTE_VHOST_MSG_RESULT_OK;
1880
1881         if (!vq->inflight_split->version) {
1882                 vq->inflight_split->version = INFLIGHT_VERSION;
1883                 return RTE_VHOST_MSG_RESULT_OK;
1884         }
1885
1886         if (vq->resubmit_inflight)
1887                 return RTE_VHOST_MSG_RESULT_OK;
1888
1889         inflight_split = vq->inflight_split;
1890         vq->global_counter = 0;
1891         last_io = inflight_split->last_inflight_io;
1892
1893         if (inflight_split->used_idx != used->idx) {
1894                 inflight_split->desc[last_io].inflight = 0;
1895                 rte_atomic_thread_fence(__ATOMIC_SEQ_CST);
1896                 inflight_split->used_idx = used->idx;
1897         }
1898
1899         for (i = 0; i < inflight_split->desc_num; i++) {
1900                 if (inflight_split->desc[i].inflight == 1)
1901                         resubmit_num++;
1902         }
1903
1904         vq->last_avail_idx += resubmit_num;
1905
1906         if (resubmit_num) {
1907                 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
1908                                 0, vq->numa_node);
1909                 if (!resubmit) {
1910                         VHOST_LOG_CONFIG(ERR,
1911                                 "failed to allocate memory for resubmit info.\n");
1912                         return RTE_VHOST_MSG_RESULT_ERR;
1913                 }
1914
1915                 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
1916                                 resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
1917                                 0, vq->numa_node);
1918                 if (!resubmit->resubmit_list) {
1919                         VHOST_LOG_CONFIG(ERR,
1920                                 "failed to allocate memory for resubmit desc.\n");
1921                         rte_free(resubmit);
1922                         return RTE_VHOST_MSG_RESULT_ERR;
1923                 }
1924
1925                 num = 0;
1926                 for (i = 0; i < vq->inflight_split->desc_num; i++) {
1927                         if (vq->inflight_split->desc[i].inflight == 1) {
1928                                 resubmit->resubmit_list[num].index = i;
1929                                 resubmit->resubmit_list[num].counter =
1930                                         inflight_split->desc[i].counter;
1931                                 num++;
1932                         }
1933                 }
1934                 resubmit->resubmit_num = num;
1935
1936                 if (resubmit->resubmit_num > 1)
1937                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1938                               sizeof(struct rte_vhost_resubmit_desc),
1939                               resubmit_desc_compare);
1940
1941                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1942                 vq->resubmit_inflight = resubmit;
1943         }
1944
1945         return RTE_VHOST_MSG_RESULT_OK;
1946 }
1947
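/*
 * Packed ring counterpart of the split-ring check above: first roll the
 * used index state back to a consistent snapshot, then gather the
 * still-inflight descriptors for resubmission.
 */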
1948 static int
1949 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1950                                    struct vhost_virtqueue *vq)
1951 {
1952         uint16_t i;
1953         uint16_t resubmit_num = 0, old_used_idx, num;
1954         struct rte_vhost_resubmit_info *resubmit;
1955         struct rte_vhost_inflight_info_packed *inflight_packed;
1956
1957         if (!(dev->protocol_features &
1958             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1959                 return RTE_VHOST_MSG_RESULT_OK;
1960
1961         /* The frontend may still not support the inflight feature even
1962          * though we have negotiated the protocol feature.
1963          */
1964         if (!vq->inflight_packed)
1965                 return RTE_VHOST_MSG_RESULT_OK;
1966
1967         if (!vq->inflight_packed->version) {
1968                 vq->inflight_packed->version = INFLIGHT_VERSION;
1969                 return RTE_VHOST_MSG_RESULT_OK;
1970         }
1971
1972         if (vq->resubmit_inflight)
1973                 return RTE_VHOST_MSG_RESULT_OK;
1974
1975         inflight_packed = vq->inflight_packed;
1976         vq->global_counter = 0;
1977         old_used_idx = inflight_packed->old_used_idx;
1978
1979         if (inflight_packed->used_idx != old_used_idx) {
1980                 if (inflight_packed->desc[old_used_idx].inflight == 0) {
1981                         inflight_packed->old_used_idx =
1982                                 inflight_packed->used_idx;
1983                         inflight_packed->old_used_wrap_counter =
1984                                 inflight_packed->used_wrap_counter;
1985                         inflight_packed->old_free_head =
1986                                 inflight_packed->free_head;
1987                 } else {
1988                         inflight_packed->used_idx =
1989                                 inflight_packed->old_used_idx;
1990                         inflight_packed->used_wrap_counter =
1991                                 inflight_packed->old_used_wrap_counter;
1992                         inflight_packed->free_head =
1993                                 inflight_packed->old_free_head;
1994                 }
1995         }
1996
1997         for (i = 0; i < inflight_packed->desc_num; i++) {
1998                 if (inflight_packed->desc[i].inflight == 1)
1999                         resubmit_num++;
2000         }
2001
2002         if (resubmit_num) {
2003                 resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
2004                                 0, vq->numa_node);
2005                 if (resubmit == NULL) {
2006                         VHOST_LOG_CONFIG(ERR,
2007                                 "failed to allocate memory for resubmit info.\n");
2008                         return RTE_VHOST_MSG_RESULT_ERR;
2009                 }
2010
2011                 resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
2012                                 resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
2013                                 0, vq->numa_node);
2014                 if (resubmit->resubmit_list == NULL) {
2015                         VHOST_LOG_CONFIG(ERR,
2016                                 "failed to allocate memory for resubmit desc.\n");
2017                         rte_free(resubmit);
2018                         return RTE_VHOST_MSG_RESULT_ERR;
2019                 }
2020
2021                 num = 0;
2022                 for (i = 0; i < inflight_packed->desc_num; i++) {
2023                         if (vq->inflight_packed->desc[i].inflight == 1) {
2024                                 resubmit->resubmit_list[num].index = i;
2025                                 resubmit->resubmit_list[num].counter =
2026                                         inflight_packed->desc[i].counter;
2027                                 num++;
2028                         }
2029                 }
2030                 resubmit->resubmit_num = num;
2031
2032                 if (resubmit->resubmit_num > 1)
2033                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
2034                               sizeof(struct rte_vhost_resubmit_desc),
2035                               resubmit_desc_compare);
2036
2037                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
2038                 vq->resubmit_inflight = resubmit;
2039         }
2040
2041         return RTE_VHOST_MSG_RESULT_OK;
2042 }
2043
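/*
 * VHOST_USER_SET_VRING_KICK handler: install the eventfd the frontend
 * kicks, translate the ring addresses, and recover any descriptors that
 * were left inflight.
 */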
2044 static int
2045 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
2046                         int main_fd __rte_unused)
2047 {
2048         struct virtio_net *dev = *pdev;
2049         struct vhost_vring_file file;
2050         struct vhost_virtqueue *vq;
2051         int expected_fds;
2052
2053         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
2054         if (validate_msg_fds(msg, expected_fds) != 0)
2055                 return RTE_VHOST_MSG_RESULT_ERR;
2056
2057         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2058         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
2059                 file.fd = VIRTIO_INVALID_EVENTFD;
2060         else
2061                 file.fd = msg->fds[0];
2062         VHOST_LOG_CONFIG(INFO,
2063                 "vring kick idx:%d file:%d\n", file.index, file.fd);
2064
2065         /* Interpret ring addresses only when ring is started. */
2066         dev = translate_ring_addresses(dev, file.index);
2067         if (!dev) {
2068                 if (file.fd != VIRTIO_INVALID_EVENTFD)
2069                         close(file.fd);
2070
2071                 return RTE_VHOST_MSG_RESULT_ERR;
2072         }
2073
2074         *pdev = dev;
2075
2076         vq = dev->virtqueue[file.index];
2077
2078         /*
2079          * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
2080          * the ring starts already enabled. Otherwise, it is enabled via
2081          * the SET_VRING_ENABLE message.
2082          */
2083         if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
2084                 vq->enabled = true;
2085         }
2086
2087         if (vq->ready) {
2088                 vq->ready = false;
2089                 vhost_user_notify_queue_state(dev, file.index, 0);
2090         }
2091
2092         if (vq->kickfd >= 0)
2093                 close(vq->kickfd);
2094         vq->kickfd = file.fd;
2095
2096         if (vq_is_packed(dev)) {
2097                 if (vhost_check_queue_inflights_packed(dev, vq)) {
2098                         VHOST_LOG_CONFIG(ERR,
2099                                 "failed to check inflights for vq: %d\n", file.index);
2100                         return RTE_VHOST_MSG_RESULT_ERR;
2101                 }
2102         } else {
2103                 if (vhost_check_queue_inflights_split(dev, vq)) {
2104                         VHOST_LOG_CONFIG(ERR,
2105                                 "failed to check inflights for vq: %d\n", file.index);
2106                         return RTE_VHOST_MSG_RESULT_ERR;
2107                 }
2108         }
2109
2110         return RTE_VHOST_MSG_RESULT_OK;
2111 }
2112
2113 /*
2114  * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
2115  */
2116 static int
2117 vhost_user_get_vring_base(struct virtio_net **pdev,
2118                         struct VhostUserMsg *msg,
2119                         int main_fd __rte_unused)
2120 {
2121         struct virtio_net *dev = *pdev;
2122         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
2123         uint64_t val;
2124
2125         if (validate_msg_fds(msg, 0) != 0)
2126                 return RTE_VHOST_MSG_RESULT_ERR;
2127
2128         /* We have to stop the queue (virtio) if it is running. */
2129         vhost_destroy_device_notify(dev);
2130
2131         dev->flags &= ~VIRTIO_DEV_READY;
2132         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
2133
2134         /* Here we are safe to get the indexes */
2135         if (vq_is_packed(dev)) {
2136                 /*
2137                  * Bit[0:14]: avail index
2138                  * Bit[15]: avail wrap counter
2139                  */
2140                 val = vq->last_avail_idx & 0x7fff;
2141                 val |= vq->avail_wrap_counter << 15;
2142                 msg->payload.state.num = val;
2143         } else {
2144                 msg->payload.state.num = vq->last_avail_idx;
2145         }
2146
2147         VHOST_LOG_CONFIG(INFO,
2148                 "vring base idx:%d num:%d\n", msg->payload.state.index,
2149                 msg->payload.state.num);
2150         /*
2151          * Based on the current QEMU vhost-user implementation, this message
2152          * is sent and only sent in vhost_vring_stop.
2153          * TODO: clean up the vring; it isn't usable from this point on.
2154          */
2155         if (vq->kickfd >= 0)
2156                 close(vq->kickfd);
2157
2158         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
2159
2160         if (vq->callfd >= 0)
2161                 close(vq->callfd);
2162
2163         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
2164
2165         vq->signalled_used_valid = false;
2166
2167         if (vq_is_packed(dev)) {
2168                 rte_free(vq->shadow_used_packed);
2169                 vq->shadow_used_packed = NULL;
2170         } else {
2171                 rte_free(vq->shadow_used_split);
2172                 vq->shadow_used_split = NULL;
2173         }
2174
2175         rte_free(vq->batch_copy_elems);
2176         vq->batch_copy_elems = NULL;
2177
2178         rte_free(vq->log_cache);
2179         vq->log_cache = NULL;
2180
2181         msg->size = sizeof(msg->payload.state);
2182         msg->fd_num = 0;
2183
2184         vhost_user_iotlb_flush_all(vq);
2185
2186         vring_invalidate(dev, vq);
2187
2188         return RTE_VHOST_MSG_RESULT_REPLY;
2189 }
2190
2191 /*
2192  * When the virtio queues are ready to work, QEMU will send us a message
2193  * to enable the virtio queue pair.
2194  */
2195 static int
2196 vhost_user_set_vring_enable(struct virtio_net **pdev,
2197                         struct VhostUserMsg *msg,
2198                         int main_fd __rte_unused)
2199 {
2200         struct virtio_net *dev = *pdev;
2201         bool enable = !!msg->payload.state.num;
2202         int index = (int)msg->payload.state.index;
2203
2204         if (validate_msg_fds(msg, 0) != 0)
2205                 return RTE_VHOST_MSG_RESULT_ERR;
2206
2207         VHOST_LOG_CONFIG(INFO,
2208                 "set queue enable: %d to qp idx: %d\n",
2209                 enable, index);
2210
2211         if (enable && dev->virtqueue[index]->async) {
2212                 if (dev->virtqueue[index]->async->pkts_inflight_n) {
2213                         VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
2214                                 "async inflight packets must be completed first\n");
2215                         return RTE_VHOST_MSG_RESULT_ERR;
2216                 }
2217         }
2218
2219         dev->virtqueue[index]->enabled = enable;
2220
2221         return RTE_VHOST_MSG_RESULT_OK;
2222 }
2223
2224 static int
2225 vhost_user_get_protocol_features(struct virtio_net **pdev,
2226                         struct VhostUserMsg *msg,
2227                         int main_fd __rte_unused)
2228 {
2229         struct virtio_net *dev = *pdev;
2230         uint64_t features, protocol_features;
2231
2232         if (validate_msg_fds(msg, 0) != 0)
2233                 return RTE_VHOST_MSG_RESULT_ERR;
2234
2235         rte_vhost_driver_get_features(dev->ifname, &features);
2236         rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2237
2238         msg->payload.u64 = protocol_features;
2239         msg->size = sizeof(msg->payload.u64);
2240         msg->fd_num = 0;
2241
2242         return RTE_VHOST_MSG_RESULT_REPLY;
2243 }
2244
2245 static int
2246 vhost_user_set_protocol_features(struct virtio_net **pdev,
2247                         struct VhostUserMsg *msg,
2248                         int main_fd __rte_unused)
2249 {
2250         struct virtio_net *dev = *pdev;
2251         uint64_t protocol_features = msg->payload.u64;
2252         uint64_t slave_protocol_features = 0;
2253
2254         if (validate_msg_fds(msg, 0) != 0)
2255                 return RTE_VHOST_MSG_RESULT_ERR;
2256
2257         rte_vhost_driver_get_protocol_features(dev->ifname,
2258                         &slave_protocol_features);
2259         if (protocol_features & ~slave_protocol_features) {
2260                 VHOST_LOG_CONFIG(ERR,
2261                         "(%d) received invalid protocol features.\n",
2262                         dev->vid);
2263                 return RTE_VHOST_MSG_RESULT_ERR;
2264         }
2265
2266         dev->protocol_features = protocol_features;
2267         VHOST_LOG_CONFIG(INFO,
2268                 "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2269                 dev->protocol_features);
2270
2271         return RTE_VHOST_MSG_RESULT_OK;
2272 }
2273
2274 static int
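/*
 * VHOST_USER_SET_LOG_BASE handler: map the dirty-page logging area used
 * during live migration and allocate a per-virtqueue log write cache.
 */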
2275 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2276                         int main_fd __rte_unused)
2277 {
2278         struct virtio_net *dev = *pdev;
2279         int fd = msg->fds[0];
2280         uint64_t size, off;
2281         void *addr;
2282         uint32_t i;
2283
2284         if (validate_msg_fds(msg, 1) != 0)
2285                 return RTE_VHOST_MSG_RESULT_ERR;
2286
2287         if (fd < 0) {
2288                 VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2289                 return RTE_VHOST_MSG_RESULT_ERR;
2290         }
2291
2292         if (msg->size != sizeof(VhostUserLog)) {
2293                 VHOST_LOG_CONFIG(ERR,
2294                         "invalid log base msg size: %"PRIu32" != %d\n",
2295                         msg->size, (int)sizeof(VhostUserLog));
2296                 goto close_msg_fds;
2297         }
2298
2299         size = msg->payload.log.mmap_size;
2300         off  = msg->payload.log.mmap_offset;
2301
2302         /* Check for mmap size and offset overflow. */
2303         if (off >= -size) {
2304                 VHOST_LOG_CONFIG(ERR,
2305                         "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2306                         off, size);
2307                 goto close_msg_fds;
2308         }
2309
2310         VHOST_LOG_CONFIG(INFO,
2311                 "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
2312                 size, off);
2313
2314         /*
2315          * mmap from offset 0 to work around a hugepage mmap bug: mmap will
2316          * fail when the offset is not page-size aligned.
2317          */
2318         addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2319         close(fd);
2320         if (addr == MAP_FAILED) {
2321                 VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2322                 return RTE_VHOST_MSG_RESULT_ERR;
2323         }
2324
2325         /*
2326          * Free any previously mapped log memory, since VHOST_USER_SET_LOG_BASE
2327          * may occasionally be received multiple times.
2328          */
2329         if (dev->log_addr) {
2330                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2331         }
2332         dev->log_addr = (uint64_t)(uintptr_t)addr;
2333         dev->log_base = dev->log_addr + off;
2334         dev->log_size = size;
2335
2336         for (i = 0; i < dev->nr_vring; i++) {
2337                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2338
2339                 rte_free(vq->log_cache);
2340                 vq->log_cache = NULL;
2341                 vq->log_cache_nb_elem = 0;
2342                 vq->log_cache = rte_malloc_socket("vq log cache",
2343                                 sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
2344                                 0, vq->numa_node);
2345                 /*
2346                  * If the log cache allocation fails, don't fail the migration,
2347                  * but no caching will be done, which will impact performance.
2348                  */
2349                 if (!vq->log_cache)
2350                         VHOST_LOG_CONFIG(ERR, "Failed to allocate VQ logging cache\n");
2351         }
2352
2353         /*
2354          * The spec is not clear about it (yet), but QEMU doesn't expect
2355          * any payload in the reply.
2356          */
2357         msg->size = 0;
2358         msg->fd_num = 0;
2359
2360         return RTE_VHOST_MSG_RESULT_REPLY;
2361
2362 close_msg_fds:
2363         close_msg_fds(msg);
2364         return RTE_VHOST_MSG_RESULT_ERR;
2365 }
2366
2367 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2368                         struct VhostUserMsg *msg,
2369                         int main_fd __rte_unused)
2370 {
2371         if (validate_msg_fds(msg, 1) != 0)
2372                 return RTE_VHOST_MSG_RESULT_ERR;
2373
2374         close(msg->fds[0]);
2375         VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2376
2377         return RTE_VHOST_MSG_RESULT_OK;
2378 }
2379
2380 /*
2381  * A RARP packet is constructed and broadcast to notify switches about
2382  * the new location of the migrated VM, so that packets from outside will
2383  * not be lost after migration.
2384  *
2385  * However, we don't actually "send" a RARP packet here. Instead, we set
2386  * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
2387  */
2388 static int
2389 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2390                         int main_fd __rte_unused)
2391 {
2392         struct virtio_net *dev = *pdev;
2393         uint8_t *mac = (uint8_t *)&msg->payload.u64;
2394         struct rte_vdpa_device *vdpa_dev;
2395
2396         if (validate_msg_fds(msg, 0) != 0)
2397                 return RTE_VHOST_MSG_RESULT_ERR;
2398
2399         VHOST_LOG_CONFIG(DEBUG,
2400                 ":: mac: " RTE_ETHER_ADDR_PRT_FMT "\n",
2401                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2402         memcpy(dev->mac.addr_bytes, mac, RTE_ETHER_ADDR_LEN);
2403
2404         /*
2405          * Set the flag to inject a RARP broadcast packet at
2406          * rte_vhost_dequeue_burst().
2407          *
2408          * __ATOMIC_RELEASE ordering is for making sure the mac is
2409          * copied before the flag is set.
2410          */
2411         __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2412         vdpa_dev = dev->vdpa_dev;
2413         if (vdpa_dev && vdpa_dev->ops->migration_done)
2414                 vdpa_dev->ops->migration_done(dev->vid);
2415
2416         return RTE_VHOST_MSG_RESULT_OK;
2417 }
2418
2419 static int
2420 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2421                         int main_fd __rte_unused)
2422 {
2423         struct virtio_net *dev = *pdev;
2424
2425         if (validate_msg_fds(msg, 0) != 0)
2426                 return RTE_VHOST_MSG_RESULT_ERR;
2427
2428         if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2429                         msg->payload.u64 > VIRTIO_MAX_MTU) {
2430                 VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2431                                 msg->payload.u64);
2432
2433                 return RTE_VHOST_MSG_RESULT_ERR;
2434         }
2435
2436         dev->mtu = msg->payload.u64;
2437
2438         return RTE_VHOST_MSG_RESULT_OK;
2439 }
2440
2441 static int
2442 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2443                         int main_fd __rte_unused)
2444 {
2445         struct virtio_net *dev = *pdev;
2446         int fd = msg->fds[0];
2447
2448         if (validate_msg_fds(msg, 1) != 0)
2449                 return RTE_VHOST_MSG_RESULT_ERR;
2450
2451         if (fd < 0) {
2452                 VHOST_LOG_CONFIG(ERR,
2453                                 "Invalid file descriptor for slave channel (%d)\n",
2454                                 fd);
2455                 return RTE_VHOST_MSG_RESULT_ERR;
2456         }
2457
2458         if (dev->slave_req_fd >= 0)
2459                 close(dev->slave_req_fd);
2460
2461         dev->slave_req_fd = fd;
2462
2463         return RTE_VHOST_MSG_RESULT_OK;
2464 }
2465
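/*
 * Check whether an IOTLB message's IOVA range overlaps the split ring's
 * descriptor, avail, used or log areas, in which case the ring addresses
 * must be retranslated or invalidated.
 */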
2466 static int
2467 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2468 {
2469         struct vhost_vring_addr *ra;
2470         uint64_t start, end, len;
2471
2472         start = imsg->iova;
2473         end = start + imsg->size;
2474
2475         ra = &vq->ring_addrs;
2476         len = sizeof(struct vring_desc) * vq->size;
2477         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2478                 return 1;
2479
2480         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2481         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2482                 return 1;
2483
2484         len = sizeof(struct vring_used) +
2485                sizeof(struct vring_used_elem) * vq->size;
2486         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2487                 return 1;
2488
2489         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2490                 len = sizeof(uint64_t);
2491                 if (ra->log_guest_addr < end &&
2492                     (ra->log_guest_addr + len) > start)
2493                         return 1;
2494         }
2495
2496         return 0;
2497 }
2498
2499 static int
2500 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2501 {
2502         struct vhost_vring_addr *ra;
2503         uint64_t start, end, len;
2504
2505         start = imsg->iova;
2506         end = start + imsg->size;
2507
2508         ra = &vq->ring_addrs;
2509         len = sizeof(struct vring_packed_desc) * vq->size;
2510         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2511                 return 1;
2512
2513         len = sizeof(struct vring_packed_desc_event);
2514         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2515                 return 1;
2516
2517         len = sizeof(struct vring_packed_desc_event);
2518         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2519                 return 1;
2520
2521         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2522                 len = sizeof(uint64_t);
2523                 if (ra->log_guest_addr < end &&
2524                     (ra->log_guest_addr + len) > start)
2525                         return 1;
2526         }
2527
2528         return 0;
2529 }
2530
2531 static int is_vring_iotlb(struct virtio_net *dev,
2532                           struct vhost_virtqueue *vq,
2533                           struct vhost_iotlb_msg *imsg)
2534 {
2535         if (vq_is_packed(dev))
2536                 return is_vring_iotlb_packed(vq, imsg);
2537         else
2538                 return is_vring_iotlb_split(vq, imsg);
2539 }
2540
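/*
 * VHOST_USER_IOTLB_MSG handler: insert or remove IOTLB cache entries on
 * behalf of the frontend, and retranslate or invalidate any ring whose
 * addresses fall within the affected range.
 */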
2541 static int
2542 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2543                         int main_fd __rte_unused)
2544 {
2545         struct virtio_net *dev = *pdev;
2546         struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2547         uint16_t i;
2548         uint64_t vva, len;
2549
2550         if (validate_msg_fds(msg, 0) != 0)
2551                 return RTE_VHOST_MSG_RESULT_ERR;
2552
2553         switch (imsg->type) {
2554         case VHOST_IOTLB_UPDATE:
2555                 len = imsg->size;
2556                 vva = qva_to_vva(dev, imsg->uaddr, &len);
2557                 if (!vva)
2558                         return RTE_VHOST_MSG_RESULT_ERR;
2559
2560                 for (i = 0; i < dev->nr_vring; i++) {
2561                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2562
2563                         if (!vq)
2564                                 continue;
2565
2566                         vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2567                                         len, imsg->perm);
2568
2569                         if (is_vring_iotlb(dev, vq, imsg))
2570                                 *pdev = dev = translate_ring_addresses(dev, i);
2571                 }
2572                 break;
2573         case VHOST_IOTLB_INVALIDATE:
2574                 for (i = 0; i < dev->nr_vring; i++) {
2575                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2576
2577                         if (!vq)
2578                                 continue;
2579
2580                         vhost_user_iotlb_cache_remove(vq, imsg->iova,
2581                                         imsg->size);
2582
2583                         if (is_vring_iotlb(dev, vq, imsg))
2584                                 vring_invalidate(dev, vq);
2585                 }
2586                 break;
2587         default:
2588                 VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2589                                 imsg->type);
2590                 return RTE_VHOST_MSG_RESULT_ERR;
2591         }
2592
2593         return RTE_VHOST_MSG_RESULT_OK;
2594 }
2595
2596 static int
2597 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2598                         struct VhostUserMsg *msg,
2599                         int main_fd __rte_unused)
2600 {
2601         struct virtio_net *dev = *pdev;
2602 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2603         struct uffdio_api api_struct;
2604
2605         if (validate_msg_fds(msg, 0) != 0)
2606                 return RTE_VHOST_MSG_RESULT_ERR;
2607
2608         dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2609
2610         if (dev->postcopy_ufd == -1) {
2611                 VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2612                         strerror(errno));
2613                 return RTE_VHOST_MSG_RESULT_ERR;
2614         }
2615         api_struct.api = UFFD_API;
2616         api_struct.features = 0;
2617         if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2618                 VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2619                         strerror(errno));
2620                 close(dev->postcopy_ufd);
2621                 dev->postcopy_ufd = -1;
2622                 return RTE_VHOST_MSG_RESULT_ERR;
2623         }
2624         msg->fds[0] = dev->postcopy_ufd;
2625         msg->fd_num = 1;
2626
2627         return RTE_VHOST_MSG_RESULT_REPLY;
2628 #else
2629         dev->postcopy_ufd = -1;
2630         msg->fd_num = 0;
2631
2632         return RTE_VHOST_MSG_RESULT_ERR;
2633 #endif
2634 }
2635
2636 static int
2637 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2638                         struct VhostUserMsg *msg __rte_unused,
2639                         int main_fd __rte_unused)
2640 {
2641         struct virtio_net *dev = *pdev;
2642
2643         if (validate_msg_fds(msg, 0) != 0)
2644                 return RTE_VHOST_MSG_RESULT_ERR;
2645
2646         if (dev->mem && dev->mem->nregions) {
2647                 VHOST_LOG_CONFIG(ERR,
2648                         "Regions already registered at postcopy-listen\n");
2649                 return RTE_VHOST_MSG_RESULT_ERR;
2650         }
2651         dev->postcopy_listening = 1;
2652
2653         return RTE_VHOST_MSG_RESULT_OK;
2654 }
2655
2656 static int
2657 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2658                         int main_fd __rte_unused)
2659 {
2660         struct virtio_net *dev = *pdev;
2661
2662         if (validate_msg_fds(msg, 0) != 0)
2663                 return RTE_VHOST_MSG_RESULT_ERR;
2664
2665         dev->postcopy_listening = 0;
2666         if (dev->postcopy_ufd >= 0) {
2667                 close(dev->postcopy_ufd);
2668                 dev->postcopy_ufd = -1;
2669         }
2670
2671         msg->payload.u64 = 0;
2672         msg->size = sizeof(msg->payload.u64);
2673         msg->fd_num = 0;
2674
2675         return RTE_VHOST_MSG_RESULT_REPLY;
2676 }
2677
2678 static int
2679 vhost_user_get_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2680                       int main_fd __rte_unused)
2681 {
2682         struct virtio_net *dev = *pdev;
2683
2684         if (validate_msg_fds(msg, 0) != 0)
2685                 return RTE_VHOST_MSG_RESULT_ERR;
2686
2687         msg->payload.u64 = dev->status;
2688         msg->size = sizeof(msg->payload.u64);
2689         msg->fd_num = 0;
2690
2691         return RTE_VHOST_MSG_RESULT_REPLY;
2692 }
2693
2694 static int
2695 vhost_user_set_status(struct virtio_net **pdev, struct VhostUserMsg *msg,
2696                         int main_fd __rte_unused)
2697 {
2698         struct virtio_net *dev = *pdev;
2699
2700         if (validate_msg_fds(msg, 0) != 0)
2701                 return RTE_VHOST_MSG_RESULT_ERR;
2702
2703         /* As per the Virtio specification, the device status is 8 bits long */
2704         if (msg->payload.u64 > UINT8_MAX) {
2705                 VHOST_LOG_CONFIG(ERR, "Invalid VHOST_USER_SET_STATUS payload 0x%" PRIx64 "\n",
2706                                 msg->payload.u64);
2707                 return RTE_VHOST_MSG_RESULT_ERR;
2708         }
2709
2710         dev->status = msg->payload.u64;
2711
2712         if ((dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK) &&
2713             (dev->flags & VIRTIO_DEV_FEATURES_FAILED)) {
2714                 VHOST_LOG_CONFIG(ERR, "FEATURES_OK bit is set but feature negotiation failed\n");
2715                 /*
2716                  * Clear the bit to let the driver know about the feature
2717                  * negotiation failure
2718                  */
2719                 dev->status &= ~VIRTIO_DEVICE_STATUS_FEATURES_OK;
2720         }
2721
2722         VHOST_LOG_CONFIG(INFO, "New device status (0x%08x):\n"
2723                         "\t-RESET: %u\n"
2724                         "\t-ACKNOWLEDGE: %u\n"
2725                         "\t-DRIVER: %u\n"
2726                         "\t-FEATURES_OK: %u\n"
2727                         "\t-DRIVER_OK: %u\n"
2728                         "\t-DEVICE_NEED_RESET: %u\n"
2729                         "\t-FAILED: %u\n",
2730                         dev->status,
2731                         (dev->status == VIRTIO_DEVICE_STATUS_RESET),
2732                         !!(dev->status & VIRTIO_DEVICE_STATUS_ACK),
2733                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER),
2734                         !!(dev->status & VIRTIO_DEVICE_STATUS_FEATURES_OK),
2735                         !!(dev->status & VIRTIO_DEVICE_STATUS_DRIVER_OK),
2736                         !!(dev->status & VIRTIO_DEVICE_STATUS_DEV_NEED_RESET),
2737                         !!(dev->status & VIRTIO_DEVICE_STATUS_FAILED));
2738
2739         return RTE_VHOST_MSG_RESULT_OK;
2740 }
2741
2742 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2743                                         struct VhostUserMsg *msg,
2744                                         int main_fd);
2745 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2746         [VHOST_USER_NONE] = NULL,
2747         [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2748         [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2749         [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2750         [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2751         [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2752         [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2753         [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2754         [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2755         [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2756         [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2757         [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2758         [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2759         [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2760         [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2761         [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2762         [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2763         [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2764         [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2765         [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2766         [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2767         [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2768         [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2769         [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2770         [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2771         [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2772         [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2773         [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2774         [VHOST_USER_SET_STATUS] = vhost_user_set_status,
2775         [VHOST_USER_GET_STATUS] = vhost_user_get_status,
2776 };
2777
2778 /* Return the number of bytes read on success, or a negative value on failure. */
2779 static int
2780 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2781 {
2782         int ret;
2783
2784         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2785                 msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2786         if (ret <= 0) {
2787                 return ret;
2788         } else if (ret != VHOST_USER_HDR_SIZE) {
2789                 VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2790                 close_msg_fds(msg);
2791                 return -1;
2792         }
2793
2794         if (msg->size) {
2795                 if (msg->size > sizeof(msg->payload)) {
2796                         VHOST_LOG_CONFIG(ERR,
2797                                 "invalid msg size: %d\n", msg->size);
2798                         return -1;
2799                 }
2800                 ret = read(sockfd, &msg->payload, msg->size);
2801                 if (ret <= 0)
2802                         return ret;
2803                 if (ret != (int)msg->size) {
2804                         VHOST_LOG_CONFIG(ERR,
2805                                 "read control message failed\n");
2806                         return -1;
2807                 }
2808         }
2809
2810         return ret;
2811 }
2812
2813 static int
2814 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2815 {
2816         if (!msg)
2817                 return 0;
2818
2819         return send_fd_message(sockfd, (char *)msg,
2820                 VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2821 }
2822
2823 static int
2824 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2825 {
2826         if (!msg)
2827                 return 0;
2828
2829         msg->flags &= ~VHOST_USER_VERSION_MASK;
2830         msg->flags &= ~VHOST_USER_NEED_REPLY;
2831         msg->flags |= VHOST_USER_VERSION;
2832         msg->flags |= VHOST_USER_REPLY_MASK;
2833
2834         return send_vhost_message(sockfd, msg);
2835 }
2836
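/*
 * Send a request on the slave channel. When a reply is expected, hold
 * slave_req_lock until the reply arrives so that concurrent requests
 * cannot interleave their replies.
 */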
2837 static int
2838 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2839 {
2840         int ret;
2841
2842         if (msg->flags & VHOST_USER_NEED_REPLY)
2843                 rte_spinlock_lock(&dev->slave_req_lock);
2844
2845         ret = send_vhost_message(dev->slave_req_fd, msg);
2846         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2847                 rte_spinlock_unlock(&dev->slave_req_lock);
2848
2849         return ret;
2850 }

/*
 * Allocate a queue pair if it hasn't been allocated yet
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
                        struct VhostUserMsg *msg)
{
        uint32_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_GET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                VHOST_LOG_CONFIG(ERR,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}
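
/*
 * Illustrative payload encodings handled above (per the vhost-user
 * specification): kick/call/err messages pack the ring index into the low
 * bits of a u64, while the other vring messages carry an explicit index
 * field:
 *
 *      SET_VRING_KICK: payload.u64 = index, optionally OR'ed with
 *                      VHOST_USER_VRING_NOFD_MASK when no fd is passed
 *      SET_VRING_NUM:  payload.state = { .index = index, .num = size }
 *      SET_VRING_ADDR: payload.addr  = { .index = index, ... }
 */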

static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        /*
         * The virtqueue array may be sparse, so walk it until nr_vring
         * allocated queues have been locked.
         */
        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_lock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_unlock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        struct rte_vdpa_device *vdpa_dev;
        int ret;
        int unlock_required = 0;
        bool handled;
        int request;
        uint32_t i;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0) {
                if (ret < 0)
                        VHOST_LOG_CONFIG(ERR,
                                "vhost read message failed\n");
                else
                        VHOST_LOG_CONFIG(INFO,
                                "vhost peer closed\n");

                return -1;
        }

        ret = 0;
        request = msg.request.master;
        if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
                        vhost_message_str[request]) {
                if (request != VHOST_USER_IOTLB_MSG)
                        VHOST_LOG_CONFIG(INFO, "read message %s\n",
                                vhost_message_str[request]);
                else
                        VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
                                vhost_message_str[request]);
        } else {
                VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
        }

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to alloc queue\n");
                /* Do not leak any fds received with the message. */
                close_msg_fds(&msg);
                return -1;
        }

        /*
         * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
         * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
         * and the device is destroyed. destroy_device waits for queues to be
         * inactive, so it is safe. Otherwise taking the access_lock
         * would cause a deadlock.
         */
        switch (request) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
        case VHOST_USER_SET_MEM_TABLE:
        case VHOST_USER_SET_LOG_BASE:
        case VHOST_USER_SET_LOG_FD:
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_ADDR:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
        case VHOST_USER_SET_VRING_ENABLE:
        case VHOST_USER_SEND_RARP:
        case VHOST_USER_NET_SET_MTU:
        case VHOST_USER_SET_SLAVE_REQ_FD:
                if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
                        vhost_user_lock_all_queue_pairs(dev);
                        unlock_required = 1;
                }
                break;
        default:
                break;
        }

        handled = false;
        if (dev->extern_ops.pre_msg_handle) {
                ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
                                (void *)&msg);
                switch (ret) {
                case RTE_VHOST_MSG_RESULT_REPLY:
                        send_vhost_reply(fd, &msg);
                        /* Fall-through */
                case RTE_VHOST_MSG_RESULT_ERR:
                case RTE_VHOST_MSG_RESULT_OK:
                        handled = true;
                        goto skip_to_post_handle;
                case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
                default:
                        break;
                }
        }
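
        /*
         * Illustrative sketch (assumed against the public API in
         * rte_vhost.h): an application, e.g. a vDPA driver, hooks these
         * pre/post callbacks roughly as follows:
         *
         *      static enum rte_vhost_msg_result
         *      pre_handle(int vid, void *msg)
         *      {
         *              return RTE_VHOST_MSG_RESULT_NOT_HANDLED;
         *      }
         *
         *      static const struct rte_vhost_user_extern_ops ops = {
         *              .pre_msg_handle = pre_handle,
         *      };
         *      rte_vhost_extern_callback_register(vid, &ops, NULL);
         *
         * Returning RTE_VHOST_MSG_RESULT_NOT_HANDLED lets the built-in
         * handlers below process the message as usual.
         */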

        if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
                if (!vhost_message_handlers[request])
                        goto skip_to_post_handle;
                ret = vhost_message_handlers[request](&dev, &msg, fd);

                switch (ret) {
                case RTE_VHOST_MSG_RESULT_ERR:
                        VHOST_LOG_CONFIG(ERR,
                                "Processing %s failed.\n",
                                vhost_message_str[request]);
                        handled = true;
                        break;
                case RTE_VHOST_MSG_RESULT_OK:
                        VHOST_LOG_CONFIG(DEBUG,
                                "Processing %s succeeded.\n",
                                vhost_message_str[request]);
                        handled = true;
                        break;
                case RTE_VHOST_MSG_RESULT_REPLY:
                        VHOST_LOG_CONFIG(DEBUG,
                                "Processing %s succeeded and needs reply.\n",
                                vhost_message_str[request]);
                        send_vhost_reply(fd, &msg);
                        handled = true;
                        break;
                default:
                        break;
                }
        }

skip_to_post_handle:
        if (ret != RTE_VHOST_MSG_RESULT_ERR &&
                        dev->extern_ops.post_msg_handle) {
                ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
                                (void *)&msg);
                switch (ret) {
                case RTE_VHOST_MSG_RESULT_REPLY:
                        send_vhost_reply(fd, &msg);
                        /* Fall-through */
                case RTE_VHOST_MSG_RESULT_ERR:
                case RTE_VHOST_MSG_RESULT_OK:
                        handled = true;
                        /* Fall-through */
                case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
                default:
                        break;
                }
        }

        /* If the message was not handled at this stage, treat it as an error */
        if (!handled) {
                VHOST_LOG_CONFIG(ERR,
                        "vhost message (req: %d) was not handled.\n", request);
                close_msg_fds(&msg);
                ret = RTE_VHOST_MSG_RESULT_ERR;
        }

        /*
         * If the request required a reply that was already sent,
         * this optional reply-ack won't be sent as the
         * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
         */
        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
                msg.size = sizeof(msg.payload.u64);
                msg.fd_num = 0;
                send_vhost_reply(fd, &msg);
        } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
                VHOST_LOG_CONFIG(ERR,
                        "vhost message handling failed.\n");
                return -1;
        }
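
        /*
         * Reply-ack example (per the REPLY_ACK protocol feature): for a
         * failed request that arrived with VHOST_USER_NEED_REPLY set, the
         * block above answers with
         *
         *      msg.payload.u64 = 1;    (non-zero means failure, 0 success)
         *      msg.size = sizeof(msg.payload.u64);
         *
         * so the master can detect the error instead of timing out.
         */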

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];
                bool cur_ready = vq_is_ready(dev, vq);

                /*
                 * vq_is_ready() returns false for a NULL vq, so the state
                 * below is only touched for allocated queues.
                 */
                if (cur_ready != (vq && vq->ready)) {
                        vq->ready = cur_ready;
                        vhost_user_notify_queue_state(dev, i, cur_ready);
                }
        }

        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);

        if (!virtio_is_ready(dev))
                goto out;

        /*
         * Virtio is now ready. If not done already, it is time
         * to notify the application it can process the rings and
         * configure the vDPA device if present.
         */

        if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                if (dev->notify_ops->new_device(dev->vid) == 0)
                        dev->flags |= VIRTIO_DEV_RUNNING;
        }

        vdpa_dev = dev->vdpa_dev;
        if (!vdpa_dev)
                goto out;

        if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
                if (vdpa_dev->ops->dev_conf(dev->vid))
                        VHOST_LOG_CONFIG(ERR,
                                         "Failed to configure vDPA device\n");
                else
                        dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
        }

out:
        return 0;
}

/*
 * Wait for the master's ack to a slave request sent with
 * VHOST_USER_NEED_REPLY, then release the slave_req_lock taken by
 * send_vhost_slave_message().
 */
static int
process_slave_message_reply(struct virtio_net *dev,
                            const struct VhostUserMsg *msg)
{
        struct VhostUserMsg msg_reply;
        int ret;

        if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
                return 0;

        ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
        if (ret <= 0) {
                if (ret < 0)
                        VHOST_LOG_CONFIG(ERR,
                                "vhost read slave message reply failed\n");
                else
                        VHOST_LOG_CONFIG(INFO,
                                "vhost peer closed\n");
                ret = -1;
                goto out;
        }

        ret = 0;
        if (msg_reply.request.slave != msg->request.slave) {
                VHOST_LOG_CONFIG(ERR,
                        "Received unexpected msg type (%u), expected %u\n",
                        msg_reply.request.slave, msg->request.slave);
                ret = -1;
                goto out;
        }

        ret = msg_reply.payload.u64 ? -1 : 0;

out:
        rte_spinlock_unlock(&dev->slave_req_lock);
        return ret;
}

int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}
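
/*
 * Sketch of the resulting exchange (per the vhost-user IOTLB protocol): on
 * a translation miss the slave sends
 *
 *      { .type = VHOST_IOTLB_MISS, .iova = <iova>, .perm = <r/w> }
 *
 * and the master answers on the main channel with a VHOST_USER_IOTLB_MSG
 * of type VHOST_IOTLB_UPDATE carrying the iova -> uaddr mapping and its
 * size, which the IOTLB message handler inserts into the IOTLB cache.
 */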

static int
vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
                .flags = VHOST_USER_VERSION,
                .size = 0,
        };

        if (need_reply)
                msg.flags |= VHOST_USER_NEED_REPLY;

        ret = send_vhost_slave_message(dev, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                                "Failed to send config change (%d)\n",
                                ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_slave_config_change(int vid, bool need_reply)
{
        struct virtio_net *dev;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        return vhost_user_slave_config_change(dev, need_reply);
}
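
/*
 * Usage sketch (hypothetical driver code): a vDPA driver that detects a
 * device config-space change (e.g. link status) can make the master re-read
 * the config and notify the guest with:
 *
 *      if (rte_vhost_slave_config_change(vid, false) < 0)
 *              DRV_LOG(ERR, "config change notification failed");
 *
 * Passing need_reply == true additionally blocks until the master acks the
 * notification via the reply-ack mechanism above. DRV_LOG is a placeholder
 * for the driver's own logging macro.
 */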

static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
                int index, int fd, uint64_t offset, uint64_t size)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
                .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
                .size = sizeof(msg.payload.area),
                .payload.area = {
                        .u64 = index & VHOST_USER_VRING_IDX_MASK,
                        .size = size,
                        .offset = offset,
                },
        };

        if (fd < 0) {
                msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
        } else {
                msg.fds[0] = fd;
                msg.fd_num = 1;
        }

        ret = send_vhost_slave_message(dev, &msg);
        if (ret < 0) {
                VHOST_LOG_CONFIG(ERR,
                        "Failed to set host notifier (%d)\n", ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
        struct virtio_net *dev;
        struct rte_vdpa_device *vdpa_dev;
        int vfio_device_fd, ret = 0;
        uint64_t offset, size;
        unsigned int i, q_start, q_last;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev == NULL)
                return -ENODEV;

        if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
            !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
                return -ENOTSUP;

        if (qid == RTE_VHOST_QUEUE_ALL) {
                /* Guard the q_last computation against an empty device. */
                if (dev->nr_vring == 0)
                        return -EINVAL;
                q_start = 0;
                q_last = dev->nr_vring - 1;
        } else {
                if (qid >= dev->nr_vring)
                        return -EINVAL;
                q_start = qid;
                q_last = qid;
        }

        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

        vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
        if (vfio_device_fd < 0)
                return -ENOTSUP;

        if (enable) {
                for (i = q_start; i <= q_last; i++) {
                        if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
                                        &size) < 0) {
                                ret = -ENOTSUP;
                                goto disable;
                        }

                        if (vhost_user_slave_set_vring_host_notifier(dev, i,
                                        vfio_device_fd, offset, size) < 0) {
                                ret = -EFAULT;
                                goto disable;
                        }
                }
        } else {
disable:
                /*
                 * Also reached on enable failure, to roll back any
                 * notifiers that were already installed.
                 */
                for (i = q_start; i <= q_last; i++) {
                        vhost_user_slave_set_vring_host_notifier(dev, i, -1,
                                        0, 0);
                }
        }

        return ret;
}
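
/*
 * Usage sketch (hypothetical driver code): a vDPA driver would typically
 * map the notification areas for every queue once the device has been
 * configured:
 *
 *      if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
 *              DRV_LOG(INFO, "direct notifiers unavailable, using kickfd");
 *
 * On failure the rings still work; guest kicks simply keep being relayed
 * through the kick fd instead of being written straight to the device
 * doorbell. DRV_LOG is a placeholder for the driver's own logging macro.
 */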