lib/librte_vhost/vhost_user.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
#include <linux/userfaultfd.h>
#endif
#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
#include <linux/memfd.h>
#define MEMFD_SUPPORTED
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

#define INFLIGHT_ALIGNMENT      64
#define INFLIGHT_VERSION        0x1

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
        [VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
        [VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
        [VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
        [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
        [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
};

static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);

static void
close_msg_fds(struct VhostUserMsg *msg)
{
        int i;

        for (i = 0; i < msg->fd_num; i++)
                close(msg->fds[i]);
}

/*
 * Ensure the expected number of FDs is received,
 * close all FDs and return an error if this is not the case.
 */
static int
validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
{
        if (msg->fd_num == expected_fds)
                return 0;

        VHOST_LOG_CONFIG(ERR,
                " Expect %d FDs for request %s, received %d\n",
                expected_fds,
                vhost_message_str[msg->request.master],
                msg->fd_num);

        close_msg_fds(msg);

        return -1;
}
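
/*
 * Usage sketch (hypothetical handler, for illustration only; not part of the
 * message table): every handler is expected to validate the FD count before
 * touching the payload, since both come from an untrusted master.
 */
static __rte_unused int
vhost_user_example_handler(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        /* Only now is it safe to interpret msg->payload. */
        return RTE_VHOST_MSG_RESULT_OK;
}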

static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
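
/*
 * Note on get_blk_size(): for a descriptor backed by hugetlbfs, st_blksize
 * reports the hugepage size, which vhost_user_set_mem_table() below uses as
 * the mmap() length alignment.
 */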

/*
 * Reclaim all the outstanding zmbufs for a virtqueue.
 */
static void
drain_zmbuf_list(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                while (!mbuf_is_consumed(zmbuf->mbuf))
                        usleep(1000);

                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
                restore_mbuf(zmbuf->mbuf);
                rte_pktmbuf_free(zmbuf->mbuf);
                put_zmbuf(zmbuf);
                vq->nr_zmbuf -= 1;
        }
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;
        struct vhost_virtqueue *vq;

        if (!dev || !dev->mem)
                return;

        if (dev->dequeue_zero_copy) {
                for (i = 0; i < dev->nr_vring; i++) {
                        vq = dev->virtqueue[i];
                        if (vq)
                                drain_zmbuf_list(vq);
                }
        }

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->inflight_info) {
                if (dev->inflight_info->addr) {
                        munmap(dev->inflight_info->addr,
                               dev->inflight_info->size);
                        dev->inflight_info->addr = NULL;
                }

                if (dev->inflight_info->fd >= 0) {
                        close(dev->inflight_info->fd);
                        dev->inflight_info->fd = -1;
                }

                free(dev->inflight_info);
                dev->inflight_info = NULL;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }

        if (dev->postcopy_ufd >= 0) {
                close(dev->postcopy_ufd);
                dev->postcopy_ufd = -1;
        }

        dev->postcopy_listening = 0;
}

static void
vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
                              int enable)
{
        struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;

        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);
}

/*
 * This function just returns success at the moment unless
 * the device hasn't been initialised.
 */
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        return RTE_VHOST_MSG_RESULT_OK;
}

static int
vhost_user_reset_owner(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The features that we support are requested.
 */
static int
vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &features);

        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * The number of queues that we support is requested.
 */
static int
vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint32_t queue_num = 0;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);
        msg->fd_num = 0;

        return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        uint64_t features = msg->payload.u64;
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return RTE_VHOST_MSG_RESULT_OK;

                /*
                 * Error out if master tries to change features while device is
                 * in running state. The exception being VHOST_F_LOG_ALL, which
                 * is enabled when the live-migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_CONFIG(INFO,
                "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
        VHOST_LOG_CONFIG(DEBUG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        cleanup_vq_inflight(dev, vq);
                        free_vq(dev, vq);
                }
        }

        vdpa_dev = dev->vdpa_dev;
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         *
         * VIRTIO 1.1 2.7 Virtqueues says:
         *
         *   Packed virtqueues support up to 2^15 entries each.
         */
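        /* (vq->size & (vq->size - 1)) is nonzero exactly when a lower bit is
         * set besides the top one, i.e. when the size is not a power of two:
         * for example, 256 & 255 == 0 while 384 & 383 == 256.
         */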
        if (!vq_is_packed(dev)) {
                if (vq->size & (vq->size - 1)) {
                        VHOST_LOG_CONFIG(ERR,
                                "invalid virtqueue size %u\n", vq->size);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq->size > 32768) {
                VHOST_LOG_CONFIG(ERR,
                        "invalid virtqueue size %u\n", vq->size);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                if (vq->zmbufs)
                        rte_free(vq->zmbufs);
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        VHOST_LOG_CONFIG(WARNING,
                                "failed to allocate mem for zero copy; "
                                "zero copy is force disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        if (vq_is_packed(dev)) {
                if (vq->shadow_used_packed)
                        rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for shadow used ring.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }

        } else {
                if (vq->shadow_used_split)
                        rte_free(vq->shadow_used_split);
                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_split) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for shadow used ring.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        if (vq->batch_copy_elems)
                rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to allocate memory for batching copy.\n");
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so they are
 * allocated on the same NUMA node as the memory holding the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        if (dev->flags & VIRTIO_DEV_RUNNING)
                return dev;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                VHOST_LOG_CONFIG(INFO,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                if (dev->dequeue_zero_copy) {
                        new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                                        sizeof(struct zcopy_mbuf), 0, newnode);
                        if (new_zmbuf) {
                                rte_free(vq->zmbufs);
                                vq->zmbufs = new_zmbuf;
                        }
                }

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                VHOST_LOG_CONFIG(ERR,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                VHOST_LOG_CONFIG(INFO,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        if (unlikely(!dev || !dev->mem))
                goto out_error;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
out_error:
        *len = 0;

        return 0;
}
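
/*
 * Worked example for qva_to_vva() (illustrative numbers): for a region with
 * guest_user_addr = 0x7f0000000000, host_user_addr = 0x400000000000 and
 * size = 0x40000000, a QVA of 0x7f0000001000 translates to
 * 0x7f0000001000 - 0x7f0000000000 + 0x400000000000 = 0x400000001000,
 * with *len clipped so the range cannot run past the region end.
 */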


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vhost_user_iotlb_rd_lock(vq);
                vva = vhost_iova_to_vva(dev, vq, ra,
                                        size, VHOST_ACCESS_RW);
                vhost_user_iotlb_rd_unlock(vq);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static uint64_t
log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint64_t log_gpa;

        vhost_user_iotlb_rd_lock(vq);
        log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
        vhost_user_iotlb_rd_unlock(vq);

        return log_gpa;
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len, expected_len;

        if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
                vq->log_guest_addr =
                        log_addr_to_gpa(dev, vq);
                if (vq->log_guest_addr == 0) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map log_guest_addr.\n",
                                dev->vid);
                        return dev;
                }
        }

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        VHOST_LOG_CONFIG(DEBUG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                vq->access_ok = 1;
                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
                len += sizeof(uint16_t);
        expected_len = len;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != expected_len) {
                VHOST_LOG_CONFIG(DEBUG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                VHOST_LOG_CONFIG(WARNING,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->access_ok = 1;

        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}
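
/*
 * For reference (illustrative arithmetic): with a split ring of 256 entries
 * and VIRTIO_RING_F_EVENT_IDX negotiated, the mapped lengths checked above
 * are 16 * 256 = 4096 bytes of descriptors, 4 + 2 * 256 + 2 = 518 bytes of
 * avail ring, and 4 + 8 * 256 + 2 = 2054 bytes of used ring.
 */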

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        bool access_ok;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;

        /* addr->index refers to the queue index. The txq is 1, the rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        access_ok = vq->access_ok;

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

        if ((vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
                        access_ok) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return RTE_VHOST_MSG_RESULT_ERR;

                *pdev = dev;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
                        struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
{
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
        uint64_t val = msg->payload.state.num;

        if (validate_msg_fds(msg, 0) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (vq_is_packed(dev)) {
                /*
                 * Bit[0:14]: avail index
                 * Bit[15]: avail wrap counter
                 */
                vq->last_avail_idx = val & 0x7fff;
                vq->avail_wrap_counter = !!(val & (0x1 << 15));
                /*
                 * Set used index to same value as available one, as
                 * their values should be the same since ring processing
                 * was stopped at get time.
                 */
                vq->last_used_idx = vq->last_avail_idx;
                vq->used_wrap_counter = vq->avail_wrap_counter;
        } else {
                vq->last_used_idx = msg->payload.state.num;
                vq->last_avail_idx = msg->payload.state.num;
        }

        return RTE_VHOST_MSG_RESULT_OK;
}
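
/*
 * Example for the packed-ring encoding above (illustrative value): a
 * payload.state.num of 0x8005 decodes to last_avail_idx = 5 with
 * avail_wrap_counter = 1.
 */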

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;
        struct guest_page *old_pages;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                old_pages = dev->guest_pages;
                dev->guest_pages = rte_realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page),
                                        RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
                        rte_free(old_pages);
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        /* sort guest page array if over binary search threshold */
        if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
                qsort((void *)dev->guest_pages, dev->nr_guest_pages,
                        sizeof(struct guest_page), guest_page_addrcmp);
        }

        return 0;
}
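
/*
 * Worked example for add_guest_pages() (illustrative numbers): a 12 KB
 * region starting at guest physical 0x1000 with 4 KB pages is walked as
 * three 4 KB chunks; chunks whose host physical addresses happen to be
 * adjacent are merged back into a single guest_page entry by
 * add_one_guest_page().
 */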

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                VHOST_LOG_CONFIG(INFO,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory *memory = &msg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (validate_msg_fds(msg, memory->nregions) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;

        if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
                VHOST_LOG_CONFIG(ERR,
                        "too many memory regions (%u)\n", memory->nregions);
                return RTE_VHOST_MSG_RESULT_ERR;
        }

        if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
                VHOST_LOG_CONFIG(INFO,
                        "(%d) memory regions not changed\n", dev->vid);

                close_msg_fds(msg);

                return RTE_VHOST_MSG_RESULT_OK;
        }

        if (dev->mem) {
                if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
                        struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;

                        if (vdpa_dev && vdpa_dev->ops->dev_close)
                                vdpa_dev->ops->dev_close(dev->vid);
                        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
                }
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        /* Flush IOTLB cache as previous HVAs are now invalid */
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);

        dev->nr_guest_pages = 0;
        if (dev->guest_pages == NULL) {
                dev->max_guest_pages = 8;
                dev->guest_pages = rte_zmalloc(NULL,
                                        dev->max_guest_pages *
                                        sizeof(struct guest_page),
                                        RTE_CACHE_LINE_SIZE);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
        if (dev->mem == NULL) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return RTE_VHOST_MSG_RESULT_ERR;
        }
        dev->mem->nregions = memory->nregions;

        for (i = 0; i < memory->nregions; i++) {
                fd  = msg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
                reg->guest_user_addr = memory->regions[i].userspace_addr;
                reg->size            = memory->regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory->regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow: in unsigned
                 * 64-bit arithmetic, mmap_offset >= -reg->size means
                 * mmap_offset + reg->size would wrap past 2^64.
                 */
                if (mmap_offset >= -reg->size) {
                        VHOST_LOG_CONFIG(ERR,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* On older long-term Linux versions (e.g. 2.6.32 and 3.2.72),
                 * mmap() without the MAP_ANONYMOUS flag must be called with a
                 * length argument aligned to the hugepage size, or it fails
                 * with EINVAL.
                 *
                 * To avoid failure, make sure the length stays aligned here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        VHOST_LOG_CONFIG(ERR,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
                if (mmap_size == 0) {
                        /*
                         * It could happen if the initial mmap_size + alignment
                         * overflows a uint64_t, which could happen if either
                         * the mmap_size or the alignment value is wrong.
                         *
                         * The mmap() kernel implementation would return an
                         * error, but better to catch it before and provide
                         * useful info in the logs.
                         */
                        VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
                                        "or alignment (0x%" PRIx64 ") is invalid\n",
                                        reg->size + mmap_offset, alignment);
                        goto err_mmap;
                }

                populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        VHOST_LOG_CONFIG(ERR,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                VHOST_LOG_CONFIG(ERR,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                VHOST_LOG_CONFIG(INFO,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);

                if (dev->postcopy_listening) {
                        /*
                         * We don't have a better way right now than sharing
                         * DPDK's virtual address with Qemu, so that Qemu can
                         * retrieve the region offset when handling userfaults.
                         */
                        memory->regions[i].userspace_addr =
                                reg->host_user_addr;
                }
        }
        if (dev->postcopy_listening) {
                /* Send the addresses back to qemu */
                msg->fd_num = 0;
                send_vhost_reply(main_fd, msg);

                /* Wait for qemu to acknowledge that it has got the addresses;
                 * we've got to wait before we're allowed to generate faults.
                 */
                VhostUserMsg ack_msg;
                if (read_vhost_message(main_fd, &ack_msg) <= 0) {
                        VHOST_LOG_CONFIG(ERR,
                                "Failed to read qemu ack on postcopy set-mem-table\n");
                        goto err_mmap;
                }

                if (validate_msg_fds(&ack_msg, 0) != 0)
                        goto err_mmap;

                if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
                        VHOST_LOG_CONFIG(ERR,
                                "Bad qemu ack on postcopy set-mem-table (%d)\n",
                                ack_msg.request.master);
                        goto err_mmap;
                }

                /* Now register the ranges with userfaultfd so we can use the memory */
                for (i = 0; i < memory->nregions; i++) {
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
                        reg = &dev->mem->regions[i];
                        struct uffdio_register reg_struct;

                        /*
                         * Let's register all the mmap'ed area to ensure
                         * alignment on page boundary.
                         */
                        reg_struct.range.start =
                                (uint64_t)(uintptr_t)reg->mmap_addr;
                        reg_struct.range.len = reg->mmap_size;
                        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

                        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
                                                &reg_struct)) {
                                VHOST_LOG_CONFIG(ERR,
                                        "Failed to register ufd for region %d: (ufd = %d) %s\n",
                                        i, dev->postcopy_ufd,
                                        strerror(errno));
                                goto err_mmap;
                        }
                        VHOST_LOG_CONFIG(INFO,
                                "\t userfaultfd registered for range : "
                                "%" PRIx64 " - %" PRIx64 "\n",
                                (uint64_t)reg_struct.range.start,
                                (uint64_t)reg_struct.range.start +
                                (uint64_t)reg_struct.range.len - 1);
#else
                        goto err_mmap;
#endif
                }
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev) {
                                dev = *pdev;
                                goto err_mmap;
                        }

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return RTE_VHOST_MSG_RESULT_OK;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return RTE_VHOST_MSG_RESULT_ERR;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = vq->desc_packed && vq->driver_event &&
                        vq->device_event;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->enabled;
}

#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
                return 0;

        for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING))
                VHOST_LOG_CONFIG(INFO,
                        "virtio is now ready for processing.\n");
        return 1;
}

static void *
inflight_mem_alloc(const char *name, size_t size, int *fd)
{
        void *ptr;
        int mfd = -1;
        char fname[20] = "/tmp/memfd-XXXXXX";

        *fd = -1;
#ifdef MEMFD_SUPPORTED
        mfd = memfd_create(name, MFD_CLOEXEC);
#else
        RTE_SET_USED(name);
#endif
        if (mfd == -1) {
                mfd = mkstemp(fname);
                if (mfd == -1) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to get inflight buffer fd\n");
                        return NULL;
                }

                unlink(fname);
        }

        if (ftruncate(mfd, size) == -1) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to alloc inflight buffer\n");
                close(mfd);
                return NULL;
        }

        ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
        if (ptr == MAP_FAILED) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to mmap inflight buffer\n");
                close(mfd);
                return NULL;
        }

        *fd = mfd;
        return ptr;
}

static uint32_t
get_pervq_shm_size_split(uint16_t queue_size)
{
        return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
                                  queue_size + sizeof(uint64_t) +
                                  sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
}

static uint32_t
get_pervq_shm_size_packed(uint16_t queue_size)
{
        return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
                                  * queue_size + sizeof(uint64_t) +
                                  sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
                                  INFLIGHT_ALIGNMENT);
}
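
/*
 * Example for the two helpers above (illustrative arithmetic):
 * RTE_ALIGN_MUL_CEIL rounds the per-queue byte count up to the next multiple
 * of INFLIGHT_ALIGNMENT (64), e.g. a raw size of 100 bytes becomes 128, so
 * each queue's share of the inflight area stays cache-line aligned.
 */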
1415
1416 static int
1417 vhost_user_get_inflight_fd(struct virtio_net **pdev,
1418                            VhostUserMsg *msg,
1419                            int main_fd __rte_unused)
1420 {
1421         struct rte_vhost_inflight_info_packed *inflight_packed;
1422         uint64_t pervq_inflight_size, mmap_size;
1423         uint16_t num_queues, queue_size;
1424         struct virtio_net *dev = *pdev;
1425         int fd, i, j;
1426         void *addr;
1427
1428         if (msg->size != sizeof(msg->payload.inflight)) {
1429                 VHOST_LOG_CONFIG(ERR,
1430                         "invalid get_inflight_fd message size: %d\n",
1431                         msg->size);
1432                 return RTE_VHOST_MSG_RESULT_ERR;
1433         }
1434
1435         if (dev->inflight_info == NULL) {
1436                 dev->inflight_info = calloc(1,
1437                                             sizeof(struct inflight_mem_info));
1438                 if (!dev->inflight_info) {
1439                         VHOST_LOG_CONFIG(ERR,
1440                                 "failed to alloc dev inflight area\n");
1441                         return RTE_VHOST_MSG_RESULT_ERR;
1442                 }
1443                 dev->inflight_info->fd = -1;
1444         }
1445
1446         num_queues = msg->payload.inflight.num_queues;
1447         queue_size = msg->payload.inflight.queue_size;
1448
1449         VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
1450                 msg->payload.inflight.num_queues);
1451         VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
1452                 msg->payload.inflight.queue_size);
1453
1454         if (vq_is_packed(dev))
1455                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1456         else
1457                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1458
1459         mmap_size = num_queues * pervq_inflight_size;
1460         addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1461         if (!addr) {
1462                 VHOST_LOG_CONFIG(ERR,
1463                         "failed to alloc vhost inflight area\n");
1464                 msg->payload.inflight.mmap_size = 0;
1465                 return RTE_VHOST_MSG_RESULT_ERR;
1466         }
1467         memset(addr, 0, mmap_size);
1468
1469         if (dev->inflight_info->addr) {
1470                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1471                 dev->inflight_info->addr = NULL;
1472         }
1473
1474         if (dev->inflight_info->fd >= 0) {
1475                 close(dev->inflight_info->fd);
1476                 dev->inflight_info->fd = -1;
1477         }
1478
1479         dev->inflight_info->addr = addr;
1480         dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1481         dev->inflight_info->fd = msg->fds[0] = fd;
1482         msg->payload.inflight.mmap_offset = 0;
1483         msg->fd_num = 1;
1484
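        /*
         * For packed rings, chain each queue's inflight descriptors into
         * a free list and start the used wrap counters at 1, matching the
         * initial state of a freshly created packed ring.
         */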
1485         if (vq_is_packed(dev)) {
1486                 for (i = 0; i < num_queues; i++) {
1487                         inflight_packed =
1488                                 (struct rte_vhost_inflight_info_packed *)addr;
1489                         inflight_packed->used_wrap_counter = 1;
1490                         inflight_packed->old_used_wrap_counter = 1;
1491                         for (j = 0; j < queue_size; j++)
1492                                 inflight_packed->desc[j].next = j + 1;
1493                         addr = (void *)((char *)addr + pervq_inflight_size);
1494                 }
1495         }
1496
1497         VHOST_LOG_CONFIG(INFO,
1498                 "send inflight mmap_size: %"PRIu64"\n",
1499                 msg->payload.inflight.mmap_size);
1500         VHOST_LOG_CONFIG(INFO,
1501                 "send inflight mmap_offset: %"PRIu64"\n",
1502                 msg->payload.inflight.mmap_offset);
1503         VHOST_LOG_CONFIG(INFO,
1504                 "send inflight fd: %d\n", msg->fds[0]);
1505
1506         return RTE_VHOST_MSG_RESULT_REPLY;
1507 }
1508
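/*
 * VHOST_USER_SET_INFLIGHT_FD: after a reconnection the frontend hands the
 * inflight region back to us; map it and point each virtqueue at its
 * per-queue slice so that unfinished descriptors can be resubmitted.
 */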
1509 static int
1510 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1511                            int main_fd __rte_unused)
1512 {
1513         uint64_t mmap_size, mmap_offset;
1514         uint16_t num_queues, queue_size;
1515         struct virtio_net *dev = *pdev;
1516         uint32_t pervq_inflight_size;
1517         struct vhost_virtqueue *vq;
1518         void *addr;
1519         int fd, i;
1520
1521         fd = msg->fds[0];
1522         if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1523                 VHOST_LOG_CONFIG(ERR,
1524                         "invalid set_inflight_fd message size: %d, fd: %d\n",
1525                         msg->size, fd);
1526                 return RTE_VHOST_MSG_RESULT_ERR;
1527         }
1528
1529         mmap_size = msg->payload.inflight.mmap_size;
1530         mmap_offset = msg->payload.inflight.mmap_offset;
1531         num_queues = msg->payload.inflight.num_queues;
1532         queue_size = msg->payload.inflight.queue_size;
1533
1534         if (vq_is_packed(dev))
1535                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1536         else
1537                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1538
1539         VHOST_LOG_CONFIG(INFO,
1540                 "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1541         VHOST_LOG_CONFIG(INFO,
1542                 "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1543         VHOST_LOG_CONFIG(INFO,
1544                 "set_inflight_fd num_queues: %u\n", num_queues);
1545         VHOST_LOG_CONFIG(INFO,
1546                 "set_inflight_fd queue_size: %u\n", queue_size);
1547         VHOST_LOG_CONFIG(INFO,
1548                 "set_inflight_fd fd: %d\n", fd);
1549         VHOST_LOG_CONFIG(INFO,
1550                 "set_inflight_fd pervq_inflight_size: %u\n",
1551                 pervq_inflight_size);
1552
1553         if (!dev->inflight_info) {
1554                 dev->inflight_info = calloc(1,
1555                                             sizeof(struct inflight_mem_info));
1556                 if (dev->inflight_info == NULL) {
1557                         VHOST_LOG_CONFIG(ERR,
1558                                 "failed to alloc dev inflight area\n");
1559                         return RTE_VHOST_MSG_RESULT_ERR;
1560                 }
1561                 dev->inflight_info->fd = -1;
1562         }
1563
1564         if (dev->inflight_info->addr) {
1565                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1566                 dev->inflight_info->addr = NULL;
1567         }
1568
1569         addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1570                     fd, mmap_offset);
1571         if (addr == MAP_FAILED) {
1572                 VHOST_LOG_CONFIG(ERR, "failed to mmap shared memory.\n");
1573                 return RTE_VHOST_MSG_RESULT_ERR;
1574         }
1575
1576         if (dev->inflight_info->fd >= 0) {
1577                 close(dev->inflight_info->fd);
1578                 dev->inflight_info->fd = -1;
1579         }
1580
1581         dev->inflight_info->fd = fd;
1582         dev->inflight_info->addr = addr;
1583         dev->inflight_info->size = mmap_size;
1584
1585         for (i = 0; i < num_queues; i++) {
1586                 vq = dev->virtqueue[i];
1587                 if (vq_is_packed(dev)) {
1588                         vq->inflight_packed = addr;
1589                         vq->inflight_packed->desc_num = queue_size;
1590                 } else {
1591                         vq->inflight_split = addr;
1592                         vq->inflight_split->desc_num = queue_size;
1593                 }
1594                 addr = (void *)((char *)addr + pervq_inflight_size);
1595         }
1596
1597         return RTE_VHOST_MSG_RESULT_OK;
1598 }
1599
1600 static int
1601 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1602                         int main_fd __rte_unused)
1603 {
1604         struct virtio_net *dev = *pdev;
1605         struct vhost_vring_file file;
1606         struct vhost_virtqueue *vq;
1607         int expected_fds;
1608
1609         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1610         if (validate_msg_fds(msg, expected_fds) != 0)
1611                 return RTE_VHOST_MSG_RESULT_ERR;
1612
1613         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1614         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1615                 file.fd = VIRTIO_INVALID_EVENTFD;
1616         else
1617                 file.fd = msg->fds[0];
1618         VHOST_LOG_CONFIG(INFO,
1619                 "vring call idx:%d file:%d\n", file.index, file.fd);
1620
1621         vq = dev->virtqueue[file.index];
1622
1623         if (vq->ready) {
1624                 vhost_user_notify_queue_state(dev, file.index, 0);
1625                 vq->ready = 0;
1626         }
1627
1628         if (vq->callfd >= 0)
1629                 close(vq->callfd);
1630
1631         vq->callfd = file.fd;
1632
1633         return RTE_VHOST_MSG_RESULT_OK;
1634 }
1635
1636 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1637                         struct VhostUserMsg *msg,
1638                         int main_fd __rte_unused)
1639 {
1640         int expected_fds;
1641
1642         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1643         if (validate_msg_fds(msg, expected_fds) != 0)
1644                 return RTE_VHOST_MSG_RESULT_ERR;
1645
1646         if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1647                 close(msg->fds[0]);
1648         VHOST_LOG_CONFIG(INFO, "not implemented\n");
1649
1650         return RTE_VHOST_MSG_RESULT_OK;
1651 }
1652
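/*
 * qsort() comparator ordering resubmit descriptors by descending
 * submission counter, so resubmit_list[0] always holds the most recently
 * submitted descriptor and the global counter can resume right after it.
 */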
1653 static int
1654 resubmit_desc_compare(const void *a, const void *b)
1655 {
1656         const struct rte_vhost_resubmit_desc *desc0 = a;
1657         const struct rte_vhost_resubmit_desc *desc1 = b;
1658
1659         if (desc1->counter > desc0->counter)
1660                 return 1;
1661
1662         return -1;
1663 }
1664
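/*
 * Scan the split ring's inflight log after a reconnection: descriptors
 * still flagged as inflight were submitted but never completed before the
 * crash, so collect them into a resubmit list for the datapath to replay.
 */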
1665 static int
1666 vhost_check_queue_inflights_split(struct virtio_net *dev,
1667                                   struct vhost_virtqueue *vq)
1668 {
1669         uint16_t i;
1670         uint16_t resubmit_num = 0, last_io, num;
1671         struct vring_used *used = vq->used;
1672         struct rte_vhost_resubmit_info *resubmit;
1673         struct rte_vhost_inflight_info_split *inflight_split;
1674
1675         if (!(dev->protocol_features &
1676             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1677                 return RTE_VHOST_MSG_RESULT_OK;
1678
1679         /* The frontend may still not support the inflight feature
1680          * even though the protocol feature was negotiated.
1681          */
1682         if (!vq->inflight_split)
1683                 return RTE_VHOST_MSG_RESULT_OK;
1684
1685         if (!vq->inflight_split->version) {
1686                 vq->inflight_split->version = INFLIGHT_VERSION;
1687                 return RTE_VHOST_MSG_RESULT_OK;
1688         }
1689
1690         if (vq->resubmit_inflight)
1691                 return RTE_VHOST_MSG_RESULT_OK;
1692
1693         inflight_split = vq->inflight_split;
1694         vq->global_counter = 0;
1695         last_io = inflight_split->last_inflight_io;
1696
1697         if (inflight_split->used_idx != used->idx) {
1698                 inflight_split->desc[last_io].inflight = 0;
1699                 rte_smp_mb();
1700                 inflight_split->used_idx = used->idx;
1701         }
1702
1703         for (i = 0; i < inflight_split->desc_num; i++) {
1704                 if (inflight_split->desc[i].inflight == 1)
1705                         resubmit_num++;
1706         }
1707
1708         vq->last_avail_idx += resubmit_num;
1709
1710         if (resubmit_num) {
1711                 resubmit  = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1712                 if (!resubmit) {
1713                         VHOST_LOG_CONFIG(ERR,
1714                                 "failed to allocate memory for resubmit info.\n");
1715                         return RTE_VHOST_MSG_RESULT_ERR;
1716                 }
1717
1718                 resubmit->resubmit_list = calloc(resubmit_num,
1719                         sizeof(struct rte_vhost_resubmit_desc));
1720                 if (!resubmit->resubmit_list) {
1721                         VHOST_LOG_CONFIG(ERR,
1722                                 "failed to allocate memory for inflight desc.\n");
1723                         free(resubmit);
1724                         return RTE_VHOST_MSG_RESULT_ERR;
1725                 }
1726
1727                 num = 0;
1728                 for (i = 0; i < vq->inflight_split->desc_num; i++) {
1729                         if (vq->inflight_split->desc[i].inflight == 1) {
1730                                 resubmit->resubmit_list[num].index = i;
1731                                 resubmit->resubmit_list[num].counter =
1732                                         inflight_split->desc[i].counter;
1733                                 num++;
1734                         }
1735                 }
1736                 resubmit->resubmit_num = num;
1737
1738                 if (resubmit->resubmit_num > 1)
1739                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1740                               sizeof(struct rte_vhost_resubmit_desc),
1741                               resubmit_desc_compare);
1742
1743                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1744                 vq->resubmit_inflight = resubmit;
1745         }
1746
1747         return RTE_VHOST_MSG_RESULT_OK;
1748 }
1749
1750 static int
1751 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1752                                    struct vhost_virtqueue *vq)
1753 {
1754         uint16_t i;
1755         uint16_t resubmit_num = 0, old_used_idx, num;
1756         struct rte_vhost_resubmit_info *resubmit;
1757         struct rte_vhost_inflight_info_packed *inflight_packed;
1758
1759         if (!(dev->protocol_features &
1760             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1761                 return RTE_VHOST_MSG_RESULT_OK;
1762
1763         /* The frontend may still not support the inflight feature
1764          * even though the protocol feature was negotiated.
1765          */
1766         if (!vq->inflight_packed)
1767                 return RTE_VHOST_MSG_RESULT_OK;
1768
1769         if (!vq->inflight_packed->version) {
1770                 vq->inflight_packed->version = INFLIGHT_VERSION;
1771                 return RTE_VHOST_MSG_RESULT_OK;
1772         }
1773
1774         if (vq->resubmit_inflight)
1775                 return RTE_VHOST_MSG_RESULT_OK;
1776
1777         inflight_packed = vq->inflight_packed;
1778         vq->global_counter = 0;
1779         old_used_idx = inflight_packed->old_used_idx;
1780
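        /*
         * If used_idx and old_used_idx disagree, we crashed in the middle
         * of a used-ring update: either the descriptor at old_used_idx
         * completed (commit by advancing the old_* copies) or it did not
         * (roll used_idx, the wrap counter and free_head back to their
         * old_* shadows).
         */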
1781         if (inflight_packed->used_idx != old_used_idx) {
1782                 if (inflight_packed->desc[old_used_idx].inflight == 0) {
1783                         inflight_packed->old_used_idx =
1784                                 inflight_packed->used_idx;
1785                         inflight_packed->old_used_wrap_counter =
1786                                 inflight_packed->used_wrap_counter;
1787                         inflight_packed->old_free_head =
1788                                 inflight_packed->free_head;
1789                 } else {
1790                         inflight_packed->used_idx =
1791                                 inflight_packed->old_used_idx;
1792                         inflight_packed->used_wrap_counter =
1793                                 inflight_packed->old_used_wrap_counter;
1794                         inflight_packed->free_head =
1795                                 inflight_packed->old_free_head;
1796                 }
1797         }
1798
1799         for (i = 0; i < inflight_packed->desc_num; i++) {
1800                 if (inflight_packed->desc[i].inflight == 1)
1801                         resubmit_num++;
1802         }
1803
1804         if (resubmit_num) {
1805                 resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1806                 if (resubmit == NULL) {
1807                         VHOST_LOG_CONFIG(ERR,
1808                                 "failed to allocate memory for resubmit info.\n");
1809                         return RTE_VHOST_MSG_RESULT_ERR;
1810                 }
1811
1812                 resubmit->resubmit_list = calloc(resubmit_num,
1813                         sizeof(struct rte_vhost_resubmit_desc));
1814                 if (resubmit->resubmit_list == NULL) {
1815                         VHOST_LOG_CONFIG(ERR,
1816                                 "failed to allocate memory for resubmit desc.\n");
1817                         free(resubmit);
1818                         return RTE_VHOST_MSG_RESULT_ERR;
1819                 }
1820
1821                 num = 0;
1822                 for (i = 0; i < inflight_packed->desc_num; i++) {
1823                         if (vq->inflight_packed->desc[i].inflight == 1) {
1824                                 resubmit->resubmit_list[num].index = i;
1825                                 resubmit->resubmit_list[num].counter =
1826                                         inflight_packed->desc[i].counter;
1827                                 num++;
1828                         }
1829                 }
1830                 resubmit->resubmit_num = num;
1831
1832                 if (resubmit->resubmit_num > 1)
1833                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1834                               sizeof(struct rte_vhost_resubmit_desc),
1835                               resubmit_desc_compare);
1836
1837                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1838                 vq->resubmit_inflight = resubmit;
1839         }
1840
1841         return RTE_VHOST_MSG_RESULT_OK;
1842 }
1843
1844 static int
1845 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
1846                         int main_fd __rte_unused)
1847 {
1848         struct virtio_net *dev = *pdev;
1849         struct vhost_vring_file file;
1850         struct vhost_virtqueue *vq;
1851         int expected_fds;
1852
1853         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1854         if (validate_msg_fds(msg, expected_fds) != 0)
1855                 return RTE_VHOST_MSG_RESULT_ERR;
1856
1857         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1858         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1859                 file.fd = VIRTIO_INVALID_EVENTFD;
1860         else
1861                 file.fd = msg->fds[0];
1862         VHOST_LOG_CONFIG(INFO,
1863                 "vring kick idx:%d file:%d\n", file.index, file.fd);
1864
1865         /* Interpret ring addresses only when ring is started. */
1866         dev = translate_ring_addresses(dev, file.index);
1867         if (!dev)
1868                 return RTE_VHOST_MSG_RESULT_ERR;
1869
1870         *pdev = dev;
1871
1872         vq = dev->virtqueue[file.index];
1873
1874         /*
1875          * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
1876          * the ring starts already enabled. Otherwise, it is enabled via
1877          * the SET_VRING_ENABLE message.
1878          */
1879         if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
1880                 vq->enabled = 1;
1881                 if (dev->notify_ops->vring_state_changed)
1882                         dev->notify_ops->vring_state_changed(
1883                                 dev->vid, file.index, 1);
1884         }
1885
1886         if (vq->ready) {
1887                 vhost_user_notify_queue_state(dev, file.index, 0);
1888                 vq->ready = 0;
1889         }
1890
1891         if (vq->kickfd >= 0)
1892                 close(vq->kickfd);
1893         vq->kickfd = file.fd;
1894
1895         if (vq_is_packed(dev)) {
1896                 if (vhost_check_queue_inflights_packed(dev, vq)) {
1897                         VHOST_LOG_CONFIG(ERR,
1898                                 "failed to check inflights for vq: %d\n", file.index);
1899                         return RTE_VHOST_MSG_RESULT_ERR;
1900                 }
1901         } else {
1902                 if (vhost_check_queue_inflights_split(dev, vq)) {
1903                         VHOST_LOG_CONFIG(ERR,
1904                                 "failed to check inflights for vq: %d\n", file.index);
1905                         return RTE_VHOST_MSG_RESULT_ERR;
1906                 }
1907         }
1908
1909         return RTE_VHOST_MSG_RESULT_OK;
1910 }
1911
1912 static void
1913 free_zmbufs(struct vhost_virtqueue *vq)
1914 {
1915         drain_zmbuf_list(vq);
1916
1917         rte_free(vq->zmbufs);
1918 }
1919
1920 /*
1921  * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
1922  */
1923 static int
1924 vhost_user_get_vring_base(struct virtio_net **pdev,
1925                         struct VhostUserMsg *msg,
1926                         int main_fd __rte_unused)
1927 {
1928         struct virtio_net *dev = *pdev;
1929         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
1930         uint64_t val;
1931
1932         if (validate_msg_fds(msg, 0) != 0)
1933                 return RTE_VHOST_MSG_RESULT_ERR;
1934
1935         /* We have to stop the queue (virtio) if it is running. */
1936         vhost_destroy_device_notify(dev);
1937
1938         dev->flags &= ~VIRTIO_DEV_READY;
1939         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1940
1941         /* At this point it is safe to fetch the ring indexes. */
1942         if (vq_is_packed(dev)) {
1943                 /*
1944                  * Bit[0:14]: avail index
1945                  * Bit[15]: avail wrap counter
1946                  */
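                /* e.g. last_avail_idx 5 with the wrap counter set encodes as 0x8005 */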
1947                 val = vq->last_avail_idx & 0x7fff;
1948                 val |= vq->avail_wrap_counter << 15;
1949                 msg->payload.state.num = val;
1950         } else {
1951                 msg->payload.state.num = vq->last_avail_idx;
1952         }
1953
1954         VHOST_LOG_CONFIG(INFO,
1955                 "vring base idx:%d num:%d\n", msg->payload.state.index,
1956                 msg->payload.state.num);
1957         /*
1958          * Based on the current QEMU vhost-user implementation, this message
1959          * is sent only from vhost_vring_stop.
1960          * TODO: clean up the vring; it isn't usable from here on.
1961          */
1962         if (vq->kickfd >= 0)
1963                 close(vq->kickfd);
1964
1965         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
1966
1967         if (vq->callfd >= 0)
1968                 close(vq->callfd);
1969
1970         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
1971
1972         vq->signalled_used_valid = false;
1973
1974         if (dev->dequeue_zero_copy)
1975                 free_zmbufs(vq);
1976         if (vq_is_packed(dev)) {
1977                 rte_free(vq->shadow_used_packed);
1978                 vq->shadow_used_packed = NULL;
1979         } else {
1980                 rte_free(vq->shadow_used_split);
1981                 vq->shadow_used_split = NULL;
1982         }
1983
1984         rte_free(vq->batch_copy_elems);
1985         vq->batch_copy_elems = NULL;
1986
1987         msg->size = sizeof(msg->payload.state);
1988         msg->fd_num = 0;
1989
1990         vring_invalidate(dev, vq);
1991
1992         return RTE_VHOST_MSG_RESULT_REPLY;
1993 }
1994
1995 /*
1996  * When the virtio queues are ready to work, QEMU will send us a message
1997  * to enable the virtio queue pair.
1998  */
1999 static int
2000 vhost_user_set_vring_enable(struct virtio_net **pdev,
2001                         struct VhostUserMsg *msg,
2002                         int main_fd __rte_unused)
2003 {
2004         struct virtio_net *dev = *pdev;
2005         int enable = (int)msg->payload.state.num;
2006         int index = (int)msg->payload.state.index;
2007
2008         if (validate_msg_fds(msg, 0) != 0)
2009                 return RTE_VHOST_MSG_RESULT_ERR;
2010
2011         VHOST_LOG_CONFIG(INFO,
2012                 "set queue enable: %d to qp idx: %d\n",
2013                 enable, index);
2014
2015         /* On disable, the rings must no longer be processed. */
2016         if (!enable && dev->dequeue_zero_copy)
2017                 drain_zmbuf_list(dev->virtqueue[index]);
2018
2019         dev->virtqueue[index]->enabled = enable;
2020
2021         return RTE_VHOST_MSG_RESULT_OK;
2022 }
2023
2024 static int
2025 vhost_user_get_protocol_features(struct virtio_net **pdev,
2026                         struct VhostUserMsg *msg,
2027                         int main_fd __rte_unused)
2028 {
2029         struct virtio_net *dev = *pdev;
2030         uint64_t features, protocol_features;
2031
2032         if (validate_msg_fds(msg, 0) != 0)
2033                 return RTE_VHOST_MSG_RESULT_ERR;
2034
2035         rte_vhost_driver_get_features(dev->ifname, &features);
2036         rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2037
2038         msg->payload.u64 = protocol_features;
2039         msg->size = sizeof(msg->payload.u64);
2040         msg->fd_num = 0;
2041
2042         return RTE_VHOST_MSG_RESULT_REPLY;
2043 }
2044
2045 static int
2046 vhost_user_set_protocol_features(struct virtio_net **pdev,
2047                         struct VhostUserMsg *msg,
2048                         int main_fd __rte_unused)
2049 {
2050         struct virtio_net *dev = *pdev;
2051         uint64_t protocol_features = msg->payload.u64;
2052         uint64_t slave_protocol_features = 0;
2053
2054         if (validate_msg_fds(msg, 0) != 0)
2055                 return RTE_VHOST_MSG_RESULT_ERR;
2056
2057         rte_vhost_driver_get_protocol_features(dev->ifname,
2058                         &slave_protocol_features);
2059         if (protocol_features & ~slave_protocol_features) {
2060                 VHOST_LOG_CONFIG(ERR,
2061                         "(%d) received invalid protocol features.\n",
2062                         dev->vid);
2063                 return RTE_VHOST_MSG_RESULT_ERR;
2064         }
2065
2066         dev->protocol_features = protocol_features;
2067         VHOST_LOG_CONFIG(INFO,
2068                 "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2069                 dev->protocol_features);
2070
2071         return RTE_VHOST_MSG_RESULT_OK;
2072 }
2073
2074 static int
2075 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2076                         int main_fd __rte_unused)
2077 {
2078         struct virtio_net *dev = *pdev;
2079         int fd = msg->fds[0];
2080         uint64_t size, off;
2081         void *addr;
2082
2083         if (validate_msg_fds(msg, 1) != 0)
2084                 return RTE_VHOST_MSG_RESULT_ERR;
2085
2086         if (fd < 0) {
2087                 VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2088                 return RTE_VHOST_MSG_RESULT_ERR;
2089         }
2090
2091         if (msg->size != sizeof(VhostUserLog)) {
2092                 VHOST_LOG_CONFIG(ERR,
2093                         "invalid log base msg size: %"PRIu32" != %d\n",
2094                         msg->size, (int)sizeof(VhostUserLog));
2095                 return RTE_VHOST_MSG_RESULT_ERR;
2096         }
2097
2098         size = msg->payload.log.mmap_size;
2099         off  = msg->payload.log.mmap_offset;
2100
2101         /* Check for mmap size and offset overflow. */
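        /*
         * With unsigned 64-bit arithmetic -size wraps to 2^64 - size, so
         * for any non-zero size "off >= -size" holds exactly when
         * off + size would wrap past 2^64 (a zero size is rejected as
         * degenerate).
         */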
2102         if (off >= -size) {
2103                 VHOST_LOG_CONFIG(ERR,
2104                         "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2105                         off, size);
2106                 return RTE_VHOST_MSG_RESULT_ERR;
2107         }
2108
2109         VHOST_LOG_CONFIG(INFO,
2110                 "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
2111                 size, off);
2112
2113         /*
2114          * mmap from 0 to work around a hugepage mmap bug: mmap will
2115          * fail when the offset is not page-size aligned.
2116          */
2117         addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2118         close(fd);
2119         if (addr == MAP_FAILED) {
2120                 VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2121                 return RTE_VHOST_MSG_RESULT_ERR;
2122         }
2123
2124         /*
2125          * Free the previously mapped log memory in case
2126          * VHOST_USER_SET_LOG_BASE is received multiple times.
2127          */
2128         if (dev->log_addr) {
2129                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2130         }
2131         dev->log_addr = (uint64_t)(uintptr_t)addr;
2132         dev->log_base = dev->log_addr + off;
2133         dev->log_size = size;
2134
2135         /*
2136          * The spec is not clear about it (yet), but QEMU doesn't expect
2137          * any payload in the reply.
2138          */
2139         msg->size = 0;
2140         msg->fd_num = 0;
2141
2142         return RTE_VHOST_MSG_RESULT_REPLY;
2143 }
2144
2145 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2146                         struct VhostUserMsg *msg,
2147                         int main_fd __rte_unused)
2148 {
2149         if (validate_msg_fds(msg, 1) != 0)
2150                 return RTE_VHOST_MSG_RESULT_ERR;
2151
2152         close(msg->fds[0]);
2153         VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2154
2155         return RTE_VHOST_MSG_RESULT_OK;
2156 }
2157
2158 /*
2159  * A RARP packet is constructed and broadcast to notify switches of the
2160  * new location of the migrated VM, so that packets from outside will
2161  * not be lost after migration.
2162  *
2163  * However, we don't actually "send" a RARP packet here; instead, we set
2164  * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
2165  */
2166 static int
2167 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2168                         int main_fd __rte_unused)
2169 {
2170         struct virtio_net *dev = *pdev;
2171         uint8_t *mac = (uint8_t *)&msg->payload.u64;
2172         struct rte_vdpa_device *vdpa_dev;
2173
2174         if (validate_msg_fds(msg, 0) != 0)
2175                 return RTE_VHOST_MSG_RESULT_ERR;
2176
2177         VHOST_LOG_CONFIG(DEBUG,
2178                 ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
2179                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2180         memcpy(dev->mac.addr_bytes, mac, 6);
2181
2182         /*
2183          * Set the flag to inject a RARP broadcast packet at
2184          * rte_vhost_dequeue_burst().
2185          *
2186          * __ATOMIC_RELEASE ordering is for making sure the mac is
2187          * copied before the flag is set.
2188          */
2189         __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2190         vdpa_dev = dev->vdpa_dev;
2191         if (vdpa_dev && vdpa_dev->ops->migration_done)
2192                 vdpa_dev->ops->migration_done(dev->vid);
2193
2194         return RTE_VHOST_MSG_RESULT_OK;
2195 }
2196
2197 static int
2198 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2199                         int main_fd __rte_unused)
2200 {
2201         struct virtio_net *dev = *pdev;
2202
2203         if (validate_msg_fds(msg, 0) != 0)
2204                 return RTE_VHOST_MSG_RESULT_ERR;
2205
2206         if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2207                         msg->payload.u64 > VIRTIO_MAX_MTU) {
2208                 VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2209                                 msg->payload.u64);
2210
2211                 return RTE_VHOST_MSG_RESULT_ERR;
2212         }
2213
2214         dev->mtu = msg->payload.u64;
2215
2216         return RTE_VHOST_MSG_RESULT_OK;
2217 }
2218
2219 static int
2220 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2221                         int main_fd __rte_unused)
2222 {
2223         struct virtio_net *dev = *pdev;
2224         int fd = msg->fds[0];
2225
2226         if (validate_msg_fds(msg, 1) != 0)
2227                 return RTE_VHOST_MSG_RESULT_ERR;
2228
2229         if (fd < 0) {
2230                 VHOST_LOG_CONFIG(ERR,
2231                                 "Invalid file descriptor for slave channel (%d)\n",
2232                                 fd);
2233                 return RTE_VHOST_MSG_RESULT_ERR;
2234         }
2235
2236         if (dev->slave_req_fd >= 0)
2237                 close(dev->slave_req_fd);
2238
2239         dev->slave_req_fd = fd;
2240
2241         return RTE_VHOST_MSG_RESULT_OK;
2242 }
2243
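/*
 * Return 1 when the IOTLB range [iova, iova + size) overlaps any guest
 * address used by the split vring (descriptor table, avail ring, used
 * ring or the log address), meaning cached ring translations are affected
 * by the update or invalidation.
 */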
2244 static int
2245 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2246 {
2247         struct vhost_vring_addr *ra;
2248         uint64_t start, end, len;
2249
2250         start = imsg->iova;
2251         end = start + imsg->size;
2252
2253         ra = &vq->ring_addrs;
2254         len = sizeof(struct vring_desc) * vq->size;
2255         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2256                 return 1;
2257
2258         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2259         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2260                 return 1;
2261
2262         len = sizeof(struct vring_used) +
2263                sizeof(struct vring_used_elem) * vq->size;
2264         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2265                 return 1;
2266
2267         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2268                 len = sizeof(uint64_t);
2269                 if (ra->log_guest_addr < end &&
2270                     (ra->log_guest_addr + len) > start)
2271                         return 1;
2272         }
2273
2274         return 0;
2275 }
2276
2277 static int
2278 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2279 {
2280         struct vhost_vring_addr *ra;
2281         uint64_t start, end, len;
2282
2283         start = imsg->iova;
2284         end = start + imsg->size;
2285
2286         ra = &vq->ring_addrs;
2287         len = sizeof(struct vring_packed_desc) * vq->size;
2288         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2289                 return 1;
2290
2291         len = sizeof(struct vring_packed_desc_event);
2292         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2293                 return 1;
2294
2295         len = sizeof(struct vring_packed_desc_event);
2296         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2297                 return 1;
2298
2299         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2300                 len = sizeof(uint64_t);
2301                 if (ra->log_guest_addr < end &&
2302                     (ra->log_guest_addr + len) > start)
2303                         return 1;
2304         }
2305
2306         return 0;
2307 }
2308
2309 static int is_vring_iotlb(struct virtio_net *dev,
2310                           struct vhost_virtqueue *vq,
2311                           struct vhost_iotlb_msg *imsg)
2312 {
2313         if (vq_is_packed(dev))
2314                 return is_vring_iotlb_packed(vq, imsg);
2315         else
2316                 return is_vring_iotlb_split(vq, imsg);
2317 }
2318
2319 static int
2320 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2321                         int main_fd __rte_unused)
2322 {
2323         struct virtio_net *dev = *pdev;
2324         struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2325         uint16_t i;
2326         uint64_t vva, len;
2327
2328         if (validate_msg_fds(msg, 0) != 0)
2329                 return RTE_VHOST_MSG_RESULT_ERR;
2330
2331         switch (imsg->type) {
2332         case VHOST_IOTLB_UPDATE:
2333                 len = imsg->size;
2334                 vva = qva_to_vva(dev, imsg->uaddr, &len);
2335                 if (!vva)
2336                         return RTE_VHOST_MSG_RESULT_ERR;
2337
2338                 for (i = 0; i < dev->nr_vring; i++) {
2339                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2340
2341                         vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2342                                         len, imsg->perm);
2343
2344                         if (is_vring_iotlb(dev, vq, imsg))
2345                                 *pdev = dev = translate_ring_addresses(dev, i);
2346                 }
2347                 break;
2348         case VHOST_IOTLB_INVALIDATE:
2349                 for (i = 0; i < dev->nr_vring; i++) {
2350                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2351
2352                         vhost_user_iotlb_cache_remove(vq, imsg->iova,
2353                                         imsg->size);
2354
2355                         if (is_vring_iotlb(dev, vq, imsg))
2356                                 vring_invalidate(dev, vq);
2357                 }
2358                 break;
2359         default:
2360                 VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2361                                 imsg->type);
2362                 return RTE_VHOST_MSG_RESULT_ERR;
2363         }
2364
2365         return RTE_VHOST_MSG_RESULT_OK;
2366 }
2367
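/*
 * VHOST_USER_POSTCOPY_ADVISE: open a userfaultfd, negotiate UFFD_API with
 * the kernel and return the fd to the frontend, which uses it to service
 * page faults during postcopy live migration.
 */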
2368 static int
2369 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2370                         struct VhostUserMsg *msg,
2371                         int main_fd __rte_unused)
2372 {
2373         struct virtio_net *dev = *pdev;
2374 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2375         struct uffdio_api api_struct;
2376
2377         if (validate_msg_fds(msg, 0) != 0)
2378                 return RTE_VHOST_MSG_RESULT_ERR;
2379
2380         dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2381
2382         if (dev->postcopy_ufd == -1) {
2383                 VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2384                         strerror(errno));
2385                 return RTE_VHOST_MSG_RESULT_ERR;
2386         }
2387         api_struct.api = UFFD_API;
2388         api_struct.features = 0;
2389         if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2390                 VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2391                         strerror(errno));
2392                 close(dev->postcopy_ufd);
2393                 dev->postcopy_ufd = -1;
2394                 return RTE_VHOST_MSG_RESULT_ERR;
2395         }
2396         msg->fds[0] = dev->postcopy_ufd;
2397         msg->fd_num = 1;
2398
2399         return RTE_VHOST_MSG_RESULT_REPLY;
2400 #else
2401         dev->postcopy_ufd = -1;
2402         msg->fd_num = 0;
2403
2404         return RTE_VHOST_MSG_RESULT_ERR;
2405 #endif
2406 }
2407
2408 static int
2409 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2410                         struct VhostUserMsg *msg,
2411                         int main_fd __rte_unused)
2412 {
2413         struct virtio_net *dev = *pdev;
2414
2415         if (validate_msg_fds(msg, 0) != 0)
2416                 return RTE_VHOST_MSG_RESULT_ERR;
2417
2418         if (dev->mem && dev->mem->nregions) {
2419                 VHOST_LOG_CONFIG(ERR,
2420                         "Regions already registered at postcopy-listen\n");
2421                 return RTE_VHOST_MSG_RESULT_ERR;
2422         }
2423         dev->postcopy_listening = 1;
2424
2425         return RTE_VHOST_MSG_RESULT_OK;
2426 }
2427
2428 static int
2429 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2430                         int main_fd __rte_unused)
2431 {
2432         struct virtio_net *dev = *pdev;
2433
2434         if (validate_msg_fds(msg, 0) != 0)
2435                 return RTE_VHOST_MSG_RESULT_ERR;
2436
2437         dev->postcopy_listening = 0;
2438         if (dev->postcopy_ufd >= 0) {
2439                 close(dev->postcopy_ufd);
2440                 dev->postcopy_ufd = -1;
2441         }
2442
2443         msg->payload.u64 = 0;
2444         msg->size = sizeof(msg->payload.u64);
2445         msg->fd_num = 0;
2446
2447         return RTE_VHOST_MSG_RESULT_REPLY;
2448 }
2449
2450 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2451                                         struct VhostUserMsg *msg,
2452                                         int main_fd);
2453 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2454         [VHOST_USER_NONE] = NULL,
2455         [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2456         [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2457         [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2458         [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2459         [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2460         [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2461         [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2462         [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2463         [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2464         [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2465         [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2466         [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2467         [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2468         [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2469         [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2470         [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2471         [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2472         [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2473         [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2474         [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2475         [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2476         [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2477         [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2478         [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2479         [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2480         [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2481         [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2482 };
2483
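/*
 * A vhost-user message is a fixed-size header (request, flags, payload
 * size) received together with any ancillary file descriptors, followed
 * by msg->size bytes of payload.
 */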
2484 /* Return the number of bytes read on success, or a negative value on failure. */
2485 static int
2486 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2487 {
2488         int ret;
2489
2490         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2491                 msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2492         if (ret <= 0) {
2493                 return ret;
2494         } else if (ret != VHOST_USER_HDR_SIZE) {
2495                 VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2496                 close_msg_fds(msg);
2497                 return -1;
2498         }
2499
2500         if (msg->size) {
2501                 if (msg->size > sizeof(msg->payload)) {
2502                         VHOST_LOG_CONFIG(ERR,
2503                                 "invalid msg size: %d\n", msg->size);
                        close_msg_fds(msg);
2504                         return -1;
2505                 }
2506                 ret = read(sockfd, &msg->payload, msg->size);
2507                 if (ret <= 0)
2508                         return ret;
2509                 if (ret != (int)msg->size) {
2510                         VHOST_LOG_CONFIG(ERR,
2511                                 "read control message failed\n");
2512                         return -1;
2513                 }
2514         }
2515
2516         return ret;
2517 }
2518
2519 static int
2520 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2521 {
2522         if (!msg)
2523                 return 0;
2524
2525         return send_fd_message(sockfd, (char *)msg,
2526                 VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2527 }
2528
2529 static int
2530 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2531 {
2532         if (!msg)
2533                 return 0;
2534
2535         msg->flags &= ~VHOST_USER_VERSION_MASK;
2536         msg->flags &= ~VHOST_USER_NEED_REPLY;
2537         msg->flags |= VHOST_USER_VERSION;
2538         msg->flags |= VHOST_USER_REPLY_MASK;
2539
2540         return send_vhost_message(sockfd, msg);
2541 }
2542
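/*
 * Send a request on the slave channel. When a reply is expected, the
 * slave_req_lock is taken here and released either below on send failure
 * or in process_slave_message_reply(), serializing outstanding slave
 * requests.
 */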
2543 static int
2544 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2545 {
2546         int ret;
2547
2548         if (msg->flags & VHOST_USER_NEED_REPLY)
2549                 rte_spinlock_lock(&dev->slave_req_lock);
2550
2551         ret = send_vhost_message(dev->slave_req_fd, msg);
2552         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2553                 rte_spinlock_unlock(&dev->slave_req_lock);
2554
2555         return ret;
2556 }
2557
2558 /*
2559  * Allocate a queue pair if it hasn't been allocated yet
2560  */
2561 static int
2562 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
2563                         struct VhostUserMsg *msg)
2564 {
2565         uint32_t vring_idx;
2566
2567         switch (msg->request.master) {
2568         case VHOST_USER_SET_VRING_KICK:
2569         case VHOST_USER_SET_VRING_CALL:
2570         case VHOST_USER_SET_VRING_ERR:
2571                 vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2572                 break;
2573         case VHOST_USER_SET_VRING_NUM:
2574         case VHOST_USER_SET_VRING_BASE:
2575         case VHOST_USER_SET_VRING_ENABLE:
2576                 vring_idx = msg->payload.state.index;
2577                 break;
2578         case VHOST_USER_SET_VRING_ADDR:
2579                 vring_idx = msg->payload.addr.index;
2580                 break;
2581         default:
2582                 return 0;
2583         }
2584
2585         if (vring_idx >= VHOST_MAX_VRING) {
2586                 VHOST_LOG_CONFIG(ERR,
2587                         "invalid vring index: %u\n", vring_idx);
2588                 return -1;
2589         }
2590
2591         if (dev->virtqueue[vring_idx])
2592                 return 0;
2593
2594         return alloc_vring_queue(dev, vring_idx);
2595 }
2596
2597 static void
2598 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
2599 {
2600         unsigned int i = 0;
2601         unsigned int vq_num = 0;
2602
2603         while (vq_num < dev->nr_vring) {
2604                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2605
2606                 if (vq) {
2607                         rte_spinlock_lock(&vq->access_lock);
2608                         vq_num++;
2609                 }
2610                 i++;
2611         }
2612 }
2613
2614 static void
2615 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
2616 {
2617         unsigned int i = 0;
2618         unsigned int vq_num = 0;
2619
2620         while (vq_num < dev->nr_vring) {
2621                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2622
2623                 if (vq) {
2624                         rte_spinlock_unlock(&vq->access_lock);
2625                         vq_num++;
2626                 }
2627                 i++;
2628         }
2629 }
2630
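/*
 * Main vhost-user message dispatcher: read one message from the socket,
 * give the external pre-handler a chance to claim it, fall back to the
 * built-in handler table, then run the external post-handler. Queue locks
 * are held across state-changing requests, and device readiness is
 * re-evaluated once the message has been processed.
 */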
2631 int
2632 vhost_user_msg_handler(int vid, int fd)
2633 {
2634         struct virtio_net *dev;
2635         struct VhostUserMsg msg;
2636         struct rte_vdpa_device *vdpa_dev;
2637         int ret;
2638         int unlock_required = 0;
2639         bool handled;
2640         int request;
2641         uint32_t i;
2642
2643         dev = get_device(vid);
2644         if (dev == NULL)
2645                 return -1;
2646
2647         if (!dev->notify_ops) {
2648                 dev->notify_ops = vhost_driver_callback_get(dev->ifname);
2649                 if (!dev->notify_ops) {
2650                         VHOST_LOG_CONFIG(ERR,
2651                                 "failed to get callback ops for driver %s\n",
2652                                 dev->ifname);
2653                         return -1;
2654                 }
2655         }
2656
2657         ret = read_vhost_message(fd, &msg);
2658         if (ret <= 0) {
2659                 if (ret < 0)
2660                         VHOST_LOG_CONFIG(ERR,
2661                                 "vhost read message failed\n");
2662                 else
2663                         VHOST_LOG_CONFIG(INFO,
2664                                 "vhost peer closed\n");
2665
2666                 return -1;
2667         }
2668
2669         ret = 0;
2670         request = msg.request.master;
2671         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
2672                         vhost_message_str[request]) {
2673                 if (request != VHOST_USER_IOTLB_MSG)
2674                         VHOST_LOG_CONFIG(INFO, "read message %s\n",
2675                                 vhost_message_str[request]);
2676                 else
2677                         VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
2678                                 vhost_message_str[request]);
2679         } else {
2680                 VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
2681         }
2682
2683         ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
2684         if (ret < 0) {
2685                 VHOST_LOG_CONFIG(ERR,
2686                         "failed to alloc queue\n");
2687                 return -1;
2688         }
2689
2690         /*
2691          * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
2692          * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
2693          * and the device is destroyed. destroy_device waits for queues to
2694          * become inactive, so it is safe. Otherwise taking the access_lock
2695          * would cause a deadlock.
2696          */
2697         switch (request) {
2698         case VHOST_USER_SET_FEATURES:
2699         case VHOST_USER_SET_PROTOCOL_FEATURES:
2700         case VHOST_USER_SET_OWNER:
2701         case VHOST_USER_SET_MEM_TABLE:
2702         case VHOST_USER_SET_LOG_BASE:
2703         case VHOST_USER_SET_LOG_FD:
2704         case VHOST_USER_SET_VRING_NUM:
2705         case VHOST_USER_SET_VRING_ADDR:
2706         case VHOST_USER_SET_VRING_BASE:
2707         case VHOST_USER_SET_VRING_KICK:
2708         case VHOST_USER_SET_VRING_CALL:
2709         case VHOST_USER_SET_VRING_ERR:
2710         case VHOST_USER_SET_VRING_ENABLE:
2711         case VHOST_USER_SEND_RARP:
2712         case VHOST_USER_NET_SET_MTU:
2713         case VHOST_USER_SET_SLAVE_REQ_FD:
2714                 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2715                         vhost_user_lock_all_queue_pairs(dev);
2716                         unlock_required = 1;
2717                 }
2718                 break;
2719         default:
2720                 break;
2721
2722         }
2723
2724         handled = false;
2725         if (dev->extern_ops.pre_msg_handle) {
2726                 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
2727                                 (void *)&msg);
2728                 switch (ret) {
2729                 case RTE_VHOST_MSG_RESULT_REPLY:
2730                         send_vhost_reply(fd, &msg);
2731                         /* Fall-through */
2732                 case RTE_VHOST_MSG_RESULT_ERR:
2733                 case RTE_VHOST_MSG_RESULT_OK:
2734                         handled = true;
2735                         goto skip_to_post_handle;
2736                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2737                 default:
2738                         break;
2739                 }
2740         }
2741
2742         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
2743                 if (!vhost_message_handlers[request])
2744                         goto skip_to_post_handle;
2745                 ret = vhost_message_handlers[request](&dev, &msg, fd);
2746
2747                 switch (ret) {
2748                 case RTE_VHOST_MSG_RESULT_ERR:
2749                         VHOST_LOG_CONFIG(ERR,
2750                                 "Processing %s failed.\n",
2751                                 vhost_message_str[request]);
2752                         handled = true;
2753                         break;
2754                 case RTE_VHOST_MSG_RESULT_OK:
2755                         VHOST_LOG_CONFIG(DEBUG,
2756                                 "Processing %s succeeded.\n",
2757                                 vhost_message_str[request]);
2758                         handled = true;
2759                         break;
2760                 case RTE_VHOST_MSG_RESULT_REPLY:
2761                         VHOST_LOG_CONFIG(DEBUG,
2762                                 "Processing %s succeeded and needs reply.\n",
2763                                 vhost_message_str[request]);
2764                         send_vhost_reply(fd, &msg);
2765                         handled = true;
2766                         break;
2767                 default:
2768                         break;
2769                 }
2770         }
2771
2772 skip_to_post_handle:
2773         if (ret != RTE_VHOST_MSG_RESULT_ERR &&
2774                         dev->extern_ops.post_msg_handle) {
2775                 ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
2776                                 (void *)&msg);
2777                 switch (ret) {
2778                 case RTE_VHOST_MSG_RESULT_REPLY:
2779                         send_vhost_reply(fd, &msg);
2780                         /* Fall-through */
2781                 case RTE_VHOST_MSG_RESULT_ERR:
2782                 case RTE_VHOST_MSG_RESULT_OK:
2783                         handled = true;
2784                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2785                 default:
2786                         break;
2787                 }
2788         }
2789
2790         if (unlock_required)
2791                 vhost_user_unlock_all_queue_pairs(dev);
2792
2793         /* If message was not handled at this stage, treat it as an error */
2794         if (!handled) {
2795                 VHOST_LOG_CONFIG(ERR,
2796                         "vhost message (req: %d) was not handled.\n", request);
2797                 close_msg_fds(&msg);
2798                 ret = RTE_VHOST_MSG_RESULT_ERR;
2799         }
2800
2801         /*
2802          * If the request required a reply that was already sent,
2803          * this optional reply-ack won't be sent as the
2804          * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
2805          */
2806         if (msg.flags & VHOST_USER_NEED_REPLY) {
2807                 msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
2808                 msg.size = sizeof(msg.payload.u64);
2809                 msg.fd_num = 0;
2810                 send_vhost_reply(fd, &msg);
2811         } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
2812                 VHOST_LOG_CONFIG(ERR,
2813                         "vhost message handling failed.\n");
2814                 return -1;
2815         }
2816
2817         for (i = 0; i < dev->nr_vring; i++) {
2818                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2819                 bool cur_ready = vq_is_ready(dev, vq);
2820
2821                 if (cur_ready != (vq && vq->ready)) {
2822                         vhost_user_notify_queue_state(dev, i, cur_ready);
2823                         vq->ready = cur_ready;
2824                 }
2825         }
2826
2828         if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
2829                 dev->flags |= VIRTIO_DEV_READY;
2830
2831                 if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
2832                         if (dev->dequeue_zero_copy) {
2833                                 VHOST_LOG_CONFIG(INFO,
2834                                                 "dequeue zero copy is enabled\n");
2835                         }
2836
2837                         if (dev->notify_ops->new_device(dev->vid) == 0)
2838                                 dev->flags |= VIRTIO_DEV_RUNNING;
2839                 }
2840         }
2841
2842         vdpa_dev = dev->vdpa_dev;
2843         if (vdpa_dev && virtio_is_ready(dev) &&
2844             !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2845                 if (vdpa_dev->ops->dev_conf)
2846                         vdpa_dev->ops->dev_conf(dev->vid);
2847                 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
2848         }
2849
2850         return 0;
2851 }
2852
static int
process_slave_message_reply(struct virtio_net *dev,
		const struct VhostUserMsg *msg)
{
	struct VhostUserMsg msg_reply;
	int ret;

	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;

	ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
	if (ret <= 0) {
		if (ret < 0)
			VHOST_LOG_CONFIG(ERR,
				"failed to read slave message reply\n");
		else
			VHOST_LOG_CONFIG(INFO,
				"vhost peer closed\n");
		ret = -1;
		goto out;
	}

	ret = 0;
	if (msg_reply.request.slave != msg->request.slave) {
		VHOST_LOG_CONFIG(ERR,
			"Received unexpected msg type (%u), expected %u\n",
			msg_reply.request.slave, msg->request.slave);
		ret = -1;
		goto out;
	}

	ret = msg_reply.payload.u64 ? -1 : 0;

out:
	rte_spinlock_unlock(&dev->slave_req_lock);
	return ret;
}

int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.iotlb),
		.payload.iotlb = {
			.iova = iova,
			.perm = perm,
			.type = VHOST_IOTLB_MISS,
		},
	};

	ret = send_vhost_message(dev->slave_req_fd, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to send IOTLB miss message (%d)\n",
				ret);
		return ret;
	}

	return 0;
}
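
/*
 * The miss message above is half of a handshake: the master answers with a
 * VHOST_USER_IOTLB_MSG carrying a VHOST_IOTLB_UPDATE entry, which is then
 * inserted into the per-virtqueue IOTLB cache.  A hedged sketch of how a
 * translation helper uses it (example_iova_to_vva is hypothetical; the
 * real code path is __vhost_iova_to_vva() in vhost.c):
 *
 *	static uint64_t
 *	example_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 *			uint64_t iova, uint64_t *len, uint8_t perm)
 *	{
 *		uint64_t vva;
 *
 *		vva = vhost_user_iotlb_cache_find(vq, iova, len, perm);
 *		if (!vva)
 *			// Ask the master to fill the cache; the caller
 *			// retries the access once the update arrives.
 *			vhost_user_iotlb_miss(dev, iova, perm);
 *		return vva;
 *	}
 */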

static int
vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
		.flags = VHOST_USER_VERSION,
		.size = 0,
	};

	if (need_reply)
		msg.flags |= VHOST_USER_NEED_REPLY;

	ret = send_vhost_slave_message(dev, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
				"Failed to send config change (%d)\n",
				ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_slave_config_change(int vid, bool need_reply)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	return vhost_user_slave_config_change(dev, need_reply);
}
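
/*
 * Typical use: a backend (for instance a vhost-blk device model or a vDPA
 * driver) calls this after it has updated its config space, so the master
 * re-reads the config and notifies the guest.  A hedged sketch, with
 * example_on_resize being hypothetical:
 *
 *	static int
 *	example_on_resize(int vid)
 *	{
 *		// Update the backend's config space first, then tell the
 *		// master; need_reply == true blocks until the master acks.
 *		return rte_vhost_slave_config_change(vid, true);
 *	}
 */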

static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
		int index, int fd, uint64_t offset, uint64_t size)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
		.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
		.size = sizeof(msg.payload.area),
		.payload.area = {
			.u64 = index & VHOST_USER_VRING_IDX_MASK,
			.size = size,
			.offset = offset,
		},
	};

	if (fd < 0) {
		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
	} else {
		msg.fds[0] = fd;
		msg.fd_num = 1;
	}

	ret = send_vhost_slave_message(dev, &msg);
	if (ret < 0) {
		VHOST_LOG_CONFIG(ERR,
			"Failed to set host notifier (%d)\n", ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}

int
rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
	struct virtio_net *dev;
	struct rte_vdpa_device *vdpa_dev;
	int vfio_device_fd, ret = 0;
	uint64_t offset, size;
	unsigned int i, q_start, q_last;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	vdpa_dev = dev->vdpa_dev;
	if (vdpa_dev == NULL)
		return -ENODEV;

	if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
	    !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
		return -ENOTSUP;

	if (qid == RTE_VHOST_QUEUE_ALL) {
		q_start = 0;
		q_last = dev->nr_vring - 1;
	} else {
		if (qid >= dev->nr_vring)
			return -EINVAL;
		q_start = qid;
		q_last = qid;
	}

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

	vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
	if (vfio_device_fd < 0)
		return -ENOTSUP;

	if (enable) {
		for (i = q_start; i <= q_last; i++) {
			if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
					&size) < 0) {
				ret = -ENOTSUP;
				goto disable;
			}

			if (vhost_user_slave_set_vring_host_notifier(dev, i,
					vfio_device_fd, offset, size) < 0) {
				ret = -EFAULT;
				goto disable;
			}
		}
	} else {
disable:
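		/*
		 * Reached either on an explicit disable or as rollback when
		 * enabling failed part-way: every queue in [q_start, q_last]
		 * gets its notifier unmapped again (fd == -1).
		 */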
		for (i = q_start; i <= q_last; i++) {
			vhost_user_slave_set_vring_host_notifier(dev, i, -1,
					0, 0);
		}
	}

	return ret;
}
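
/*
 * Typical call site: a vDPA driver enables the host notifiers once its
 * datapath is configured, so guest doorbells are relayed straight to the
 * device.  A hedged sketch (example_vdpa_dev_conf is hypothetical):
 *
 *	static int
 *	example_vdpa_dev_conf(int vid)
 *	{
 *		int ret;
 *
 *		// ... program the hardware datapath for this vid ...
 *
 *		// Map every queue's doorbell into the guest; if the master
 *		// lacks the needed protocol features this returns -ENOTSUP
 *		// and kicks simply stay on the software (eventfd) path.
 *		ret = rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL,
 *				true);
 *		return ret == -ENOTSUP ? 0 : ret;
 *	}
 */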