/* dpdk.git: lib/librte_vhost/vhost_user.c */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  A malicious guest that has escaped can then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */
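
/*
 * An illustrative sketch (added for clarity, not code from this file) of the
 * defensive pattern the model above implies: every index or size taken from a
 * message is validated before use, assuming the VHOST_MAX_VRING bound from
 * vhost.h:
 *
 *	uint32_t idx = msg->payload.state.index;	// untrusted input
 *	if (idx >= VHOST_MAX_VRING)			// validate first...
 *		return RTE_VHOST_MSG_RESULT_ERR;
 *	vq = dev->virtqueue[idx];			// ...then dereference
 */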

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
#include <linux/userfaultfd.h>
#endif
#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
#include <linux/memfd.h>
#define MEMFD_SUPPORTED
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

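/*
 * Note (added for clarity): 68 is the minimum link MTU required by IPv4
 * (RFC 791: a maximal 60-byte header plus an 8-byte fragment must fit);
 * 65535 is the largest value representable in the 16-bit virtio-net MTU
 * config field.
 */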
#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

#define INFLIGHT_ALIGNMENT	64
#define INFLIGHT_VERSION	0x1

static const char *vhost_message_str[VHOST_USER_MAX] = {
	[VHOST_USER_NONE] = "VHOST_USER_NONE",
	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
	[VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
	[VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
	[VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
	[VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
	[VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
	[VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
	[VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
	[VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
	[VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
	[VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
	[VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
	[VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
	[VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
	[VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
	[VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
	[VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
	[VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
	[VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
	[VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
	[VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
	[VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
	[VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
	[VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
	[VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
	[VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
	[VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
	[VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
	[VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
};

static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
static int read_vhost_message(int sockfd, struct VhostUserMsg *msg);

static void
close_msg_fds(struct VhostUserMsg *msg)
{
	int i;

	for (i = 0; i < msg->fd_num; i++)
		close(msg->fds[i]);
}

/*
 * Ensure the expected number of FDs is received,
 * close all FDs and return an error if this is not the case.
 */
static int
validate_msg_fds(struct VhostUserMsg *msg, int expected_fds)
{
	if (msg->fd_num == expected_fds)
		return 0;

	VHOST_LOG_CONFIG(ERR,
		" Expect %d FDs for request %s, received %d\n",
		expected_fds,
		vhost_message_str[msg->request.master],
		msg->fd_num);

	close_msg_fds(msg);

	return -1;
}

static uint64_t
get_blk_size(int fd)
{
	struct stat stat;
	int ret;

	ret = fstat(fd, &stat);
	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
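
/*
 * Illustrative use (a sketch, not code from this file): for a hugetlbfs-backed
 * fd, st_blksize is the hugepage size, which is what
 * vhost_user_set_mem_table() later aligns its mmap() length to:
 *
 *	uint64_t align = get_blk_size(fd);
 *	if (align != (uint64_t)-1)
 *		mmap_size = RTE_ALIGN_CEIL(mmap_size, align);
 */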

/*
 * Reclaim all the outstanding zmbufs for a virtqueue.
 */
static void
drain_zmbuf_list(struct vhost_virtqueue *vq)
{
	struct zcopy_mbuf *zmbuf, *next;

	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
	     zmbuf != NULL; zmbuf = next) {
		next = TAILQ_NEXT(zmbuf, next);

		while (!mbuf_is_consumed(zmbuf->mbuf))
			usleep(1000);

		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
		restore_mbuf(zmbuf->mbuf);
		rte_pktmbuf_free(zmbuf->mbuf);
		put_zmbuf(zmbuf);
		vq->nr_zmbuf -= 1;
	}
}

static void
free_mem_region(struct virtio_net *dev)
{
	uint32_t i;
	struct rte_vhost_mem_region *reg;
	struct vhost_virtqueue *vq;

	if (!dev || !dev->mem)
		return;

	if (dev->dequeue_zero_copy) {
		for (i = 0; i < dev->nr_vring; i++) {
			vq = dev->virtqueue[i];
			if (vq)
				drain_zmbuf_list(vq);
		}
	}

	for (i = 0; i < dev->mem->nregions; i++) {
		reg = &dev->mem->regions[i];
		if (reg->host_user_addr) {
			munmap(reg->mmap_addr, reg->mmap_size);
			close(reg->fd);
		}
	}
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
	if (dev->mem) {
		free_mem_region(dev);
		rte_free(dev->mem);
		dev->mem = NULL;
	}

	rte_free(dev->guest_pages);
	dev->guest_pages = NULL;

	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
		dev->log_addr = 0;
	}

	if (dev->inflight_info) {
		if (dev->inflight_info->addr) {
			munmap(dev->inflight_info->addr,
			       dev->inflight_info->size);
			dev->inflight_info->addr = NULL;
		}

		if (dev->inflight_info->fd >= 0) {
			close(dev->inflight_info->fd);
			dev->inflight_info->fd = -1;
		}

		free(dev->inflight_info);
		dev->inflight_info = NULL;
	}

	if (dev->slave_req_fd >= 0) {
		close(dev->slave_req_fd);
		dev->slave_req_fd = -1;
	}

	if (dev->postcopy_ufd >= 0) {
		close(dev->postcopy_ufd);
		dev->postcopy_ufd = -1;
	}

	dev->postcopy_listening = 0;
}

static void
vhost_user_notify_queue_state(struct virtio_net *dev, uint16_t index,
			      int enable)
{
	struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;

	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

	if (dev->notify_ops->vring_state_changed)
		dev->notify_ops->vring_state_changed(dev->vid,
				index, enable);
}

/*
 * This function just returns success at the moment; nothing needs to be
 * done to take ownership beyond validating the message.
 */
static int
vhost_user_set_owner(struct virtio_net **pdev __rte_unused,
			struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	return RTE_VHOST_MSG_RESULT_OK;
}

static int
vhost_user_reset_owner(struct virtio_net **pdev,
			struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 0);
	reset_device(dev);
	return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The master requests the features that we support.
 */
static int
vhost_user_get_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	uint64_t features = 0;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	rte_vhost_driver_get_features(dev->ifname, &features);

	msg->payload.u64 = features;
	msg->size = sizeof(msg->payload.u64);
	msg->fd_num = 0;

	return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * The master requests the number of queues that we support.
 */
static int
vhost_user_get_queue_num(struct virtio_net **pdev, struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	uint32_t queue_num = 0;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

	msg->payload.u64 = (uint64_t)queue_num;
	msg->size = sizeof(msg->payload.u64);
	msg->fd_num = 0;

	return RTE_VHOST_MSG_RESULT_REPLY;
}

/*
 * We receive the negotiated features supported by both us and the virtio
 * device.
 */
static int
vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	uint64_t features = msg->payload.u64;
	uint64_t vhost_features = 0;
	struct rte_vdpa_device *vdpa_dev;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
	if (features & ~vhost_features) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) received invalid negotiated features.\n",
			dev->vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		if (dev->features == features)
			return RTE_VHOST_MSG_RESULT_OK;

		/*
		 * Error out if master tries to change features while device is
		 * in running state. The exception being VHOST_F_LOG_ALL, which
		 * is enabled when the live-migration starts.
		 */
		if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
			VHOST_LOG_CONFIG(ERR,
				"(%d) features changed while device is running.\n",
				dev->vid);
			return RTE_VHOST_MSG_RESULT_ERR;
		}

		if (dev->notify_ops->features_changed)
			dev->notify_ops->features_changed(dev->vid, features);
	}

	dev->features = features;
	if (dev->features &
		((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
	}
	VHOST_LOG_CONFIG(INFO,
		"negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
	VHOST_LOG_CONFIG(DEBUG,
		"(%d) mergeable RX buffers %s, virtio 1 %s\n",
		dev->vid,
		(dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

	if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
	    !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
		/*
		 * Remove all but first queue pair if MQ hasn't been
		 * negotiated. This is safe because the device is not
		 * running at this stage.
		 */
		while (dev->nr_vring > 2) {
			struct vhost_virtqueue *vq;

			vq = dev->virtqueue[--dev->nr_vring];
			if (!vq)
				continue;

			dev->virtqueue[dev->nr_vring] = NULL;
			cleanup_vq(vq, 1);
			cleanup_vq_inflight(dev, vq);
			free_vq(dev, vq);
		}
	}

	vdpa_dev = dev->vdpa_dev;
	if (vdpa_dev && vdpa_dev->ops->set_features)
		vdpa_dev->ops->set_features(dev->vid);

	return RTE_VHOST_MSG_RESULT_OK;
}
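
/*
 * A worked sketch of the negotiation check above (illustrative values, not
 * code from this file): any bit set in the acked features but clear in our
 * supported mask is invalid.
 *
 *	uint64_t supported = (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
 *			     (1ULL << VIRTIO_F_VERSION_1);
 *	uint64_t acked = supported | (1ULL << VIRTIO_F_IOMMU_PLATFORM);
 *	// acked & ~supported != 0, so the message is rejected with
 *	// RTE_VHOST_MSG_RESULT_ERR.
 */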

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net **pdev,
			struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	vq->size = msg->payload.state.num;

	/* VIRTIO 1.0, 2.4 Virtqueues says:
	 *
	 *   Queue Size value is always a power of 2. The maximum Queue Size
	 *   value is 32768.
	 *
	 * VIRTIO 1.1 2.7 Virtqueues says:
	 *
	 *   Packed virtqueues support up to 2^15 entries each.
	 */
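	/* Note (added for clarity): a nonzero size is a power of two iff
	 * (size & (size - 1)) == 0, e.g. 256 & 255 == 0 but 320 & 319 != 0;
	 * this is the bit trick the split-ring check below relies on.
	 */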
	if (!vq_is_packed(dev)) {
		if (vq->size & (vq->size - 1)) {
			VHOST_LOG_CONFIG(ERR,
				"invalid virtqueue size %u\n", vq->size);
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}

	if (vq->size > 32768) {
		VHOST_LOG_CONFIG(ERR,
			"invalid virtqueue size %u\n", vq->size);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	if (dev->dequeue_zero_copy) {
		vq->nr_zmbuf = 0;
		vq->last_zmbuf_idx = 0;
		vq->zmbuf_size = vq->size;
		if (vq->zmbufs)
			rte_free(vq->zmbufs);
		vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
					 sizeof(struct zcopy_mbuf), 0);
		if (vq->zmbufs == NULL) {
			VHOST_LOG_CONFIG(WARNING,
				"failed to allocate mem for zero copy; "
				"zero copy is force disabled\n");
			dev->dequeue_zero_copy = 0;
		}
		TAILQ_INIT(&vq->zmbuf_list);
	}

	if (vq_is_packed(dev)) {
		if (vq->shadow_used_packed)
			rte_free(vq->shadow_used_packed);
		vq->shadow_used_packed = rte_malloc(NULL,
				vq->size *
				sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE);
		if (!vq->shadow_used_packed) {
			VHOST_LOG_CONFIG(ERR,
					"failed to allocate memory for shadow used ring.\n");
			return RTE_VHOST_MSG_RESULT_ERR;
		}

	} else {
		if (vq->shadow_used_split)
			rte_free(vq->shadow_used_split);

		vq->shadow_used_split = rte_malloc(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE);

		if (!vq->shadow_used_split) {
			VHOST_LOG_CONFIG(ERR,
					"failed to allocate memory for vq internal data.\n");
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}

	if (vq->batch_copy_elems)
		rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = rte_malloc(NULL,
				vq->size * sizeof(struct batch_copy_elem),
				RTE_CACHE_LINE_SIZE);
	if (!vq->batch_copy_elems) {
		VHOST_LOG_CONFIG(ERR,
			"failed to allocate memory for batching copy.\n");
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that they
 * are on the same NUMA node as the memory holding the vring descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	struct zcopy_mbuf *new_zmbuf;
	struct vring_used_elem *new_shadow_used_split;
	struct vring_used_elem_packed *new_shadow_used_packed;
	struct batch_copy_elem *new_batch_copy_elems;
	int ret;

	if (dev->flags & VIRTIO_DEV_RUNNING)
		return dev;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);

	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		VHOST_LOG_CONFIG(ERR,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		VHOST_LOG_CONFIG(INFO,
			"reallocate vq from %d to %d node\n", oldnode, newnode);
		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
		if (!vq)
			return dev;

		memcpy(vq, old_vq, sizeof(*vq));
		TAILQ_INIT(&vq->zmbuf_list);

		if (dev->dequeue_zero_copy) {
			new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
					sizeof(struct zcopy_mbuf), 0, newnode);
			if (new_zmbuf) {
				rte_free(vq->zmbufs);
				vq->zmbufs = new_zmbuf;
			}
		}

		if (vq_is_packed(dev)) {
			new_shadow_used_packed = rte_malloc_socket(NULL,
					vq->size *
					sizeof(struct vring_used_elem_packed),
					RTE_CACHE_LINE_SIZE,
					newnode);
			if (new_shadow_used_packed) {
				rte_free(vq->shadow_used_packed);
				vq->shadow_used_packed = new_shadow_used_packed;
			}
		} else {
			new_shadow_used_split = rte_malloc_socket(NULL,
					vq->size *
					sizeof(struct vring_used_elem),
					RTE_CACHE_LINE_SIZE,
					newnode);
			if (new_shadow_used_split) {
				rte_free(vq->shadow_used_split);
				vq->shadow_used_split = new_shadow_used_split;
			}
		}

		new_batch_copy_elems = rte_malloc_socket(NULL,
			vq->size * sizeof(struct batch_copy_elem),
			RTE_CACHE_LINE_SIZE,
			newnode);
		if (new_batch_copy_elems) {
			rte_free(vq->batch_copy_elems);
			vq->batch_copy_elems = new_batch_copy_elems;
		}

		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		VHOST_LOG_CONFIG(ERR,
			"Unable to get dev numa information.\n");
		goto out;
	}
	if (oldnode != newnode) {
		VHOST_LOG_CONFIG(INFO,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev;
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	dev->virtqueue[index] = vq;
	vhost_devices[dev->vid] = dev;

	if (old_vq != vq)
		vhost_user_iotlb_init(dev, index);

	return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
	return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	if (unlikely(!dev || !dev->mem))
		goto out_error;

	/* Find the region where the address lives. */
	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (qva >= r->guest_user_addr &&
		    qva <  r->guest_user_addr + r->size) {

			if (unlikely(*len > r->guest_user_addr + r->size - qva))
				*len = r->guest_user_addr + r->size - qva;

			return qva - r->guest_user_addr +
			       r->host_user_addr;
		}
	}
out_error:
	*len = 0;

	return 0;
}
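
/*
 * A worked example of the translation above (illustrative numbers): with a
 * region where guest_user_addr = 0x7f0000000000, host_user_addr =
 * 0x400000000000 and size = 0x40000000, the QVA 0x7f0000001000 maps to
 * 0x7f0000001000 - 0x7f0000000000 + 0x400000000000 = 0x400000001000, and
 * *len is clipped so the returned range never crosses the region end.
 */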


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t ra, uint64_t *size)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		uint64_t vva;

		vhost_user_iotlb_rd_lock(vq);
		vva = vhost_iova_to_vva(dev, vq, ra,
					size, VHOST_ACCESS_RW);
		vhost_user_iotlb_rd_unlock(vq);

		return vva;
	}

	return qva_to_vva(dev, ra, size);
}

static uint64_t
log_addr_to_gpa(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	uint64_t log_gpa;

	vhost_user_iotlb_rd_lock(vq);
	log_gpa = translate_log_addr(dev, vq, vq->ring_addrs.log_guest_addr);
	vhost_user_iotlb_rd_unlock(vq);

	return log_gpa;
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
	struct vhost_vring_addr *addr = &vq->ring_addrs;
	uint64_t len, expected_len;

	if (addr->flags & (1 << VHOST_VRING_F_LOG)) {
		vq->log_guest_addr =
			log_addr_to_gpa(dev, vq);
		if (vq->log_guest_addr == 0) {
			VHOST_LOG_CONFIG(DEBUG,
				"(%d) failed to map log_guest_addr.\n",
				dev->vid);
			return dev;
		}
	}

	if (vq_is_packed(dev)) {
		len = sizeof(struct vring_packed_desc) * vq->size;
		vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
			ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
		if (vq->desc_packed == NULL ||
				len != sizeof(struct vring_packed_desc) *
				vq->size) {
			VHOST_LOG_CONFIG(DEBUG,
				"(%d) failed to map desc_packed ring.\n",
				dev->vid);
			return dev;
		}

		dev = numa_realloc(dev, vq_index);
		vq = dev->virtqueue[vq_index];
		addr = &vq->ring_addrs;

		len = sizeof(struct vring_packed_desc_event);
		vq->driver_event = (struct vring_packed_desc_event *)
					(uintptr_t)ring_addr_to_vva(dev,
					vq, addr->avail_user_addr, &len);
		if (vq->driver_event == NULL ||
				len != sizeof(struct vring_packed_desc_event)) {
			VHOST_LOG_CONFIG(DEBUG,
				"(%d) failed to find driver area address.\n",
				dev->vid);
			return dev;
		}

		len = sizeof(struct vring_packed_desc_event);
		vq->device_event = (struct vring_packed_desc_event *)
					(uintptr_t)ring_addr_to_vva(dev,
					vq, addr->used_user_addr, &len);
		if (vq->device_event == NULL ||
				len != sizeof(struct vring_packed_desc_event)) {
			VHOST_LOG_CONFIG(DEBUG,
				"(%d) failed to find device area address.\n",
				dev->vid);
			return dev;
		}

		vq->access_ok = 1;
		return dev;
	}

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	if (vq->desc && vq->avail && vq->used)
		return dev;

	len = sizeof(struct vring_desc) * vq->size;
	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->desc_user_addr, &len);
	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
		VHOST_LOG_CONFIG(DEBUG,
			"(%d) failed to map desc ring.\n",
			dev->vid);
		return dev;
	}

	dev = numa_realloc(dev, vq_index);
	vq = dev->virtqueue[vq_index];
	addr = &vq->ring_addrs;

	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		len += sizeof(uint16_t);
	expected_len = len;
	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->avail_user_addr, &len);
	if (vq->avail == 0 || len != expected_len) {
		VHOST_LOG_CONFIG(DEBUG,
			"(%d) failed to map avail ring.\n",
			dev->vid);
		return dev;
	}

	len = sizeof(struct vring_used) +
		sizeof(struct vring_used_elem) * vq->size;
	if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))
		len += sizeof(uint16_t);
	expected_len = len;
	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->used_user_addr, &len);
	if (vq->used == 0 || len != expected_len) {
		VHOST_LOG_CONFIG(DEBUG,
			"(%d) failed to map used ring.\n",
			dev->vid);
		return dev;
	}

	if (vq->last_used_idx != vq->used->idx) {
		VHOST_LOG_CONFIG(WARNING,
			"last_used_idx (%u) and vq->used->idx (%u) mismatch; "
			"some packets may be resent for Tx and dropped for Rx\n",
			vq->last_used_idx, vq->used->idx);
		vq->last_used_idx  = vq->used->idx;
		vq->last_avail_idx = vq->used->idx;
	}

	vq->access_ok = 1;

	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address desc: %p\n",
			dev->vid, vq->desc);
	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address avail: %p\n",
			dev->vid, vq->avail);
	VHOST_LOG_CONFIG(DEBUG, "(%d) mapped address used: %p\n",
			dev->vid, vq->used);
	VHOST_LOG_CONFIG(DEBUG, "(%d) log_guest_addr: %" PRIx64 "\n",
			dev->vid, vq->log_guest_addr);

	return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_virtqueue *vq;
	struct vhost_vring_addr *addr = &msg->payload.addr;
	bool access_ok;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	if (dev->mem == NULL)
		return RTE_VHOST_MSG_RESULT_ERR;

	/* addr->index refers to the queue index. The txq is 1, the rxq is 0. */
	vq = dev->virtqueue[msg->payload.addr.index];

	access_ok = vq->access_ok;

	/*
	 * Ring addresses should not be interpreted as long as the ring is not
	 * started and enabled.
	 */
	memcpy(&vq->ring_addrs, addr, sizeof(*addr));

	vring_invalidate(dev, vq);

	if ((vq->enabled && (dev->features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
			access_ok) {
		dev = translate_ring_addresses(dev, msg->payload.addr.index);
		if (!dev)
			return RTE_VHOST_MSG_RESULT_ERR;

		*pdev = dev;
	}

	return RTE_VHOST_MSG_RESULT_OK;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net **pdev,
			struct VhostUserMsg *msg,
			int main_fd __rte_unused)
{
	struct virtio_net *dev = *pdev;
	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
	uint64_t val = msg->payload.state.num;

	if (validate_msg_fds(msg, 0) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	if (vq_is_packed(dev)) {
		/*
		 * Bit[0:14]: avail index
		 * Bit[15]: avail wrap counter
		 */
		vq->last_avail_idx = val & 0x7fff;
		vq->avail_wrap_counter = !!(val & (0x1 << 15));
		/*
		 * Set used index to same value as available one, as
		 * their values should be the same since ring processing
		 * was stopped at get time.
		 */
		vq->last_used_idx = vq->last_avail_idx;
		vq->used_wrap_counter = vq->avail_wrap_counter;
	} else {
		vq->last_used_idx = msg->payload.state.num;
		vq->last_avail_idx = msg->payload.state.num;
	}

	return RTE_VHOST_MSG_RESULT_OK;
}
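
/*
 * A worked example of the packed-ring encoding above (illustrative value):
 * val = 0x8123 decodes to last_avail_idx = 0x8123 & 0x7fff = 0x0123
 * (bits 0-14) and avail_wrap_counter = 1 (bit 15).
 */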

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
		   uint64_t host_phys_addr, uint64_t size)
{
	struct guest_page *page, *last_page;
	struct guest_page *old_pages;

	if (dev->nr_guest_pages == dev->max_guest_pages) {
		dev->max_guest_pages *= 2;
		old_pages = dev->guest_pages;
		dev->guest_pages = rte_realloc(dev->guest_pages,
					dev->max_guest_pages * sizeof(*page),
					RTE_CACHE_LINE_SIZE);
		if (dev->guest_pages == NULL) {
			VHOST_LOG_CONFIG(ERR, "cannot realloc guest_pages\n");
			rte_free(old_pages);
			return -1;
		}
	}

	if (dev->nr_guest_pages > 0) {
		last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
		/* merge if the two pages are contiguous */
		if (host_phys_addr == last_page->host_phys_addr +
				      last_page->size) {
			last_page->size += size;
			return 0;
		}
	}

	page = &dev->guest_pages[dev->nr_guest_pages++];
	page->guest_phys_addr = guest_phys_addr;
	page->host_phys_addr  = host_phys_addr;
	page->size = size;

	return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
		uint64_t page_size)
{
	uint64_t reg_size = reg->size;
	uint64_t host_user_addr  = reg->host_user_addr;
	uint64_t guest_phys_addr = reg->guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;

	host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
	size = page_size - (guest_phys_addr & (page_size - 1));
	size = RTE_MIN(size, reg_size);

	if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
		return -1;

	host_user_addr  += size;
	guest_phys_addr += size;
	reg_size -= size;

	while (reg_size > 0) {
		size = RTE_MIN(reg_size, page_size);
		host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
						  host_user_addr);
		if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
				size) < 0)
			return -1;

		host_user_addr  += size;
		guest_phys_addr += size;
		reg_size -= size;
	}

	/* sort guest page array if over binary search threshold */
	if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
		qsort((void *)dev->guest_pages, dev->nr_guest_pages,
			sizeof(struct guest_page), guest_page_addrcmp);
	}

	return 0;
}
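
/*
 * A worked example of the splitting above (illustrative numbers): with
 * page_size = 0x1000, guest_phys_addr = 0x1800 and reg_size = 0x3000, the
 * first chunk is 0x1000 - 0x800 = 0x800 bytes, followed by chunks of
 * 0x1000, 0x1000 and 0x800 bytes, so no entry crosses a host page boundary.
 * Entries whose host physical addresses happen to be contiguous are merged
 * by add_one_guest_page().
 */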

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		VHOST_LOG_CONFIG(INFO,
			"guest physical page region %u\n"
			"\t guest_phys_addr: %" PRIx64 "\n"
			"\t host_phys_addr : %" PRIx64 "\n"
			"\t size           : %" PRIx64 "\n",
			i,
			page->guest_phys_addr,
			page->host_phys_addr,
			page->size);
	}
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
		     struct rte_vhost_memory *old)
{
	uint32_t i;

	if (new->nregions != old->nregions)
		return true;

	for (i = 0; i < new->nregions; ++i) {
		VhostUserMemoryRegion *new_r = &new->regions[i];
		struct rte_vhost_mem_region *old_r = &old->regions[i];

		if (new_r->guest_phys_addr != old_r->guest_phys_addr)
			return true;
		if (new_r->memory_size != old_r->size)
			return true;
		if (new_r->userspace_addr != old_r->guest_user_addr)
			return true;
	}

	return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
			int main_fd)
{
	struct virtio_net *dev = *pdev;
	struct VhostUserMemory *memory = &msg->payload.memory;
	struct rte_vhost_mem_region *reg;
	void *mmap_addr;
	uint64_t mmap_size;
	uint64_t mmap_offset;
	uint64_t alignment;
	uint32_t i;
	int populate;
	int fd;

	if (validate_msg_fds(msg, memory->nregions) != 0)
		return RTE_VHOST_MSG_RESULT_ERR;

	if (memory->nregions > VHOST_MEMORY_MAX_NREGIONS) {
		VHOST_LOG_CONFIG(ERR,
			"too many memory regions (%u)\n", memory->nregions);
		return RTE_VHOST_MSG_RESULT_ERR;
	}

	if (dev->mem && !vhost_memory_changed(memory, dev->mem)) {
		VHOST_LOG_CONFIG(INFO,
			"(%d) memory regions not changed\n", dev->vid);

		close_msg_fds(msg);

		return RTE_VHOST_MSG_RESULT_OK;
	}

	if (dev->mem) {
		if (dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) {
			struct rte_vdpa_device *vdpa_dev = dev->vdpa_dev;

			if (vdpa_dev && vdpa_dev->ops->dev_close)
				vdpa_dev->ops->dev_close(dev->vid);
			dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
		}
		free_mem_region(dev);
		rte_free(dev->mem);
		dev->mem = NULL;
	}

	/* Flush IOTLB cache as previous HVAs are now invalid */
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		for (i = 0; i < dev->nr_vring; i++)
			vhost_user_iotlb_flush_all(dev->virtqueue[i]);

	dev->nr_guest_pages = 0;
	if (dev->guest_pages == NULL) {
		dev->max_guest_pages = 8;
		dev->guest_pages = rte_zmalloc(NULL,
					dev->max_guest_pages *
					sizeof(struct guest_page),
					RTE_CACHE_LINE_SIZE);
		if (dev->guest_pages == NULL) {
			VHOST_LOG_CONFIG(ERR,
				"(%d) failed to allocate memory "
				"for dev->guest_pages\n",
				dev->vid);
			return RTE_VHOST_MSG_RESULT_ERR;
		}
	}

	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
		sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
	if (dev->mem == NULL) {
		VHOST_LOG_CONFIG(ERR,
			"(%d) failed to allocate memory for dev->mem\n",
			dev->vid);
		return RTE_VHOST_MSG_RESULT_ERR;
	}
	dev->mem->nregions = memory->nregions;

	for (i = 0; i < memory->nregions; i++) {
		fd  = msg->fds[i];
		reg = &dev->mem->regions[i];

		reg->guest_phys_addr = memory->regions[i].guest_phys_addr;
		reg->guest_user_addr = memory->regions[i].userspace_addr;
		reg->size            = memory->regions[i].memory_size;
		reg->fd              = fd;

		mmap_offset = memory->regions[i].mmap_offset;

		/* Check for memory_size + mmap_offset overflow */
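		/* (Note added for clarity: the test below relies on unsigned
		 * wraparound. -reg->size == 2^64 - reg->size for a uint64_t,
		 * so mmap_offset >= -reg->size holds exactly when
		 * mmap_offset + reg->size would overflow 64 bits.)
		 */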
		if (mmap_offset >= -reg->size) {
			VHOST_LOG_CONFIG(ERR,
				"mmap_offset (%#"PRIx64") and memory_size "
				"(%#"PRIx64") overflow\n",
				mmap_offset, reg->size);
			goto err_mmap;
		}

		mmap_size = reg->size + mmap_offset;

		/* mmap() without the MAP_ANONYMOUS flag must be called with a
		 * length argument aligned to the hugepage size on older
		 * long-term Linux kernels, such as 2.6.32 and 3.2.72, or it
		 * will fail with EINVAL.
		 *
		 * To avoid that failure, make sure the length stays aligned
		 * here.
		 */
		alignment = get_blk_size(fd);
		if (alignment == (uint64_t)-1) {
			VHOST_LOG_CONFIG(ERR,
				"couldn't get hugepage size through fstat\n");
			goto err_mmap;
		}
		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);
		if (mmap_size == 0) {
			/*
			 * It could happen if the initial mmap_size + alignment
			 * overflows uint64_t, which could happen if either the
			 * mmap_size or the alignment value is wrong.
			 *
			 * The mmap() kernel implementation would return an
			 * error, but better catch it before and provide useful
			 * info in the logs.
			 */
			VHOST_LOG_CONFIG(ERR, "mmap size (0x%" PRIx64 ") "
					"or alignment (0x%" PRIx64 ") is invalid\n",
					reg->size + mmap_offset, alignment);
			goto err_mmap;
		}

		populate = (dev->dequeue_zero_copy || dev->async_copy) ?
			MAP_POPULATE : 0;
		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				 MAP_SHARED | populate, fd, 0);

		if (mmap_addr == MAP_FAILED) {
			VHOST_LOG_CONFIG(ERR,
				"mmap region %u failed.\n", i);
			goto err_mmap;
		}

		reg->mmap_addr = mmap_addr;
		reg->mmap_size = mmap_size;
		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
				      mmap_offset;

		if (dev->dequeue_zero_copy || dev->async_copy)
			if (add_guest_pages(dev, reg, alignment) < 0) {
				VHOST_LOG_CONFIG(ERR,
					"adding guest pages to region %u failed.\n",
					i);
				goto err_mmap;
			}

		VHOST_LOG_CONFIG(INFO,
			"guest memory region %u, size: 0x%" PRIx64 "\n"
			"\t guest physical addr: 0x%" PRIx64 "\n"
			"\t guest virtual  addr: 0x%" PRIx64 "\n"
			"\t host  virtual  addr: 0x%" PRIx64 "\n"
			"\t mmap addr : 0x%" PRIx64 "\n"
			"\t mmap size : 0x%" PRIx64 "\n"
			"\t mmap align: 0x%" PRIx64 "\n"
			"\t mmap off  : 0x%" PRIx64 "\n",
			i, reg->size,
			reg->guest_phys_addr,
			reg->guest_user_addr,
			reg->host_user_addr,
			(uint64_t)(uintptr_t)mmap_addr,
			mmap_size,
			alignment,
			mmap_offset);

		if (dev->postcopy_listening) {
			/*
			 * We don't have a better way right now than sharing
			 * DPDK's virtual address with Qemu, so that Qemu can
			 * retrieve the region offset when handling userfaults.
			 */
			memory->regions[i].userspace_addr =
				reg->host_user_addr;
		}
	}
	if (dev->postcopy_listening) {
		/* Send the addresses back to qemu */
		msg->fd_num = 0;
		send_vhost_reply(main_fd, msg);

		/* Wait for qemu to acknowledge that it has got the addresses;
		 * we have to wait before we're allowed to generate faults.
		 */
		VhostUserMsg ack_msg;
		if (read_vhost_message(main_fd, &ack_msg) <= 0) {
			VHOST_LOG_CONFIG(ERR,
				"Failed to read qemu ack on postcopy set-mem-table\n");
			goto err_mmap;
		}

		if (validate_msg_fds(&ack_msg, 0) != 0)
			goto err_mmap;

		if (ack_msg.request.master != VHOST_USER_SET_MEM_TABLE) {
			VHOST_LOG_CONFIG(ERR,
				"Bad qemu ack on postcopy set-mem-table (%d)\n",
				ack_msg.request.master);
			goto err_mmap;
		}

		/* Now register the regions with userfaultfd so we can use the
		 * memory.
		 */
		for (i = 0; i < memory->nregions; i++) {
#ifdef RTE_LIBRTE_VHOST_POSTCOPY
			reg = &dev->mem->regions[i];
			struct uffdio_register reg_struct;

			/*
			 * Let's register all the mmap'ed area to ensure
			 * alignment on page boundary.
			 */
			reg_struct.range.start =
				(uint64_t)(uintptr_t)reg->mmap_addr;
			reg_struct.range.len = reg->mmap_size;
			reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

			if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER,
						&reg_struct)) {
				VHOST_LOG_CONFIG(ERR,
					"Failed to register ufd for region %d: (ufd = %d) %s\n",
					i, dev->postcopy_ufd,
					strerror(errno));
				goto err_mmap;
			}
			VHOST_LOG_CONFIG(INFO,
				"\t userfaultfd registered for range : "
				"%" PRIx64 " - %" PRIx64 "\n",
				(uint64_t)reg_struct.range.start,
				(uint64_t)reg_struct.range.start +
				(uint64_t)reg_struct.range.len - 1);
#else
			goto err_mmap;
#endif
		}
	}

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq->desc || vq->avail || vq->used) {
			/*
			 * If the memory table got updated, the ring addresses
			 * need to be translated again as virtual addresses have
			 * changed.
			 */
			vring_invalidate(dev, vq);

			dev = translate_ring_addresses(dev, i);
			if (!dev) {
				dev = *pdev;
				goto err_mmap;
			}

			*pdev = dev;
		}
	}

	dump_guest_pages(dev);

	return RTE_VHOST_MSG_RESULT_OK;

err_mmap:
	free_mem_region(dev);
	rte_free(dev->mem);
	dev->mem = NULL;
	return RTE_VHOST_MSG_RESULT_ERR;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	bool rings_ok;

	if (!vq)
		return false;

	if (vq_is_packed(dev))
		rings_ok = vq->desc_packed && vq->driver_event &&
			vq->device_event;
	else
		rings_ok = vq->desc && vq->avail && vq->used;

	return rings_ok &&
	       vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
	       vq->enabled;
}

#define VIRTIO_DEV_NUM_VQS_TO_BE_READY 2u

static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *vq;
	uint32_t i;

	if (dev->nr_vring < VIRTIO_DEV_NUM_VQS_TO_BE_READY)
		return 0;

	for (i = 0; i < VIRTIO_DEV_NUM_VQS_TO_BE_READY; i++) {
		vq = dev->virtqueue[i];

		if (!vq_is_ready(dev, vq))
			return 0;
	}

	if (!(dev->flags & VIRTIO_DEV_RUNNING))
		VHOST_LOG_CONFIG(INFO,
			"virtio is now ready for processing.\n");
	return 1;
}

static void *
inflight_mem_alloc(const char *name, size_t size, int *fd)
{
	void *ptr;
	int mfd = -1;
	char fname[20] = "/tmp/memfd-XXXXXX";

	*fd = -1;
#ifdef MEMFD_SUPPORTED
	mfd = memfd_create(name, MFD_CLOEXEC);
#else
	RTE_SET_USED(name);
#endif
	if (mfd == -1) {
		mfd = mkstemp(fname);
		if (mfd == -1) {
			VHOST_LOG_CONFIG(ERR,
				"failed to get inflight buffer fd\n");
			return NULL;
		}

		unlink(fname);
	}

	if (ftruncate(mfd, size) == -1) {
		VHOST_LOG_CONFIG(ERR,
			"failed to alloc inflight buffer\n");
		close(mfd);
		return NULL;
	}

	ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
	if (ptr == MAP_FAILED) {
		VHOST_LOG_CONFIG(ERR,
			"failed to mmap inflight buffer\n");
		close(mfd);
		return NULL;
	}

	*fd = mfd;
	return ptr;
}

static uint32_t
get_pervq_shm_size_split(uint16_t queue_size)
{
	return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
				  queue_size + sizeof(uint64_t) +
				  sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
}

static uint32_t
get_pervq_shm_size_packed(uint16_t queue_size)
{
	return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
				  * queue_size + sizeof(uint64_t) +
				  sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
				  INFLIGHT_ALIGNMENT);
}
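
/*
 * A sketch of the arithmetic above (illustrative, queue_size = 256): the
 * split layout is queue_size inflight descriptors plus a header of one
 * uint64_t and four uint16_t (16 bytes), rounded up to a multiple of
 * INFLIGHT_ALIGNMENT:
 *
 *	size = RTE_ALIGN_MUL_CEIL(
 *		sizeof(struct rte_vhost_inflight_desc_split) * 256 + 16, 64);
 */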
1418
1419 static int
1420 vhost_user_get_inflight_fd(struct virtio_net **pdev,
1421                            VhostUserMsg *msg,
1422                            int main_fd __rte_unused)
1423 {
1424         struct rte_vhost_inflight_info_packed *inflight_packed;
1425         uint64_t pervq_inflight_size, mmap_size;
1426         uint16_t num_queues, queue_size;
1427         struct virtio_net *dev = *pdev;
1428         int fd, i, j;
1429         void *addr;
1430
1431         if (msg->size != sizeof(msg->payload.inflight)) {
1432                 VHOST_LOG_CONFIG(ERR,
1433                         "invalid get_inflight_fd message size: %u\n",
1434                         msg->size);
1435                 return RTE_VHOST_MSG_RESULT_ERR;
1436         }
1437
1438         if (dev->inflight_info == NULL) {
1439                 dev->inflight_info = calloc(1,
1440                                             sizeof(struct inflight_mem_info));
1441                 if (!dev->inflight_info) {
1442                         VHOST_LOG_CONFIG(ERR,
1443                                 "failed to alloc dev inflight area\n");
1444                         return RTE_VHOST_MSG_RESULT_ERR;
1445                 }
1446                 dev->inflight_info->fd = -1;
1447         }
1448
1449         num_queues = msg->payload.inflight.num_queues;
1450         queue_size = msg->payload.inflight.queue_size;
1451
1452         VHOST_LOG_CONFIG(INFO, "get_inflight_fd num_queues: %u\n",
1453                 num_queues);
1454         VHOST_LOG_CONFIG(INFO, "get_inflight_fd queue_size: %u\n",
1455                 queue_size);
1456
1457         if (vq_is_packed(dev))
1458                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1459         else
1460                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1461
1462         mmap_size = num_queues * pervq_inflight_size;
1463         addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
1464         if (!addr) {
1465                 VHOST_LOG_CONFIG(ERR,
1466                         "failed to alloc vhost inflight area\n");
1467                 msg->payload.inflight.mmap_size = 0;
1468                 return RTE_VHOST_MSG_RESULT_ERR;
1469         }
1470         memset(addr, 0, mmap_size);
1471
1472         if (dev->inflight_info->addr) {
1473                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1474                 dev->inflight_info->addr = NULL;
1475         }
1476
1477         if (dev->inflight_info->fd >= 0) {
1478                 close(dev->inflight_info->fd);
1479                 dev->inflight_info->fd = -1;
1480         }
1481
1482         dev->inflight_info->addr = addr;
1483         dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
1484         dev->inflight_info->fd = msg->fds[0] = fd;
1485         msg->payload.inflight.mmap_offset = 0;
1486         msg->fd_num = 1;
1487
1488         if (vq_is_packed(dev)) {
1489                 for (i = 0; i < num_queues; i++) {
1490                         inflight_packed =
1491                                 (struct rte_vhost_inflight_info_packed *)addr;
1492                         inflight_packed->used_wrap_counter = 1;
1493                         inflight_packed->old_used_wrap_counter = 1;
1494                         for (j = 0; j < queue_size; j++)
1495                                 inflight_packed->desc[j].next = j + 1;
1496                         addr = (void *)((char *)addr + pervq_inflight_size);
1497                 }
1498         }
1499
1500         VHOST_LOG_CONFIG(INFO,
1501                 "send inflight mmap_size: %"PRIu64"\n",
1502                 msg->payload.inflight.mmap_size);
1503         VHOST_LOG_CONFIG(INFO,
1504                 "send inflight mmap_offset: %"PRIu64"\n",
1505                 msg->payload.inflight.mmap_offset);
1506         VHOST_LOG_CONFIG(INFO,
1507                 "send inflight fd: %d\n", msg->fds[0]);
1508
1509         return RTE_VHOST_MSG_RESULT_REPLY;
1510 }
1511
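/*
 * VHOST_USER_SET_INFLIGHT_FD: map the inflight buffer handed over by
 * the master (e.g. after a reconnect) and point each virtqueue at its
 * per-queue slice.
 */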
1512 static int
1513 vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
1514                            int main_fd __rte_unused)
1515 {
1516         uint64_t mmap_size, mmap_offset;
1517         uint16_t num_queues, queue_size;
1518         struct virtio_net *dev = *pdev;
1519         uint32_t pervq_inflight_size;
1520         struct vhost_virtqueue *vq;
1521         void *addr;
1522         int fd, i;
1523
1524         fd = msg->fds[0];
1525         if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
1526                 VHOST_LOG_CONFIG(ERR,
1527                         "invalid set_inflight_fd message size: %u, fd: %d\n",
1528                         msg->size, fd);
1529                 return RTE_VHOST_MSG_RESULT_ERR;
1530         }
1531
1532         mmap_size = msg->payload.inflight.mmap_size;
1533         mmap_offset = msg->payload.inflight.mmap_offset;
1534         num_queues = msg->payload.inflight.num_queues;
1535         queue_size = msg->payload.inflight.queue_size;
1536
1537         if (vq_is_packed(dev))
1538                 pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
1539         else
1540                 pervq_inflight_size = get_pervq_shm_size_split(queue_size);
1541
1542         VHOST_LOG_CONFIG(INFO,
1543                 "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
1544         VHOST_LOG_CONFIG(INFO,
1545                 "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
1546         VHOST_LOG_CONFIG(INFO,
1547                 "set_inflight_fd num_queues: %u\n", num_queues);
1548         VHOST_LOG_CONFIG(INFO,
1549                 "set_inflight_fd queue_size: %u\n", queue_size);
1550         VHOST_LOG_CONFIG(INFO,
1551                 "set_inflight_fd fd: %d\n", fd);
1552         VHOST_LOG_CONFIG(INFO,
1553                 "set_inflight_fd pervq_inflight_size: %u\n",
1554                 pervq_inflight_size);
1555
1556         if (!dev->inflight_info) {
1557                 dev->inflight_info = calloc(1,
1558                                             sizeof(struct inflight_mem_info));
1559                 if (dev->inflight_info == NULL) {
1560                         VHOST_LOG_CONFIG(ERR,
1561                                 "failed to alloc dev inflight area\n");
1562                         return RTE_VHOST_MSG_RESULT_ERR;
1563                 }
1564                 dev->inflight_info->fd = -1;
1565         }
1566
1567         if (dev->inflight_info->addr) {
1568                 munmap(dev->inflight_info->addr, dev->inflight_info->size);
1569                 dev->inflight_info->addr = NULL;
1570         }
1571
1572         addr = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
1573                     fd, mmap_offset);
1574         if (addr == MAP_FAILED) {
1575                 VHOST_LOG_CONFIG(ERR, "failed to mmap shared memory\n");
1576                 close(fd);
1577                 return RTE_VHOST_MSG_RESULT_ERR;
1578         }
1578
1579         if (dev->inflight_info->fd >= 0) {
1580                 close(dev->inflight_info->fd);
1581                 dev->inflight_info->fd = -1;
1582         }
1583
1584         dev->inflight_info->fd = fd;
1585         dev->inflight_info->addr = addr;
1586         dev->inflight_info->size = mmap_size;
1587
1588         for (i = 0; i < num_queues; i++) {
1589                 vq = dev->virtqueue[i];
1590                 if (vq_is_packed(dev)) {
1591                         vq->inflight_packed = addr;
1592                         vq->inflight_packed->desc_num = queue_size;
1593                 } else {
1594                         vq->inflight_split = addr;
1595                         vq->inflight_split->desc_num = queue_size;
1596                 }
1597                 addr = (void *)((char *)addr + pervq_inflight_size);
1598         }
1599
1600         return RTE_VHOST_MSG_RESULT_OK;
1601 }
1602
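/*
 * VHOST_USER_SET_VRING_CALL: install the eventfd used to interrupt the
 * guest.  A ready ring is marked not-ready first so the application is
 * notified before the fd is swapped.
 */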
1603 static int
1604 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
1605                         int main_fd __rte_unused)
1606 {
1607         struct virtio_net *dev = *pdev;
1608         struct vhost_vring_file file;
1609         struct vhost_virtqueue *vq;
1610         int expected_fds;
1611
1612         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1613         if (validate_msg_fds(msg, expected_fds) != 0)
1614                 return RTE_VHOST_MSG_RESULT_ERR;
1615
1616         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1617         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1618                 file.fd = VIRTIO_INVALID_EVENTFD;
1619         else
1620                 file.fd = msg->fds[0];
1621         VHOST_LOG_CONFIG(INFO,
1622                 "vring call idx:%d file:%d\n", file.index, file.fd);
1623
1624         vq = dev->virtqueue[file.index];
1625
1626         if (vq->ready) {
1627                 vhost_user_notify_queue_state(dev, file.index, 0);
1628                 vq->ready = 0;
1629         }
1630
1631         if (vq->callfd >= 0)
1632                 close(vq->callfd);
1633
1634         vq->callfd = file.fd;
1635
1636         return RTE_VHOST_MSG_RESULT_OK;
1637 }
1638
1639 static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
1640                         struct VhostUserMsg *msg,
1641                         int main_fd __rte_unused)
1642 {
1643         int expected_fds;
1644
1645         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1646         if (validate_msg_fds(msg, expected_fds) != 0)
1647                 return RTE_VHOST_MSG_RESULT_ERR;
1648
1649         if (!(msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK))
1650                 close(msg->fds[0]);
1651         VHOST_LOG_CONFIG(INFO, "not implemented\n");
1652
1653         return RTE_VHOST_MSG_RESULT_OK;
1654 }
1655
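/*
 * qsort() comparator ordering resubmit descriptors by descending
 * counter, so resubmit_list[0] holds the most recently fetched entry.
 */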
1656 static int
1657 resubmit_desc_compare(const void *a, const void *b)
1658 {
1659         const struct rte_vhost_resubmit_desc *desc0 = a;
1660         const struct rte_vhost_resubmit_desc *desc1 = b;
1661
1662         if (desc1->counter > desc0->counter)
1663                 return 1;
1664
1665         return -1;
1666 }
1667
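/*
 * After a reconnect, scan the split ring's inflight buffer and build a
 * list of descriptors that were fetched but never marked used, so they
 * can be resubmitted.  The global counter resumes just above the
 * largest counter found.
 */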
1668 static int
1669 vhost_check_queue_inflights_split(struct virtio_net *dev,
1670                                   struct vhost_virtqueue *vq)
1671 {
1672         uint16_t i;
1673         uint16_t resubmit_num = 0, last_io, num;
1674         struct vring_used *used = vq->used;
1675         struct rte_vhost_resubmit_info *resubmit;
1676         struct rte_vhost_inflight_info_split *inflight_split;
1677
1678         if (!(dev->protocol_features &
1679             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1680                 return RTE_VHOST_MSG_RESULT_OK;
1681
1682         /* The frontend may still not support the inflight feature even
1683          * though the protocol feature was negotiated.
1684          */
1685         if (!vq->inflight_split)
1686                 return RTE_VHOST_MSG_RESULT_OK;
1687
1688         if (!vq->inflight_split->version) {
1689                 vq->inflight_split->version = INFLIGHT_VERSION;
1690                 return RTE_VHOST_MSG_RESULT_OK;
1691         }
1692
1693         if (vq->resubmit_inflight)
1694                 return RTE_VHOST_MSG_RESULT_OK;
1695
1696         inflight_split = vq->inflight_split;
1697         vq->global_counter = 0;
1698         last_io = inflight_split->last_inflight_io;
1699
1700         if (inflight_split->used_idx != used->idx) {
1701                 inflight_split->desc[last_io].inflight = 0;
1702                 rte_smp_mb();
1703                 inflight_split->used_idx = used->idx;
1704         }
1705
1706         for (i = 0; i < inflight_split->desc_num; i++) {
1707                 if (inflight_split->desc[i].inflight == 1)
1708                         resubmit_num++;
1709         }
1710
1711         vq->last_avail_idx += resubmit_num;
1712
1713         if (resubmit_num) {
1714                 resubmit  = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1715                 if (!resubmit) {
1716                         VHOST_LOG_CONFIG(ERR,
1717                                 "failed to allocate memory for resubmit info.\n");
1718                         return RTE_VHOST_MSG_RESULT_ERR;
1719                 }
1720
1721                 resubmit->resubmit_list = calloc(resubmit_num,
1722                         sizeof(struct rte_vhost_resubmit_desc));
1723                 if (!resubmit->resubmit_list) {
1724                         VHOST_LOG_CONFIG(ERR,
1725                                 "failed to allocate memory for inflight desc.\n");
1726                         free(resubmit);
1727                         return RTE_VHOST_MSG_RESULT_ERR;
1728                 }
1729
1730                 num = 0;
1731                 for (i = 0; i < inflight_split->desc_num; i++) {
1732                         if (inflight_split->desc[i].inflight == 1) {
1733                                 resubmit->resubmit_list[num].index = i;
1734                                 resubmit->resubmit_list[num].counter =
1735                                         inflight_split->desc[i].counter;
1736                                 num++;
1737                         }
1738                 }
1739                 resubmit->resubmit_num = num;
1740
1741                 if (resubmit->resubmit_num > 1)
1742                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1743                               sizeof(struct rte_vhost_resubmit_desc),
1744                               resubmit_desc_compare);
1745
1746                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1747                 vq->resubmit_inflight = resubmit;
1748         }
1749
1750         return RTE_VHOST_MSG_RESULT_OK;
1751 }
1752
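/*
 * Packed-ring variant of the inflight scan.  If the device stopped
 * between updating used_idx and clearing the inflight flag, the old_*
 * shadow fields are used to either commit or roll back that torn
 * update before the resubmit list is built.
 */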
1753 static int
1754 vhost_check_queue_inflights_packed(struct virtio_net *dev,
1755                                    struct vhost_virtqueue *vq)
1756 {
1757         uint16_t i;
1758         uint16_t resubmit_num = 0, old_used_idx, num;
1759         struct rte_vhost_resubmit_info *resubmit;
1760         struct rte_vhost_inflight_info_packed *inflight_packed;
1761
1762         if (!(dev->protocol_features &
1763             (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
1764                 return RTE_VHOST_MSG_RESULT_OK;
1765
1766         /* The frontend may still not support the inflight feature even
1767          * though the protocol feature was negotiated.
1768          */
1769         if (!vq->inflight_packed)
1770                 return RTE_VHOST_MSG_RESULT_OK;
1771
1772         if (!vq->inflight_packed->version) {
1773                 vq->inflight_packed->version = INFLIGHT_VERSION;
1774                 return RTE_VHOST_MSG_RESULT_OK;
1775         }
1776
1777         if (vq->resubmit_inflight)
1778                 return RTE_VHOST_MSG_RESULT_OK;
1779
1780         inflight_packed = vq->inflight_packed;
1781         vq->global_counter = 0;
1782         old_used_idx = inflight_packed->old_used_idx;
1783
1784         if (inflight_packed->used_idx != old_used_idx) {
1785                 if (inflight_packed->desc[old_used_idx].inflight == 0) {
1786                         inflight_packed->old_used_idx =
1787                                 inflight_packed->used_idx;
1788                         inflight_packed->old_used_wrap_counter =
1789                                 inflight_packed->used_wrap_counter;
1790                         inflight_packed->old_free_head =
1791                                 inflight_packed->free_head;
1792                 } else {
1793                         inflight_packed->used_idx =
1794                                 inflight_packed->old_used_idx;
1795                         inflight_packed->used_wrap_counter =
1796                                 inflight_packed->old_used_wrap_counter;
1797                         inflight_packed->free_head =
1798                                 inflight_packed->old_free_head;
1799                 }
1800         }
1801
1802         for (i = 0; i < inflight_packed->desc_num; i++) {
1803                 if (inflight_packed->desc[i].inflight == 1)
1804                         resubmit_num++;
1805         }
1806
1807         if (resubmit_num) {
1808                 resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
1809                 if (resubmit == NULL) {
1810                         VHOST_LOG_CONFIG(ERR,
1811                                 "failed to allocate memory for resubmit info.\n");
1812                         return RTE_VHOST_MSG_RESULT_ERR;
1813                 }
1814
1815                 resubmit->resubmit_list = calloc(resubmit_num,
1816                         sizeof(struct rte_vhost_resubmit_desc));
1817                 if (resubmit->resubmit_list == NULL) {
1818                         VHOST_LOG_CONFIG(ERR,
1819                                 "failed to allocate memory for resubmit desc.\n");
1820                         free(resubmit);
1821                         return RTE_VHOST_MSG_RESULT_ERR;
1822                 }
1823
1824                 num = 0;
1825                 for (i = 0; i < inflight_packed->desc_num; i++) {
1826                         if (inflight_packed->desc[i].inflight == 1) {
1827                                 resubmit->resubmit_list[num].index = i;
1828                                 resubmit->resubmit_list[num].counter =
1829                                         inflight_packed->desc[i].counter;
1830                                 num++;
1831                         }
1832                 }
1833                 resubmit->resubmit_num = num;
1834
1835                 if (resubmit->resubmit_num > 1)
1836                         qsort(resubmit->resubmit_list, resubmit->resubmit_num,
1837                               sizeof(struct rte_vhost_resubmit_desc),
1838                               resubmit_desc_compare);
1839
1840                 vq->global_counter = resubmit->resubmit_list[0].counter + 1;
1841                 vq->resubmit_inflight = resubmit;
1842         }
1843
1844         return RTE_VHOST_MSG_RESULT_OK;
1845 }
1846
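/*
 * VHOST_USER_SET_VRING_KICK: install the eventfd the guest kicks to
 * notify the backend.  Ring addresses are (re)translated here since the
 * ring is about to start, and the inflight buffer is scanned for
 * descriptors that need resubmitting.
 */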
1847 static int
1848 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
1849                         int main_fd __rte_unused)
1850 {
1851         struct virtio_net *dev = *pdev;
1852         struct vhost_vring_file file;
1853         struct vhost_virtqueue *vq;
1854         int expected_fds;
1855
1856         expected_fds = (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK) ? 0 : 1;
1857         if (validate_msg_fds(msg, expected_fds) != 0)
1858                 return RTE_VHOST_MSG_RESULT_ERR;
1859
1860         file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
1861         if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
1862                 file.fd = VIRTIO_INVALID_EVENTFD;
1863         else
1864                 file.fd = msg->fds[0];
1865         VHOST_LOG_CONFIG(INFO,
1866                 "vring kick idx:%d file:%d\n", file.index, file.fd);
1867
1868         /* Interpret ring addresses only when ring is started. */
1869         dev = translate_ring_addresses(dev, file.index);
1870         if (!dev)
1871                 return RTE_VHOST_MSG_RESULT_ERR;
1872
1873         *pdev = dev;
1874
1875         vq = dev->virtqueue[file.index];
1876
1877         /*
1878          * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
1879          * the ring starts already enabled. Otherwise, it is enabled via
1880          * the SET_VRING_ENABLE message.
1881          */
1882         if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
1883                 vq->enabled = 1;
1884                 if (dev->notify_ops->vring_state_changed)
1885                         dev->notify_ops->vring_state_changed(
1886                                 dev->vid, file.index, 1);
1887         }
1888
1889         if (vq->ready) {
1890                 vhost_user_notify_queue_state(dev, file.index, 0);
1891                 vq->ready = 0;
1892         }
1893
1894         if (vq->kickfd >= 0)
1895                 close(vq->kickfd);
1896         vq->kickfd = file.fd;
1897
1898         if (vq_is_packed(dev)) {
1899                 if (vhost_check_queue_inflights_packed(dev, vq)) {
1900                         VHOST_LOG_CONFIG(ERR,
1901                                 "failed to check inflights for vq: %d\n", file.index);
1902                         return RTE_VHOST_MSG_RESULT_ERR;
1903                 }
1904         } else {
1905                 if (vhost_check_queue_inflights_split(dev, vq)) {
1906                         VHOST_LOG_CONFIG(ERR,
1907                                 "failed to check inflights for vq: %d\n", file.index);
1908                         return RTE_VHOST_MSG_RESULT_ERR;
1909                 }
1910         }
1911
1912         return RTE_VHOST_MSG_RESULT_OK;
1913 }
1914
1915 static void
1916 free_zmbufs(struct vhost_virtqueue *vq)
1917 {
1918         drain_zmbuf_list(vq);
1919
1920         rte_free(vq->zmbufs);
1921 }
1922
1923 /*
1924  * when virtio is stopped, qemu will send us the GET_VRING_BASE message.
1925  */
1926 static int
1927 vhost_user_get_vring_base(struct virtio_net **pdev,
1928                         struct VhostUserMsg *msg,
1929                         int main_fd __rte_unused)
1930 {
1931         struct virtio_net *dev = *pdev;
1932         struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];
1933         uint64_t val;
1934
1935         if (validate_msg_fds(msg, 0) != 0)
1936                 return RTE_VHOST_MSG_RESULT_ERR;
1937
1938         /* We have to stop the queue (virtio) if it is running. */
1939         vhost_destroy_device_notify(dev);
1940
1941         dev->flags &= ~VIRTIO_DEV_READY;
1942         dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
1943
1944         /* Here we are safe to get the indexes */
1945         if (vq_is_packed(dev)) {
1946                 /*
1947                  * Bit[0:14]: avail index
1948                  * Bit[15]: avail wrap counter
1949                  */
1950                 val = vq->last_avail_idx & 0x7fff;
1951                 val |= vq->avail_wrap_counter << 15;
1952                 msg->payload.state.num = val;
1953         } else {
1954                 msg->payload.state.num = vq->last_avail_idx;
1955         }
1956
1957         VHOST_LOG_CONFIG(INFO,
1958                 "vring base idx:%d file:%d\n", msg->payload.state.index,
1959                 msg->payload.state.num);
1960         /*
1961          * Based on the current qemu vhost-user implementation, this message
1962          * is only ever sent from vhost_vring_stop.
1963          * TODO: clean up the vring; it isn't usable from this point on.
1964          */
1965         if (vq->kickfd >= 0)
1966                 close(vq->kickfd);
1967
1968         vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
1969
1970         if (vq->callfd >= 0)
1971                 close(vq->callfd);
1972
1973         vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
1974
1975         vq->signalled_used_valid = false;
1976
1977         if (dev->dequeue_zero_copy)
1978                 free_zmbufs(vq);
1979         if (vq_is_packed(dev)) {
1980                 rte_free(vq->shadow_used_packed);
1981                 vq->shadow_used_packed = NULL;
1982         } else {
1983                 rte_free(vq->shadow_used_split);
1984                 vq->shadow_used_split = NULL;
1985                 rte_free(vq->async_pkts_pending);
1986                 rte_free(vq->async_pending_info);
1989                 vq->async_pkts_pending = NULL;
1990                 vq->async_pending_info = NULL;
1991         }
1992
1993         rte_free(vq->batch_copy_elems);
1994         vq->batch_copy_elems = NULL;
1995
1996         msg->size = sizeof(msg->payload.state);
1997         msg->fd_num = 0;
1998
1999         vring_invalidate(dev, vq);
2000
2001         return RTE_VHOST_MSG_RESULT_REPLY;
2002 }
2003
2004 /*
2005  * When the virtio queues are ready to work, qemu sends this message
2006  * to enable or disable a virtio queue pair.
2007  */
2008 static int
2009 vhost_user_set_vring_enable(struct virtio_net **pdev,
2010                         struct VhostUserMsg *msg,
2011                         int main_fd __rte_unused)
2012 {
2013         struct virtio_net *dev = *pdev;
2014         int enable = (int)msg->payload.state.num;
2015         int index = (int)msg->payload.state.index;
2016
2017         if (validate_msg_fds(msg, 0) != 0)
2018                 return RTE_VHOST_MSG_RESULT_ERR;
2019
2020         VHOST_LOG_CONFIG(INFO,
2021                 "set queue enable: %d to qp idx: %d\n",
2022                 enable, index);
2023
2024         if (!enable && dev->virtqueue[index]->async_registered) {
2025                 if (dev->virtqueue[index]->async_pkts_inflight_n) {
2026                         VHOST_LOG_CONFIG(ERR, "failed to disable vring. "
2027                                 "async inflight packets must be completed first\n");
2028                         return RTE_VHOST_MSG_RESULT_ERR;
2029                 }
2030         }
2031
2032         /* On disable, rings have to be stopped being processed. */
2033         if (!enable && dev->dequeue_zero_copy)
2034                 drain_zmbuf_list(dev->virtqueue[index]);
2035
2036         dev->virtqueue[index]->enabled = enable;
2037
2038         return RTE_VHOST_MSG_RESULT_OK;
2039 }
2040
2041 static int
2042 vhost_user_get_protocol_features(struct virtio_net **pdev,
2043                         struct VhostUserMsg *msg,
2044                         int main_fd __rte_unused)
2045 {
2046         struct virtio_net *dev = *pdev;
2047         uint64_t protocol_features;
2048
2049         if (validate_msg_fds(msg, 0) != 0)
2050                 return RTE_VHOST_MSG_RESULT_ERR;
2051
2052         rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);
2054
2055         msg->payload.u64 = protocol_features;
2056         msg->size = sizeof(msg->payload.u64);
2057         msg->fd_num = 0;
2058
2059         return RTE_VHOST_MSG_RESULT_REPLY;
2060 }
2061
2062 static int
2063 vhost_user_set_protocol_features(struct virtio_net **pdev,
2064                         struct VhostUserMsg *msg,
2065                         int main_fd __rte_unused)
2066 {
2067         struct virtio_net *dev = *pdev;
2068         uint64_t protocol_features = msg->payload.u64;
2069         uint64_t slave_protocol_features = 0;
2070
2071         if (validate_msg_fds(msg, 0) != 0)
2072                 return RTE_VHOST_MSG_RESULT_ERR;
2073
2074         rte_vhost_driver_get_protocol_features(dev->ifname,
2075                         &slave_protocol_features);
2076         if (protocol_features & ~slave_protocol_features) {
2077                 VHOST_LOG_CONFIG(ERR,
2078                         "(%d) received invalid protocol features.\n",
2079                         dev->vid);
2080                 return RTE_VHOST_MSG_RESULT_ERR;
2081         }
2082
2083         dev->protocol_features = protocol_features;
2084         VHOST_LOG_CONFIG(INFO,
2085                 "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
2086                 dev->protocol_features);
2087
2088         return RTE_VHOST_MSG_RESULT_OK;
2089 }
2090
2091 static int
2092 vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
2093                         int main_fd __rte_unused)
2094 {
2095         struct virtio_net *dev = *pdev;
2096         int fd = msg->fds[0];
2097         uint64_t size, off;
2098         void *addr;
2099
2100         if (validate_msg_fds(msg, 1) != 0)
2101                 return RTE_VHOST_MSG_RESULT_ERR;
2102
2103         if (fd < 0) {
2104                 VHOST_LOG_CONFIG(ERR, "invalid log fd: %d\n", fd);
2105                 return RTE_VHOST_MSG_RESULT_ERR;
2106         }
2107
2108         if (msg->size != sizeof(VhostUserLog)) {
2109                 VHOST_LOG_CONFIG(ERR,
2110                         "invalid log base msg size: %"PRIu32" != %d\n",
2111                         msg->size, (int)sizeof(VhostUserLog));
2112                 return RTE_VHOST_MSG_RESULT_ERR;
2113         }
2114
2115         size = msg->payload.log.mmap_size;
2116         off  = msg->payload.log.mmap_offset;
2117
2118         /* Overflow check: in uint64_t math -size is 2^64 - size, so off >= -size iff off + size wraps. */
2119         if (off >= -size) {
2120                 VHOST_LOG_CONFIG(ERR,
2121                         "log offset %#"PRIx64" and log size %#"PRIx64" overflow\n",
2122                         off, size);
2123                 return RTE_VHOST_MSG_RESULT_ERR;
2124         }
2125
2126         VHOST_LOG_CONFIG(INFO,
2127                 "log mmap size: %"PRIu64", offset: %"PRIu64"\n",
2128                 size, off);
2129
2130         /*
2131          * Mmap from offset 0 to work around a hugepage mmap bug: mmap
2132          * fails when the offset is not page-size aligned.
2133          */
2134         addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2135         close(fd);
2136         if (addr == MAP_FAILED) {
2137                 VHOST_LOG_CONFIG(ERR, "mmap log base failed!\n");
2138                 return RTE_VHOST_MSG_RESULT_ERR;
2139         }
2140
2141         /*
2142          * Free any previously mapped log memory, in case
2143          * VHOST_USER_SET_LOG_BASE is received more than once.
2144          */
2145         if (dev->log_addr) {
2146                 munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
2147         }
2148         dev->log_addr = (uint64_t)(uintptr_t)addr;
2149         dev->log_base = dev->log_addr + off;
2150         dev->log_size = size;
2151
2152         /*
2153          * The spec is not clear about it (yet), but QEMU doesn't expect
2154          * any payload in the reply.
2155          */
2156         msg->size = 0;
2157         msg->fd_num = 0;
2158
2159         return RTE_VHOST_MSG_RESULT_REPLY;
2160 }
2161
2162 static int vhost_user_set_log_fd(struct virtio_net **pdev __rte_unused,
2163                         struct VhostUserMsg *msg,
2164                         int main_fd __rte_unused)
2165 {
2166         if (validate_msg_fds(msg, 1) != 0)
2167                 return RTE_VHOST_MSG_RESULT_ERR;
2168
2169         close(msg->fds[0]);
2170         VHOST_LOG_CONFIG(INFO, "not implemented.\n");
2171
2172         return RTE_VHOST_MSG_RESULT_OK;
2173 }
2174
2175 /*
2176  * A RARP packet is constructed and broadcast to notify switches of the
2177  * new location of the migrated VM, so that packets from outside are not
2178  * lost after migration.
2179  *
2180  * However, we don't actually "send" the RARP packet here; instead, we set
2181  * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
2182  */
2183 static int
2184 vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
2185                         int main_fd __rte_unused)
2186 {
2187         struct virtio_net *dev = *pdev;
2188         uint8_t *mac = (uint8_t *)&msg->payload.u64;
2189         struct rte_vdpa_device *vdpa_dev;
2190
2191         if (validate_msg_fds(msg, 0) != 0)
2192                 return RTE_VHOST_MSG_RESULT_ERR;
2193
2194         VHOST_LOG_CONFIG(DEBUG,
2195                 ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
2196                 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
2197         memcpy(dev->mac.addr_bytes, mac, 6);
2198
2199         /*
2200          * Set the flag to inject a RARP broadcast packet at
2201          * rte_vhost_dequeue_burst().
2202          *
2203          * __ATOMIC_RELEASE ordering is for making sure the mac is
2204          * copied before the flag is set.
2205          */
2206         __atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
2207         vdpa_dev = dev->vdpa_dev;
2208         if (vdpa_dev && vdpa_dev->ops->migration_done)
2209                 vdpa_dev->ops->migration_done(dev->vid);
2210
2211         return RTE_VHOST_MSG_RESULT_OK;
2212 }
2213
2214 static int
2215 vhost_user_net_set_mtu(struct virtio_net **pdev, struct VhostUserMsg *msg,
2216                         int main_fd __rte_unused)
2217 {
2218         struct virtio_net *dev = *pdev;
2219
2220         if (validate_msg_fds(msg, 0) != 0)
2221                 return RTE_VHOST_MSG_RESULT_ERR;
2222
2223         if (msg->payload.u64 < VIRTIO_MIN_MTU ||
2224                         msg->payload.u64 > VIRTIO_MAX_MTU) {
2225                 VHOST_LOG_CONFIG(ERR, "Invalid MTU size (%"PRIu64")\n",
2226                                 msg->payload.u64);
2227
2228                 return RTE_VHOST_MSG_RESULT_ERR;
2229         }
2230
2231         dev->mtu = msg->payload.u64;
2232
2233         return RTE_VHOST_MSG_RESULT_OK;
2234 }
2235
2236 static int
2237 vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
2238                         int main_fd __rte_unused)
2239 {
2240         struct virtio_net *dev = *pdev;
2241         int fd = msg->fds[0];
2242
2243         if (validate_msg_fds(msg, 1) != 0)
2244                 return RTE_VHOST_MSG_RESULT_ERR;
2245
2246         if (fd < 0) {
2247                 VHOST_LOG_CONFIG(ERR,
2248                                 "Invalid file descriptor for slave channel (%d)\n",
2249                                 fd);
2250                 return RTE_VHOST_MSG_RESULT_ERR;
2251         }
2252
2253         if (dev->slave_req_fd >= 0)
2254                 close(dev->slave_req_fd);
2255
2256         dev->slave_req_fd = fd;
2257
2258         return RTE_VHOST_MSG_RESULT_OK;
2259 }
2260
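/*
 * Return 1 if [imsg->iova, imsg->iova + imsg->size) overlaps any part
 * of the split ring (descriptor table, avail ring, used ring or log
 * address), meaning the ring addresses must be retranslated or
 * invalidated.
 */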
2261 static int
2262 is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2263 {
2264         struct vhost_vring_addr *ra;
2265         uint64_t start, end, len;
2266
2267         start = imsg->iova;
2268         end = start + imsg->size;
2269
2270         ra = &vq->ring_addrs;
2271         len = sizeof(struct vring_desc) * vq->size;
2272         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2273                 return 1;
2274
2275         len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
2276         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2277                 return 1;
2278
2279         len = sizeof(struct vring_used) +
2280                sizeof(struct vring_used_elem) * vq->size;
2281         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2282                 return 1;
2283
2284         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2285                 len = sizeof(uint64_t);
2286                 if (ra->log_guest_addr < end &&
2287                     (ra->log_guest_addr + len) > start)
2288                         return 1;
2289         }
2290
2291         return 0;
2292 }
2293
2294 static int
2295 is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
2296 {
2297         struct vhost_vring_addr *ra;
2298         uint64_t start, end, len;
2299
2300         start = imsg->iova;
2301         end = start + imsg->size;
2302
2303         ra = &vq->ring_addrs;
2304         len = sizeof(struct vring_packed_desc) * vq->size;
2305         if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
2306                 return 1;
2307
2308         len = sizeof(struct vring_packed_desc_event);
2309         if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
2310                 return 1;
2311
2312         len = sizeof(struct vring_packed_desc_event);
2313         if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
2314                 return 1;
2315
2316         if (ra->flags & (1 << VHOST_VRING_F_LOG)) {
2317                 len = sizeof(uint64_t);
2318                 if (ra->log_guest_addr < end &&
2319                     (ra->log_guest_addr + len) > start)
2320                         return 1;
2321         }
2322
2323         return 0;
2324 }
2325
2326 static int is_vring_iotlb(struct virtio_net *dev,
2327                           struct vhost_virtqueue *vq,
2328                           struct vhost_iotlb_msg *imsg)
2329 {
2330         if (vq_is_packed(dev))
2331                 return is_vring_iotlb_packed(vq, imsg);
2332         else
2333                 return is_vring_iotlb_split(vq, imsg);
2334 }
2335
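/*
 * VHOST_USER_IOTLB_MSG: update the per-virtqueue IOTLB caches.  Updates
 * covering a ring trigger ring-address retranslation; invalidations
 * covering a ring invalidate the translated addresses.
 */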
2336 static int
2337 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
2338                         int main_fd __rte_unused)
2339 {
2340         struct virtio_net *dev = *pdev;
2341         struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
2342         uint16_t i;
2343         uint64_t vva, len;
2344
2345         if (validate_msg_fds(msg, 0) != 0)
2346                 return RTE_VHOST_MSG_RESULT_ERR;
2347
2348         switch (imsg->type) {
2349         case VHOST_IOTLB_UPDATE:
2350                 len = imsg->size;
2351                 vva = qva_to_vva(dev, imsg->uaddr, &len);
2352                 if (!vva)
2353                         return RTE_VHOST_MSG_RESULT_ERR;
2354
2355                 for (i = 0; i < dev->nr_vring; i++) {
2356                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2357
2358                         vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
2359                                         len, imsg->perm);
2360
2361                         if (is_vring_iotlb(dev, vq, imsg))
2362                                 *pdev = dev = translate_ring_addresses(dev, i);
2363                 }
2364                 break;
2365         case VHOST_IOTLB_INVALIDATE:
2366                 for (i = 0; i < dev->nr_vring; i++) {
2367                         struct vhost_virtqueue *vq = dev->virtqueue[i];
2368
2369                         vhost_user_iotlb_cache_remove(vq, imsg->iova,
2370                                         imsg->size);
2371
2372                         if (is_vring_iotlb(dev, vq, imsg))
2373                                 vring_invalidate(dev, vq);
2374                 }
2375                 break;
2376         default:
2377                 VHOST_LOG_CONFIG(ERR, "Invalid IOTLB message type (%d)\n",
2378                                 imsg->type);
2379                 return RTE_VHOST_MSG_RESULT_ERR;
2380         }
2381
2382         return RTE_VHOST_MSG_RESULT_OK;
2383 }
2384
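/*
 * VHOST_USER_POSTCOPY_ADVISE: open a userfaultfd, negotiate the UFFD
 * API and return the fd so the master can service postcopy page faults.
 * Without RTE_LIBRTE_VHOST_POSTCOPY support this always fails.
 */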
2385 static int
2386 vhost_user_set_postcopy_advise(struct virtio_net **pdev,
2387                         struct VhostUserMsg *msg,
2388                         int main_fd __rte_unused)
2389 {
2390         struct virtio_net *dev = *pdev;
2391 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
2392         struct uffdio_api api_struct;
2393
2394         if (validate_msg_fds(msg, 0) != 0)
2395                 return RTE_VHOST_MSG_RESULT_ERR;
2396
2397         dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
2398
2399         if (dev->postcopy_ufd == -1) {
2400                 VHOST_LOG_CONFIG(ERR, "Userfaultfd not available: %s\n",
2401                         strerror(errno));
2402                 return RTE_VHOST_MSG_RESULT_ERR;
2403         }
2404         api_struct.api = UFFD_API;
2405         api_struct.features = 0;
2406         if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
2407                 VHOST_LOG_CONFIG(ERR, "UFFDIO_API ioctl failure: %s\n",
2408                         strerror(errno));
2409                 close(dev->postcopy_ufd);
2410                 dev->postcopy_ufd = -1;
2411                 return RTE_VHOST_MSG_RESULT_ERR;
2412         }
2413         msg->fds[0] = dev->postcopy_ufd;
2414         msg->fd_num = 1;
2415
2416         return RTE_VHOST_MSG_RESULT_REPLY;
2417 #else
2418         dev->postcopy_ufd = -1;
2419         msg->fd_num = 0;
2420
2421         return RTE_VHOST_MSG_RESULT_ERR;
2422 #endif
2423 }
2424
2425 static int
2426 vhost_user_set_postcopy_listen(struct virtio_net **pdev,
2427                         struct VhostUserMsg *msg,
2428                         int main_fd __rte_unused)
2429 {
2430         struct virtio_net *dev = *pdev;
2431
2432         if (validate_msg_fds(msg, 0) != 0)
2433                 return RTE_VHOST_MSG_RESULT_ERR;
2434
2435         if (dev->mem && dev->mem->nregions) {
2436                 VHOST_LOG_CONFIG(ERR,
2437                         "Regions already registered at postcopy-listen\n");
2438                 return RTE_VHOST_MSG_RESULT_ERR;
2439         }
2440         dev->postcopy_listening = 1;
2441
2442         return RTE_VHOST_MSG_RESULT_OK;
2443 }
2444
2445 static int
2446 vhost_user_postcopy_end(struct virtio_net **pdev, struct VhostUserMsg *msg,
2447                         int main_fd __rte_unused)
2448 {
2449         struct virtio_net *dev = *pdev;
2450
2451         if (validate_msg_fds(msg, 0) != 0)
2452                 return RTE_VHOST_MSG_RESULT_ERR;
2453
2454         dev->postcopy_listening = 0;
2455         if (dev->postcopy_ufd >= 0) {
2456                 close(dev->postcopy_ufd);
2457                 dev->postcopy_ufd = -1;
2458         }
2459
2460         msg->payload.u64 = 0;
2461         msg->size = sizeof(msg->payload.u64);
2462         msg->fd_num = 0;
2463
2464         return RTE_VHOST_MSG_RESULT_REPLY;
2465 }
2466
2467 typedef int (*vhost_message_handler_t)(struct virtio_net **pdev,
2468                                         struct VhostUserMsg *msg,
2469                                         int main_fd);
2470 static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
2471         [VHOST_USER_NONE] = NULL,
2472         [VHOST_USER_GET_FEATURES] = vhost_user_get_features,
2473         [VHOST_USER_SET_FEATURES] = vhost_user_set_features,
2474         [VHOST_USER_SET_OWNER] = vhost_user_set_owner,
2475         [VHOST_USER_RESET_OWNER] = vhost_user_reset_owner,
2476         [VHOST_USER_SET_MEM_TABLE] = vhost_user_set_mem_table,
2477         [VHOST_USER_SET_LOG_BASE] = vhost_user_set_log_base,
2478         [VHOST_USER_SET_LOG_FD] = vhost_user_set_log_fd,
2479         [VHOST_USER_SET_VRING_NUM] = vhost_user_set_vring_num,
2480         [VHOST_USER_SET_VRING_ADDR] = vhost_user_set_vring_addr,
2481         [VHOST_USER_SET_VRING_BASE] = vhost_user_set_vring_base,
2482         [VHOST_USER_GET_VRING_BASE] = vhost_user_get_vring_base,
2483         [VHOST_USER_SET_VRING_KICK] = vhost_user_set_vring_kick,
2484         [VHOST_USER_SET_VRING_CALL] = vhost_user_set_vring_call,
2485         [VHOST_USER_SET_VRING_ERR] = vhost_user_set_vring_err,
2486         [VHOST_USER_GET_PROTOCOL_FEATURES] = vhost_user_get_protocol_features,
2487         [VHOST_USER_SET_PROTOCOL_FEATURES] = vhost_user_set_protocol_features,
2488         [VHOST_USER_GET_QUEUE_NUM] = vhost_user_get_queue_num,
2489         [VHOST_USER_SET_VRING_ENABLE] = vhost_user_set_vring_enable,
2490         [VHOST_USER_SEND_RARP] = vhost_user_send_rarp,
2491         [VHOST_USER_NET_SET_MTU] = vhost_user_net_set_mtu,
2492         [VHOST_USER_SET_SLAVE_REQ_FD] = vhost_user_set_req_fd,
2493         [VHOST_USER_IOTLB_MSG] = vhost_user_iotlb_msg,
2494         [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
2495         [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
2496         [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
2497         [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
2498         [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
2499 };
2500
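/*
 * External pre/post handlers registered via
 * rte_vhost_extern_callback_register() may also intercept messages
 * around the table above.  A minimal sketch of such a handler
 * (hypothetical name, assuming only the RTE_VHOST_MSG_RESULT_*
 * convention used in this file):
 *
 *	static int
 *	my_pre_handler(int vid, void *msg)
 *	{
 *		return RTE_VHOST_MSG_RESULT_NOT_HANDLED; (pass message on)
 *	}
 */
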
2501 /* Return bytes read on success, 0 on peer close, or a negative value on failure. */
2502 static int
2503 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
2504 {
2505         int ret;
2506
2507         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
2508                 msg->fds, VHOST_MEMORY_MAX_NREGIONS, &msg->fd_num);
2509         if (ret <= 0)
2510                 return ret;
2511         if (ret != VHOST_USER_HDR_SIZE) {
2512                 VHOST_LOG_CONFIG(ERR, "Unexpected header size read\n");
2513                 close_msg_fds(msg);
2514                 return -1;
2515         }
2516
2517         if (msg->size) {
2518                 if (msg->size > sizeof(msg->payload)) {
2519                         VHOST_LOG_CONFIG(ERR,
2520                                 "invalid msg size: %d\n", msg->size);
2521                         return -1;
2522                 }
2523                 ret = read(sockfd, &msg->payload, msg->size);
2524                 if (ret <= 0)
2525                         return ret;
2526                 if (ret != (int)msg->size) {
2527                         VHOST_LOG_CONFIG(ERR,
2528                                 "read control message failed\n");
2529                         return -1;
2530                 }
2531         }
2532
2533         return ret;
2534 }
2535
2536 static int
2537 send_vhost_message(int sockfd, struct VhostUserMsg *msg)
2538 {
2539         if (!msg)
2540                 return 0;
2541
2542         return send_fd_message(sockfd, (char *)msg,
2543                 VHOST_USER_HDR_SIZE + msg->size, msg->fds, msg->fd_num);
2544 }
2545
2546 static int
2547 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
2548 {
2549         if (!msg)
2550                 return 0;
2551
2552         msg->flags &= ~VHOST_USER_VERSION_MASK;
2553         msg->flags &= ~VHOST_USER_NEED_REPLY;
2554         msg->flags |= VHOST_USER_VERSION;
2555         msg->flags |= VHOST_USER_REPLY_MASK;
2556
2557         return send_vhost_message(sockfd, msg);
2558 }
2559
2560 static int
2561 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg)
2562 {
2563         int ret;
2564
2565         if (msg->flags & VHOST_USER_NEED_REPLY)
2566                 rte_spinlock_lock(&dev->slave_req_lock);
2567
2568         ret = send_vhost_message(dev->slave_req_fd, msg);
2569         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
2570                 rte_spinlock_unlock(&dev->slave_req_lock);
2571
2572         return ret;
2573 }
2574
2575 /*
2576  * Allocate a queue pair if it hasn't been allocated yet
2577  */
2578 static int
2579 vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
2580                         struct VhostUserMsg *msg)
2581 {
2582         uint32_t vring_idx;
2583
2584         switch (msg->request.master) {
2585         case VHOST_USER_SET_VRING_KICK:
2586         case VHOST_USER_SET_VRING_CALL:
2587         case VHOST_USER_SET_VRING_ERR:
2588                 vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
2589                 break;
2590         case VHOST_USER_SET_VRING_NUM:
2591         case VHOST_USER_SET_VRING_BASE:
2592         case VHOST_USER_SET_VRING_ENABLE:
2593                 vring_idx = msg->payload.state.index;
2594                 break;
2595         case VHOST_USER_SET_VRING_ADDR:
2596                 vring_idx = msg->payload.addr.index;
2597                 break;
2598         default:
2599                 return 0;
2600         }
2601
2602         if (vring_idx >= VHOST_MAX_VRING) {
2603                 VHOST_LOG_CONFIG(ERR,
2604                         "invalid vring index: %u\n", vring_idx);
2605                 return -1;
2606         }
2607
2608         if (dev->virtqueue[vring_idx])
2609                 return 0;
2610
2611         return alloc_vring_queue(dev, vring_idx);
2612 }
2613
2614 static void
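/*
 * Take every allocated virtqueue's access_lock so the datapath cannot
 * run while a control message mutates shared ring state.  Virtqueues
 * may be allocated sparsely, hence the separate vq_num counter.
 */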
2615 vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
2616 {
2617         unsigned int i = 0;
2618         unsigned int vq_num = 0;
2619
2620         while (vq_num < dev->nr_vring) {
2621                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2622
2623                 if (vq) {
2624                         rte_spinlock_lock(&vq->access_lock);
2625                         vq_num++;
2626                 }
2627                 i++;
2628         }
2629 }
2630
2631 static void
2632 vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
2633 {
2634         unsigned int i = 0;
2635         unsigned int vq_num = 0;
2636
2637         while (vq_num < dev->nr_vring) {
2638                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2639
2640                 if (vq) {
2641                         rte_spinlock_unlock(&vq->access_lock);
2642                         vq_num++;
2643                 }
2644                 i++;
2645         }
2646 }
2647
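/*
 * Central dispatch for master-initiated messages: read one message, run
 * the external pre-handler, the per-request handler and the external
 * post-handler, reply when required, then re-evaluate per-queue and
 * device readiness.
 */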
2648 int
2649 vhost_user_msg_handler(int vid, int fd)
2650 {
2651         struct virtio_net *dev;
2652         struct VhostUserMsg msg;
2653         struct rte_vdpa_device *vdpa_dev;
2654         int ret;
2655         int unlock_required = 0;
2656         bool handled;
2657         int request;
2658         uint32_t i;
2659
2660         dev = get_device(vid);
2661         if (dev == NULL)
2662                 return -1;
2663
2664         if (!dev->notify_ops) {
2665                 dev->notify_ops = vhost_driver_callback_get(dev->ifname);
2666                 if (!dev->notify_ops) {
2667                         VHOST_LOG_CONFIG(ERR,
2668                                 "failed to get callback ops for driver %s\n",
2669                                 dev->ifname);
2670                         return -1;
2671                 }
2672         }
2673
2674         ret = read_vhost_message(fd, &msg);
2675         if (ret <= 0) {
2676                 if (ret < 0)
2677                         VHOST_LOG_CONFIG(ERR,
2678                                 "vhost read message failed\n");
2679                 else
2680                         VHOST_LOG_CONFIG(INFO,
2681                                 "vhost peer closed\n");
2682
2683                 return -1;
2684         }
2685
2686         ret = 0;
2687         request = msg.request.master;
2688         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
2689                         vhost_message_str[request]) {
2690                 if (request != VHOST_USER_IOTLB_MSG)
2691                         VHOST_LOG_CONFIG(INFO, "read message %s\n",
2692                                 vhost_message_str[request]);
2693                 else
2694                         VHOST_LOG_CONFIG(DEBUG, "read message %s\n",
2695                                 vhost_message_str[request]);
2696         } else {
2697                 VHOST_LOG_CONFIG(DEBUG, "External request %d\n", request);
2698         }
2699
2700         ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
2701         if (ret < 0) {
2702                 VHOST_LOG_CONFIG(ERR,
2703                         "failed to alloc queue\n");
2704                 return -1;
2705         }
2706
2707         /*
2708          * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
2709          * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
2710          * and the device is destroyed. destroy_device waits for queues to
2711          * become inactive, so it is safe. Otherwise, taking the access_lock
2712          * would cause a deadlock.
2713          */
2714         switch (request) {
2715         case VHOST_USER_SET_FEATURES:
2716         case VHOST_USER_SET_PROTOCOL_FEATURES:
2717         case VHOST_USER_SET_OWNER:
2718         case VHOST_USER_SET_MEM_TABLE:
2719         case VHOST_USER_SET_LOG_BASE:
2720         case VHOST_USER_SET_LOG_FD:
2721         case VHOST_USER_SET_VRING_NUM:
2722         case VHOST_USER_SET_VRING_ADDR:
2723         case VHOST_USER_SET_VRING_BASE:
2724         case VHOST_USER_SET_VRING_KICK:
2725         case VHOST_USER_SET_VRING_CALL:
2726         case VHOST_USER_SET_VRING_ERR:
2727         case VHOST_USER_SET_VRING_ENABLE:
2728         case VHOST_USER_SEND_RARP:
2729         case VHOST_USER_NET_SET_MTU:
2730         case VHOST_USER_SET_SLAVE_REQ_FD:
2731                 if (!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2732                         vhost_user_lock_all_queue_pairs(dev);
2733                         unlock_required = 1;
2734                 }
2735                 break;
2736         default:
2737                 break;
2738         }
2740
2741         handled = false;
2742         if (dev->extern_ops.pre_msg_handle) {
2743                 ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
2744                                 (void *)&msg);
2745                 switch (ret) {
2746                 case RTE_VHOST_MSG_RESULT_REPLY:
2747                         send_vhost_reply(fd, &msg);
2748                         /* Fall-through */
2749                 case RTE_VHOST_MSG_RESULT_ERR:
2750                 case RTE_VHOST_MSG_RESULT_OK:
2751                         handled = true;
2752                         goto skip_to_post_handle;
2753                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2754                 default:
2755                         break;
2756                 }
2757         }
2758
2759         if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
2760                 if (!vhost_message_handlers[request])
2761                         goto skip_to_post_handle;
2762                 ret = vhost_message_handlers[request](&dev, &msg, fd);
2763
2764                 switch (ret) {
2765                 case RTE_VHOST_MSG_RESULT_ERR:
2766                         VHOST_LOG_CONFIG(ERR,
2767                                 "Processing %s failed.\n",
2768                                 vhost_message_str[request]);
2769                         handled = true;
2770                         break;
2771                 case RTE_VHOST_MSG_RESULT_OK:
2772                         VHOST_LOG_CONFIG(DEBUG,
2773                                 "Processing %s succeeded.\n",
2774                                 vhost_message_str[request]);
2775                         handled = true;
2776                         break;
2777                 case RTE_VHOST_MSG_RESULT_REPLY:
2778                         VHOST_LOG_CONFIG(DEBUG,
2779                                 "Processing %s succeeded and needs reply.\n",
2780                                 vhost_message_str[request]);
2781                         send_vhost_reply(fd, &msg);
2782                         handled = true;
2783                         break;
2784                 default:
2785                         break;
2786                 }
2787         }
2788
2789 skip_to_post_handle:
2790         if (ret != RTE_VHOST_MSG_RESULT_ERR &&
2791                         dev->extern_ops.post_msg_handle) {
2792                 ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
2793                                 (void *)&msg);
2794                 switch (ret) {
2795                 case RTE_VHOST_MSG_RESULT_REPLY:
2796                         send_vhost_reply(fd, &msg);
2797                         /* Fall-through */
2798                 case RTE_VHOST_MSG_RESULT_ERR:
2799                 case RTE_VHOST_MSG_RESULT_OK:
2800                         handled = true;
2801                         break;
2801                 case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
2802                 default:
2803                         break;
2804                 }
2805         }
2806
2807         if (unlock_required)
2808                 vhost_user_unlock_all_queue_pairs(dev);
2809
2810         /* If message was not handled at this stage, treat it as an error */
2811         if (!handled) {
2812                 VHOST_LOG_CONFIG(ERR,
2813                         "vhost message (req: %d) was not handled.\n", request);
2814                 close_msg_fds(&msg);
2815                 ret = RTE_VHOST_MSG_RESULT_ERR;
2816         }
2817
2818         /*
2819          * If the request required a reply that was already sent,
2820          * this optional reply-ack won't be sent as the
2821          * VHOST_USER_NEED_REPLY was cleared in send_vhost_reply().
2822          */
2823         if (msg.flags & VHOST_USER_NEED_REPLY) {
2824                 msg.payload.u64 = ret == RTE_VHOST_MSG_RESULT_ERR;
2825                 msg.size = sizeof(msg.payload.u64);
2826                 msg.fd_num = 0;
2827                 send_vhost_reply(fd, &msg);
2828         } else if (ret == RTE_VHOST_MSG_RESULT_ERR) {
2829                 VHOST_LOG_CONFIG(ERR,
2830                         "vhost message handling failed.\n");
2831                 return -1;
2832         }
2833
2834         for (i = 0; i < dev->nr_vring; i++) {
2835                 struct vhost_virtqueue *vq = dev->virtqueue[i];
2836                 bool cur_ready = vq_is_ready(dev, vq);
2837
2838                 if (cur_ready != (vq && vq->ready)) {
2839                         vhost_user_notify_queue_state(dev, i, cur_ready);
2840                         vq->ready = cur_ready;
2841                 }
2842         }
2843
2845         if (!(dev->flags & VIRTIO_DEV_READY) && virtio_is_ready(dev)) {
2846                 dev->flags |= VIRTIO_DEV_READY;
2847
2848                 if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
2849                         if (dev->dequeue_zero_copy) {
2850                                 VHOST_LOG_CONFIG(INFO,
2851                                                 "dequeue zero copy is enabled\n");
2852                         }
2853
2854                         if (dev->notify_ops->new_device(dev->vid) == 0)
2855                                 dev->flags |= VIRTIO_DEV_RUNNING;
2856                 }
2857         }
2858
2859         vdpa_dev = dev->vdpa_dev;
2860         if (vdpa_dev && virtio_is_ready(dev) &&
2861             !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED)) {
2862                 if (vdpa_dev->ops->dev_conf)
2863                         vdpa_dev->ops->dev_conf(dev->vid);
2864                 dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
2865         }
2866
2867         return 0;
2868 }
2869
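/*
 * Wait for the master's ack to a slave-initiated request sent with
 * VHOST_USER_NEED_REPLY. Returns 0 on success; -1 if the read fails,
 * the peer closes, the reply type is unexpected, or the ack payload
 * reports an error. Releases dev->slave_req_lock before returning.
 */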
static int
process_slave_message_reply(struct virtio_net *dev,
		const struct VhostUserMsg *msg)
{
2873         struct VhostUserMsg msg_reply;
2874         int ret;
2875
	/*
	 * The send path (send_vhost_slave_message()) takes
	 * dev->slave_req_lock only when a reply is expected, so there is
	 * nothing to release on this early return.
	 */
	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;
2878
2879         ret = read_vhost_message(dev->slave_req_fd, &msg_reply);
2880         if (ret <= 0) {
2881                 if (ret < 0)
2882                         VHOST_LOG_CONFIG(ERR,
2883                                 "vhost read slave message reply failed\n");
2884                 else
2885                         VHOST_LOG_CONFIG(INFO,
2886                                 "vhost peer closed\n");
2887                 ret = -1;
2888                 goto out;
2889         }
2890
2891         ret = 0;
2892         if (msg_reply.request.slave != msg->request.slave) {
2893                 VHOST_LOG_CONFIG(ERR,
2894                         "Received unexpected msg type (%u), expected %u\n",
2895                         msg_reply.request.slave, msg->request.slave);
2896                 ret = -1;
2897                 goto out;
2898         }
2899
2900         ret = msg_reply.payload.u64 ? -1 : 0;
2901
2902 out:
2903         rte_spinlock_unlock(&dev->slave_req_lock);
2904         return ret;
2905 }
2906
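/*
 * Report an IOTLB miss to the master: a translation for 'iova' with
 * permissions 'perm' is needed. Per the vhost-user protocol, the master
 * is expected to answer with an IOTLB update message.
 */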
2907 int
2908 vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
2909 {
2910         int ret;
2911         struct VhostUserMsg msg = {
2912                 .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
2913                 .flags = VHOST_USER_VERSION,
2914                 .size = sizeof(msg.payload.iotlb),
2915                 .payload.iotlb = {
2916                         .iova = iova,
2917                         .perm = perm,
2918                         .type = VHOST_IOTLB_MISS,
2919                 },
2920         };
2921
2922         ret = send_vhost_message(dev->slave_req_fd, &msg);
2923         if (ret < 0) {
2924                 VHOST_LOG_CONFIG(ERR,
2925                                 "Failed to send IOTLB miss message (%d)\n",
2926                                 ret);
2927                 return ret;
2928         }
2929
2930         return 0;
2931 }
2932
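/*
 * Tell the master that the device configuration space has changed. Per
 * the vhost-user protocol, the master typically propagates this to the
 * guest as a config interrupt.
 */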
2933 static int
2934 vhost_user_slave_config_change(struct virtio_net *dev, bool need_reply)
2935 {
2936         int ret;
2937         struct VhostUserMsg msg = {
2938                 .request.slave = VHOST_USER_SLAVE_CONFIG_CHANGE_MSG,
2939                 .flags = VHOST_USER_VERSION,
2940                 .size = 0,
2941         };
2942
2943         if (need_reply)
2944                 msg.flags |= VHOST_USER_NEED_REPLY;
2945
2946         ret = send_vhost_slave_message(dev, &msg);
2947         if (ret < 0) {
2948                 VHOST_LOG_CONFIG(ERR,
2949                                 "Failed to send config change (%d)\n",
2950                                 ret);
2951                 return ret;
2952         }
2953
2954         return process_slave_message_reply(dev, &msg);
2955 }
2956
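/*
 * Public API wrapper. A backend that has just updated its device config
 * space might call, for instance:
 *
 *     rte_vhost_slave_config_change(vid, true);
 *
 * where need_reply = true blocks until the master acknowledges
 * (illustrative sketch; the vid comes from the backend's own
 * bookkeeping).
 */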
2957 int
2958 rte_vhost_slave_config_change(int vid, bool need_reply)
2959 {
2960         struct virtio_net *dev;
2961
2962         dev = get_device(vid);
2963         if (!dev)
2964                 return -ENODEV;
2965
2966         return vhost_user_slave_config_change(dev, need_reply);
2967 }
2968
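/*
 * Ask the master to (un)map a host notifier area for virtqueue 'index'.
 * Passing fd < 0 requests removal of the notifier.
 */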
static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
		int index, int fd, uint64_t offset, uint64_t size)
{
2974         int ret;
2975         struct VhostUserMsg msg = {
2976                 .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
2977                 .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
2978                 .size = sizeof(msg.payload.area),
2979                 .payload.area = {
2980                         .u64 = index & VHOST_USER_VRING_IDX_MASK,
2981                         .size = size,
2982                         .offset = offset,
2983                 },
2984         };
2985
	/* A negative fd means "remove the notifier"; flag it, since no
	 * descriptor accompanies the message in that case.
	 */
	if (fd < 0) {
		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
	} else {
		msg.fds[0] = fd;
		msg.fd_num = 1;
	}
2992
2993         ret = send_vhost_slave_message(dev, &msg);
2994         if (ret < 0) {
2995                 VHOST_LOG_CONFIG(ERR,
2996                         "Failed to set host notifier (%d)\n", ret);
2997                 return ret;
2998         }
2999
3000         return process_slave_message_reply(dev, &msg);
3001 }
3002
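/*
 * Public API, typically used by vDPA drivers to let the guest kick the
 * device doorbell directly. Illustrative sketch (error handling is
 * driver specific):
 *
 *     if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
 *             return -1;  (or fall back to a software kick relay)
 */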
int
rte_vhost_host_notifier_ctrl(int vid, uint16_t qid, bool enable)
{
3005         struct virtio_net *dev;
3006         struct rte_vdpa_device *vdpa_dev;
3007         int vfio_device_fd, ret = 0;
3008         uint64_t offset, size;
3009         unsigned int i, q_start, q_last;
3010
3011         dev = get_device(vid);
3012         if (!dev)
3013                 return -ENODEV;
3014
3015         vdpa_dev = dev->vdpa_dev;
3016         if (vdpa_dev == NULL)
3017                 return -ENODEV;
3018
3019         if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
3020             !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
3021             !(dev->protocol_features &
3022                         (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
3023             !(dev->protocol_features &
3024                         (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
3025             !(dev->protocol_features &
3026                         (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
3027                 return -ENOTSUP;
3028
	if (qid == RTE_VHOST_QUEUE_ALL) {
		/* Guard the unsigned underflow of q_last below. */
		if (dev->nr_vring == 0)
			return -EINVAL;
		q_start = 0;
		q_last = dev->nr_vring - 1;
	} else {
		if (qid >= dev->nr_vring)
			return -EINVAL;
		q_start = qid;
		q_last = qid;
	}
3038
3039         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
3040         RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);
3041
3042         vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
3043         if (vfio_device_fd < 0)
3044                 return -ENOTSUP;
3045
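	/*
	 * Enable path: fetch and forward each queue's notify area. On any
	 * failure, jump to the 'disable' label below to roll back queues
	 * already set up.
	 */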
3046         if (enable) {
3047                 for (i = q_start; i <= q_last; i++) {
3048                         if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
3049                                         &size) < 0) {
3050                                 ret = -ENOTSUP;
3051                                 goto disable;
3052                         }
3053
3054                         if (vhost_user_slave_set_vring_host_notifier(dev, i,
3055                                         vfio_device_fd, offset, size) < 0) {
3056                                 ret = -EFAULT;
3057                                 goto disable;
3058                         }
3059                 }
3060         } else {
3061 disable:
3062                 for (i = q_start; i <= q_last; i++) {
3063                         vhost_user_slave_set_vring_host_notifier(dev, i, -1,
3064                                         0, 0);
3065                 }
3066         }
3067
3068         return ret;
3069 }