vhost: unify struct VhostUserMsg usage
lib/librte_vhost/vhost_user.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  Malicious guests that have escaped will then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
	[VHOST_USER_NONE] = "VHOST_USER_NONE",
	[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
	[VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
	[VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
	[VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
	[VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
	[VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
	[VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
	[VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
	[VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
	[VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
	[VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
	[VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
	[VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
	[VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
	[VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
	[VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
	[VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
	[VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
	[VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
	[VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
	[VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
	[VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
	[VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
	[VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};

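/*
 * fstat() on a hugetlbfs-backed fd reports the hugepage size as
 * st_blksize; it is used below as the mmap alignment.
 */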
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;
	int ret;

	ret = fstat(fd, &stat);
	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

static void
free_mem_region(struct virtio_net *dev)
{
	uint32_t i;
	struct rte_vhost_mem_region *reg;

	if (!dev || !dev->mem)
		return;

	for (i = 0; i < dev->mem->nregions; i++) {
		reg = &dev->mem->regions[i];
		if (reg->host_user_addr) {
			munmap(reg->mmap_addr, reg->mmap_size);
			close(reg->fd);
		}
	}
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
	if (dev->mem) {
		free_mem_region(dev);
		rte_free(dev->mem);
		dev->mem = NULL;
	}

	free(dev->guest_pages);
	dev->guest_pages = NULL;

	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
		dev->log_addr = 0;
	}

	if (dev->slave_req_fd >= 0) {
		close(dev->slave_req_fd);
		dev->slave_req_fd = -1;
	}
}

/*
 * This function just returns success at the moment; there is no
 * per-device state to set up when taking ownership.
 */
static int
vhost_user_set_owner(void)
{
	return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
	vhost_destroy_device_notify(dev);

	cleanup_device(dev, 0);
	reset_device(dev);
	return 0;
}

/*
 * The master requests the features we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
	uint64_t features = 0;

	rte_vhost_driver_get_features(dev->ifname, &features);
	return features;
}

/*
 * The master requests the number of queues we support.
 */
static uint32_t
vhost_user_get_queue_num(struct virtio_net *dev)
{
	uint32_t queue_num = 0;

	rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);
	return queue_num;
}

/*
 * We receive the negotiated features; they must be a subset of the
 * features we advertised to the virtio driver.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
	uint64_t vhost_features = 0;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;

	rte_vhost_driver_get_features(dev->ifname, &vhost_features);
	if (features & ~vhost_features) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) received invalid negotiated features.\n",
			dev->vid);
		return -1;
	}

	if (dev->flags & VIRTIO_DEV_RUNNING) {
		if (dev->features == features)
			return 0;

		/*
		 * Error out if master tries to change features while device is
		 * in running state. The exception being VHOST_F_LOG_ALL, which
		 * is enabled when live migration starts.
		 */
		if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"(%d) features changed while device is running.\n",
				dev->vid);
			return -1;
		}

		if (dev->notify_ops->features_changed)
			dev->notify_ops->features_changed(dev->vid, features);
	}

	dev->features = features;
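	/*
	 * Note: virtio_net_hdr_mrg_rxbuf is virtio_net_hdr plus a 16-bit
	 * num_buffers field; VIRTIO 1.0 mandates the larger header whenever
	 * VERSION_1 is negotiated, even without MRG_RXBUF, hence the check
	 * below.
	 */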
	if (dev->features &
		((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		dev->vhost_hlen = sizeof(struct virtio_net_hdr);
	}
	VHOST_LOG_DEBUG(VHOST_CONFIG,
		"(%d) mergeable RX buffers %s, virtio 1 %s\n",
		dev->vid,
		(dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

	if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
	    !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
		/*
		 * Remove all but first queue pair if MQ hasn't been
		 * negotiated. This is safe because the device is not
		 * running at this stage.
		 */
		while (dev->nr_vring > 2) {
			struct vhost_virtqueue *vq;

			vq = dev->virtqueue[--dev->nr_vring];
			if (!vq)
				continue;

			dev->virtqueue[dev->nr_vring] = NULL;
			cleanup_vq(vq, 1);
			free_vq(dev, vq);
		}
	}

	did = dev->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (vdpa_dev && vdpa_dev->ops->set_features)
		vdpa_dev->ops->set_features(dev->vid);

	return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
			 struct VhostUserMsg *msg)
{
	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

	vq->size = msg->payload.state.num;

	/* VIRTIO 1.0, 2.4 Virtqueues says:
	 *
	 *   Queue Size value is always a power of 2. The maximum Queue Size
	 *   value is 32768.
	 */
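	/*
	 * x & (x - 1) clears the lowest set bit, so it is zero only when
	 * x is a power of two (or zero).
	 */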
	if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid virtqueue size %u\n", vq->size);
		return -1;
	}

	if (dev->dequeue_zero_copy) {
		vq->nr_zmbuf = 0;
		vq->last_zmbuf_idx = 0;
		vq->zmbuf_size = vq->size;
		vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
					 sizeof(struct zcopy_mbuf), 0);
		if (vq->zmbufs == NULL) {
			RTE_LOG(WARNING, VHOST_CONFIG,
				"failed to allocate mem for zero copy; "
				"zero copy is force disabled\n");
			dev->dequeue_zero_copy = 0;
		}
		TAILQ_INIT(&vq->zmbuf_list);
	}

	if (vq_is_packed(dev)) {
		vq->shadow_used_packed = rte_malloc(NULL,
				vq->size *
				sizeof(struct vring_used_elem_packed),
				RTE_CACHE_LINE_SIZE);
		if (!vq->shadow_used_packed) {
			RTE_LOG(ERR, VHOST_CONFIG,
					"failed to allocate memory for shadow used ring.\n");
			return -1;
		}

	} else {
		vq->shadow_used_split = rte_malloc(NULL,
				vq->size * sizeof(struct vring_used_elem),
				RTE_CACHE_LINE_SIZE);
		if (!vq->shadow_used_split) {
			RTE_LOG(ERR, VHOST_CONFIG,
					"failed to allocate memory for shadow used ring.\n");
			return -1;
		}
	}

	vq->batch_copy_elems = rte_malloc(NULL,
				vq->size * sizeof(struct batch_copy_elem),
				RTE_CACHE_LINE_SIZE);
	if (!vq->batch_copy_elems) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to allocate memory for batching copy.\n");
		return -1;
	}

	return 0;
}

/*
 * Reallocate virtio_dev and vhost_virtqueue data structures so that they
 * live on the same NUMA node as the memory holding the vring descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net *old_dev;
	struct vhost_virtqueue *old_vq, *vq;
	struct zcopy_mbuf *new_zmbuf;
	struct vring_used_elem *new_shadow_used_split;
	struct vring_used_elem_packed *new_shadow_used_packed;
	struct batch_copy_elem *new_batch_copy_elems;
	int ret;

	old_dev = dev;
	vq = old_vq = dev->virtqueue[index];

	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			    MPOL_F_NODE | MPOL_F_ADDR);

	/* check if we need to reallocate vq */
	ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
			     MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate vq from %d to %d node\n", oldnode, newnode);
		vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
		if (!vq)
			return dev;

		memcpy(vq, old_vq, sizeof(*vq));
		TAILQ_INIT(&vq->zmbuf_list);

		if (dev->dequeue_zero_copy) {
			new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
					sizeof(struct zcopy_mbuf), 0, newnode);
			if (new_zmbuf) {
				rte_free(vq->zmbufs);
				vq->zmbufs = new_zmbuf;
			}
		}

		if (vq_is_packed(dev)) {
			new_shadow_used_packed = rte_malloc_socket(NULL,
					vq->size *
					sizeof(struct vring_used_elem_packed),
					RTE_CACHE_LINE_SIZE,
					newnode);
			if (new_shadow_used_packed) {
				rte_free(vq->shadow_used_packed);
				vq->shadow_used_packed = new_shadow_used_packed;
			}
		} else {
			new_shadow_used_split = rte_malloc_socket(NULL,
					vq->size *
					sizeof(struct vring_used_elem),
					RTE_CACHE_LINE_SIZE,
					newnode);
			if (new_shadow_used_split) {
				rte_free(vq->shadow_used_split);
				vq->shadow_used_split = new_shadow_used_split;
			}
		}

		new_batch_copy_elems = rte_malloc_socket(NULL,
			vq->size * sizeof(struct batch_copy_elem),
			RTE_CACHE_LINE_SIZE,
			newnode);
		if (new_batch_copy_elems) {
			rte_free(vq->batch_copy_elems);
			vq->batch_copy_elems = new_batch_copy_elems;
		}

		rte_free(old_vq);
	}

	/* check if we need to reallocate dev */
	ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
			    MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get dev numa information.\n");
		goto out;
	}
	if (oldnode != newnode) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"reallocate dev from %d to %d node\n",
			oldnode, newnode);
		dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
		if (!dev) {
			dev = old_dev;
			goto out;
		}

		memcpy(dev, old_dev, sizeof(*dev));
		rte_free(old_dev);
	}

out:
	dev->virtqueue[index] = vq;
	vhost_devices[dev->vid] = dev;

	if (old_vq != vq)
		vhost_user_iotlb_init(dev, index);

	return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
	return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
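/*
 * On success, *len is clamped to the number of contiguous bytes left in
 * the matching region; on failure, *len is set to 0 and 0 is returned.
 */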
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
	struct rte_vhost_mem_region *r;
	uint32_t i;

	/* Find the region where the address lives. */
	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];

		if (qva >= r->guest_user_addr &&
		    qva <  r->guest_user_addr + r->size) {

			if (unlikely(*len > r->guest_user_addr + r->size - qva))
				*len = r->guest_user_addr + r->size - qva;

			return qva - r->guest_user_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}

/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		uint64_t ra, uint64_t *size)
{
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
		uint64_t vva;

		vva = vhost_user_iotlb_cache_find(vq, ra,
					size, VHOST_ACCESS_RW);
		if (!vva)
			vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

		return vva;
	}

	return qva_to_vva(dev, ra, size);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
	struct vhost_vring_addr *addr = &vq->ring_addrs;
	uint64_t len;

	if (vq_is_packed(dev)) {
		len = sizeof(struct vring_packed_desc) * vq->size;
		vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
			ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
		vq->log_guest_addr = 0;
		if (vq->desc_packed == NULL ||
				len != sizeof(struct vring_packed_desc) *
				vq->size) {
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"(%d) failed to map desc_packed ring.\n",
				dev->vid);
			return dev;
		}

		dev = numa_realloc(dev, vq_index);
		vq = dev->virtqueue[vq_index];
		addr = &vq->ring_addrs;

		len = sizeof(struct vring_packed_desc_event);
		vq->driver_event = (struct vring_packed_desc_event *)
					(uintptr_t)ring_addr_to_vva(dev,
					vq, addr->avail_user_addr, &len);
		if (vq->driver_event == NULL ||
				len != sizeof(struct vring_packed_desc_event)) {
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"(%d) failed to find driver area address.\n",
				dev->vid);
			return dev;
		}

		len = sizeof(struct vring_packed_desc_event);
		vq->device_event = (struct vring_packed_desc_event *)
					(uintptr_t)ring_addr_to_vva(dev,
					vq, addr->used_user_addr, &len);
		if (vq->device_event == NULL ||
				len != sizeof(struct vring_packed_desc_event)) {
			RTE_LOG(DEBUG, VHOST_CONFIG,
				"(%d) failed to find device area address.\n",
				dev->vid);
			return dev;
		}

		return dev;
	}

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	if (vq->desc && vq->avail && vq->used)
		return dev;

	len = sizeof(struct vring_desc) * vq->size;
	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->desc_user_addr, &len);
	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"(%d) failed to map desc ring.\n",
			dev->vid);
		return dev;
	}

	dev = numa_realloc(dev, vq_index);
	vq = dev->virtqueue[vq_index];
	addr = &vq->ring_addrs;

	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->avail_user_addr, &len);
	if (vq->avail == 0 ||
			len != sizeof(struct vring_avail) +
			sizeof(uint16_t) * vq->size) {
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"(%d) failed to map avail ring.\n",
			dev->vid);
		return dev;
	}

	len = sizeof(struct vring_used) +
		sizeof(struct vring_used_elem) * vq->size;
	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
			vq, addr->used_user_addr, &len);
	if (vq->used == 0 || len != sizeof(struct vring_used) +
			sizeof(struct vring_used_elem) * vq->size) {
		RTE_LOG(DEBUG, VHOST_CONFIG,
			"(%d) failed to map used ring.\n",
			dev->vid);
		return dev;
	}

	if (vq->last_used_idx != vq->used->idx) {
		RTE_LOG(WARNING, VHOST_CONFIG,
			"last_used_idx (%u) and vq->used->idx (%u) mismatch; "
			"some packets may be resent for Tx and dropped for Rx\n",
			vq->last_used_idx, vq->used->idx);
		vq->last_used_idx  = vq->used->idx;
		vq->last_avail_idx = vq->used->idx;
	}

	vq->log_guest_addr = addr->log_guest_addr;

	VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
			dev->vid, vq->desc);
	VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
			dev->vid, vq->avail);
	VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
			dev->vid, vq->used);
	VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
			dev->vid, vq->log_guest_addr);

	return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
	struct vhost_virtqueue *vq;
	struct vhost_vring_addr *addr = &msg->payload.addr;
	struct virtio_net *dev = *pdev;

	if (dev->mem == NULL)
		return -1;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[msg->payload.addr.index];

	/*
	 * Ring addresses should not be interpreted as long as the ring is not
	 * started and enabled.
	 */
	memcpy(&vq->ring_addrs, addr, sizeof(*addr));

	vring_invalidate(dev, vq);

	if (vq->enabled && (dev->features &
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
		dev = translate_ring_addresses(dev, msg->payload.addr.index);
		if (!dev)
			return -1;

		*pdev = dev;
	}

	return 0;
}

/*
 * The virtio device sends us the ring index to resume from; both the
 * last used and last avail indexes are initialised to it.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
			  struct VhostUserMsg *msg)
{
	dev->virtqueue[msg->payload.state.index]->last_used_idx  =
			msg->payload.state.num;
	dev->virtqueue[msg->payload.state.index]->last_avail_idx =
			msg->payload.state.num;

	return 0;
}

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
		   uint64_t host_phys_addr, uint64_t size)
{
	struct guest_page *page, *last_page;

	if (dev->nr_guest_pages == dev->max_guest_pages) {
		page = realloc(dev->guest_pages,
			       dev->max_guest_pages * 2 * sizeof(*page));
		if (!page) {
			/* keep the old array intact on realloc failure */
			RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
			return -1;
		}
		dev->guest_pages = page;
		dev->max_guest_pages *= 2;
	}

	if (dev->nr_guest_pages > 0) {
		last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
		/* merge if the two pages are contiguous */
		if (host_phys_addr == last_page->host_phys_addr +
				      last_page->size) {
			last_page->size += size;
			return 0;
		}
	}

	page = &dev->guest_pages[dev->nr_guest_pages++];
	page->guest_phys_addr = guest_phys_addr;
	page->host_phys_addr  = host_phys_addr;
	page->size = size;

	return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
		uint64_t page_size)
{
	uint64_t reg_size = reg->size;
	uint64_t host_user_addr  = reg->host_user_addr;
	uint64_t guest_phys_addr = reg->guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;

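	/*
	 * The first chunk covers guest_phys_addr up to the next page_size
	 * boundary, or the whole region if it ends before that.
	 */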
	host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
	size = page_size - (guest_phys_addr & (page_size - 1));
	size = RTE_MIN(size, reg_size);

	if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
		return -1;

	host_user_addr  += size;
	guest_phys_addr += size;
	reg_size -= size;

	while (reg_size > 0) {
		size = RTE_MIN(reg_size, page_size);
		host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
						  host_user_addr);
		if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
				size) < 0)
			return -1;

		host_user_addr  += size;
		guest_phys_addr += size;
		reg_size -= size;
	}

	return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		RTE_LOG(INFO, VHOST_CONFIG,
			"guest physical page region %u\n"
			"\t guest_phys_addr: %" PRIx64 "\n"
			"\t host_phys_addr : %" PRIx64 "\n"
			"\t size           : %" PRIx64 "\n",
			i,
			page->guest_phys_addr,
			page->host_phys_addr,
			page->size);
	}
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
		     struct rte_vhost_memory *old)
{
	uint32_t i;

	if (new->nregions != old->nregions)
		return true;

	for (i = 0; i < new->nregions; ++i) {
		VhostUserMemoryRegion *new_r = &new->regions[i];
		struct rte_vhost_mem_region *old_r = &old->regions[i];

		if (new_r->guest_phys_addr != old_r->guest_phys_addr)
			return true;
		if (new_r->memory_size != old_r->size)
			return true;
		if (new_r->userspace_addr != old_r->guest_user_addr)
			return true;
	}

	return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
	struct virtio_net *dev = *pdev;
	struct VhostUserMemory memory = msg->payload.memory;
	struct rte_vhost_mem_region *reg;
	void *mmap_addr;
	uint64_t mmap_size;
	uint64_t mmap_offset;
	uint64_t alignment;
	uint32_t i;
	int populate;
	int fd;

	if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"too many memory regions (%u)\n", memory.nregions);
		return -1;
	}

	if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"(%d) memory regions not changed\n", dev->vid);

		for (i = 0; i < memory.nregions; i++)
			close(msg->fds[i]);

		return 0;
	}

	if (dev->mem) {
		free_mem_region(dev);
		rte_free(dev->mem);
		dev->mem = NULL;
	}

	/* Flush IOTLB cache as previous HVAs are now invalid */
	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
		for (i = 0; i < dev->nr_vring; i++)
			vhost_user_iotlb_flush_all(dev->virtqueue[i]);

	dev->nr_guest_pages = 0;
	if (!dev->guest_pages) {
		dev->max_guest_pages = 8;
		dev->guest_pages = malloc(dev->max_guest_pages *
						sizeof(struct guest_page));
		if (dev->guest_pages == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"(%d) failed to allocate memory "
				"for dev->guest_pages\n",
				dev->vid);
			return -1;
		}
	}

	dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
		sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
	if (dev->mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to allocate memory for dev->mem\n",
			dev->vid);
		return -1;
	}
	dev->mem->nregions = memory.nregions;

	for (i = 0; i < memory.nregions; i++) {
		fd  = msg->fds[i];
		reg = &dev->mem->regions[i];

		reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
		reg->guest_user_addr = memory.regions[i].userspace_addr;
		reg->size            = memory.regions[i].memory_size;
		reg->fd              = fd;

		mmap_offset = memory.regions[i].mmap_offset;

		/* Check for memory_size + mmap_offset overflow */
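		/*
		 * Both operands are uint64_t, so -reg->size wraps to
		 * 2^64 - size; the test below is equivalent to checking
		 * whether mmap_offset + size overflows 64 bits.
		 */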
		if (mmap_offset >= -reg->size) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap_offset (%#"PRIx64") and memory_size "
				"(%#"PRIx64") overflow\n",
				mmap_offset, reg->size);
			goto err_mmap;
		}

		mmap_size = reg->size + mmap_offset;

		/* On older long-term kernels such as 2.6.32 and 3.2.72,
		 * mmap() without MAP_ANONYMOUS must be called with a length
		 * aligned to the hugepage size, or it fails with EINVAL.
		 *
		 * To avoid that failure, align the length here.
		 */
		alignment = get_blk_size(fd);
		if (alignment == (uint64_t)-1) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"couldn't get hugepage size through fstat\n");
			goto err_mmap;
		}
		mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

		populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
		mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
				 MAP_SHARED | populate, fd, 0);

		if (mmap_addr == MAP_FAILED) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap region %u failed.\n", i);
			goto err_mmap;
		}

		reg->mmap_addr = mmap_addr;
		reg->mmap_size = mmap_size;
		reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
				      mmap_offset;

		if (dev->dequeue_zero_copy)
			if (add_guest_pages(dev, reg, alignment) < 0) {
				RTE_LOG(ERR, VHOST_CONFIG,
					"adding guest pages to region %u failed.\n",
					i);
				goto err_mmap;
			}

		RTE_LOG(INFO, VHOST_CONFIG,
			"guest memory region %u, size: 0x%" PRIx64 "\n"
			"\t guest physical addr: 0x%" PRIx64 "\n"
			"\t guest virtual  addr: 0x%" PRIx64 "\n"
			"\t host  virtual  addr: 0x%" PRIx64 "\n"
			"\t mmap addr : 0x%" PRIx64 "\n"
			"\t mmap size : 0x%" PRIx64 "\n"
			"\t mmap align: 0x%" PRIx64 "\n"
			"\t mmap off  : 0x%" PRIx64 "\n",
			i, reg->size,
			reg->guest_phys_addr,
			reg->guest_user_addr,
			reg->host_user_addr,
			(uint64_t)(uintptr_t)mmap_addr,
			mmap_size,
			alignment,
			mmap_offset);
	}

	for (i = 0; i < dev->nr_vring; i++) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq->desc || vq->avail || vq->used) {
			/*
			 * If the memory table got updated, the ring addresses
			 * need to be translated again as virtual addresses have
			 * changed.
			 */
			vring_invalidate(dev, vq);

			dev = translate_ring_addresses(dev, i);
			if (!dev)
				return -1;

			*pdev = dev;
		}
	}

	dump_guest_pages(dev);

	return 0;

err_mmap:
	free_mem_region(dev);
	rte_free(dev->mem);
	dev->mem = NULL;
	return -1;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	bool rings_ok;

	if (!vq)
		return false;

	if (vq_is_packed(dev))
		rings_ok = !!vq->desc_packed;
	else
		rings_ok = vq->desc && vq->avail && vq->used;

	return rings_ok &&
	       vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *vq;
	uint32_t i;

	if (dev->nr_vring == 0)
		return 0;

	for (i = 0; i < dev->nr_vring; i++) {
		vq = dev->virtqueue[i];

		if (!vq_is_ready(dev, vq))
			return 0;
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio is now ready for processing.\n");
	return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	struct vhost_vring_file file;
	struct vhost_virtqueue *vq;

	file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = VIRTIO_INVALID_EVENTFD;
	else
		file.fd = msg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring call idx:%d file:%d\n", file.index, file.fd);

	vq = dev->virtqueue[file.index];
	if (vq->callfd >= 0)
		close(vq->callfd);

	vq->callfd = file.fd;
}

static int
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
	struct vhost_vring_file file;
	struct vhost_virtqueue *vq;
	struct virtio_net *dev = *pdev;

	file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = VIRTIO_INVALID_EVENTFD;
	else
		file.fd = msg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring kick idx:%d file:%d\n", file.index, file.fd);

	/* Interpret ring addresses only when ring is started. */
	dev = translate_ring_addresses(dev, file.index);
	if (!dev)
		return -1;

	*pdev = dev;

	vq = dev->virtqueue[file.index];

	/*
	 * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
	 * the ring starts already enabled. Otherwise, it is enabled via
	 * the SET_VRING_ENABLE message.
	 */
	if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
		vq->enabled = 1;

	if (vq->kickfd >= 0)
		close(vq->kickfd);
	vq->kickfd = file.fd;
	return 0;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
	struct zcopy_mbuf *zmbuf, *next;

	for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
	     zmbuf != NULL; zmbuf = next) {
		next = TAILQ_NEXT(zmbuf, next);

		rte_pktmbuf_free(zmbuf->mbuf);
		TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
	}

	rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
			  struct VhostUserMsg *msg)
{
	struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

	/* We have to stop the queue (virtio) if it is running. */
	vhost_destroy_device_notify(dev);

	dev->flags &= ~VIRTIO_DEV_READY;
	dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

	/* Here we are safe to get the last avail index */
	msg->payload.state.num = vq->last_avail_idx;

	RTE_LOG(INFO, VHOST_CONFIG,
		"vring base idx:%d file:%d\n", msg->payload.state.index,
		msg->payload.state.num);
	/*
	 * Based on the current QEMU vhost-user implementation, this message
	 * is sent, and only sent, from vhost_vring_stop.
	 * TODO: clean up the vring; it isn't usable from this point on.
	 */
	if (vq->kickfd >= 0)
		close(vq->kickfd);

	vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

	if (vq->callfd >= 0)
		close(vq->callfd);

	vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

	if (dev->dequeue_zero_copy)
		free_zmbufs(vq);
	if (vq_is_packed(dev)) {
		rte_free(vq->shadow_used_packed);
		vq->shadow_used_packed = NULL;
	} else {
		rte_free(vq->shadow_used_split);
		vq->shadow_used_split = NULL;
	}

	rte_free(vq->batch_copy_elems);
	vq->batch_copy_elems = NULL;

	return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
			    struct VhostUserMsg *msg)
{
	int enable = (int)msg->payload.state.num;
	int index = (int)msg->payload.state.index;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;

	RTE_LOG(INFO, VHOST_CONFIG,
		"set queue enable: %d to qp idx: %d\n",
		enable, index);

	did = dev->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (vdpa_dev && vdpa_dev->ops->set_vring_state)
		vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

	if (dev->notify_ops->vring_state_changed)
		dev->notify_ops->vring_state_changed(dev->vid,
				index, enable);

	dev->virtqueue[index]->enabled = enable;

	return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
				 struct VhostUserMsg *msg)
{
	uint64_t features, protocol_features;

	rte_vhost_driver_get_features(dev->ifname, &features);
	rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);

	/*
	 * For now, the REPLY_ACK protocol feature is only mandatory for the
	 * IOMMU feature. If IOMMU is explicitly disabled by the application,
	 * also disable the REPLY_ACK feature for older buggy QEMU versions
	 * (from v2.7.0 to v2.9.0).
	 */
	if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

	msg->payload.u64 = protocol_features;
	msg->size = sizeof(msg->payload.u64);
}

static int
vhost_user_set_protocol_features(struct virtio_net *dev,
				 uint64_t protocol_features)
{
	if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) received invalid protocol features.\n",
			dev->vid);
		return -1;
	}

	dev->protocol_features = protocol_features;
	return 0;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	int fd = msg->fds[0];
	uint64_t size, off;
	void *addr;

	if (fd < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
		return -1;
	}

	if (msg->size != sizeof(VhostUserLog)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid log base msg size: %"PRIu32" != %d\n",
			msg->size, (int)sizeof(VhostUserLog));
		return -1;
	}

	size = msg->payload.log.mmap_size;
	off  = msg->payload.log.mmap_offset;

	/* Don't allow mmap_offset to point outside the mmap region */
	if (off > size) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
			off, size);
		return -1;
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"log mmap size: %"PRIu64", offset: %"PRIu64"\n",
		size, off);

	/*
	 * mmap from 0 to work around a hugepage mmap bug: mmap will
	 * fail when the offset is not page size aligned.
	 */
	addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (addr == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
		return -1;
	}

	/*
	 * Free any previously mapped log memory; VHOST_USER_SET_LOG_BASE
	 * may occasionally be sent more than once.
	 */
	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
	}
	dev->log_addr = (uint64_t)(uintptr_t)addr;
	dev->log_base = dev->log_addr + off;
	dev->log_size = size;

	return 0;
}

/*
 * An RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" an RARP packet here; instead, we set
 * a flag 'broadcast_rarp' to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	uint8_t *mac = (uint8_t *)&msg->payload.u64;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;

	RTE_LOG(DEBUG, VHOST_CONFIG,
		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	memcpy(dev->mac.addr_bytes, mac, 6);

	/*
	 * Set the flag to inject a RARP broadcast packet at
	 * rte_vhost_dequeue_burst().
	 *
	 * rte_smp_wmb() makes sure the mac is copied before the flag
	 * is set.
	 */
	rte_smp_wmb();
	rte_atomic16_set(&dev->broadcast_rarp, 1);
	did = dev->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (vdpa_dev && vdpa_dev->ops->migration_done)
		vdpa_dev->ops->migration_done(dev->vid);

	return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	if (msg->payload.u64 < VIRTIO_MIN_MTU ||
			msg->payload.u64 > VIRTIO_MAX_MTU) {
		RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
				msg->payload.u64);

		return -1;
	}

	dev->mtu = msg->payload.u64;

	return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
	int fd = msg->fds[0];

	if (fd < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Invalid file descriptor for slave channel (%d)\n",
				fd);
		return -1;
	}

	dev->slave_req_fd = fd;

	return 0;
}

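/*
 * Returns 1 when the updated IOVA range covers any of the ring base
 * addresses, in which case the ring can now be translated.
 */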
static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
	struct vhost_vring_addr *ra;
	uint64_t start, end;

	start = imsg->iova;
	end = start + imsg->size;

	ra = &vq->ring_addrs;
	if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
		return 1;
	if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
		return 1;
	if (ra->used_user_addr >= start && ra->used_user_addr < end)
		return 1;

	return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
				struct vhost_iotlb_msg *imsg)
{
	uint64_t istart, iend, vstart, vend;

	istart = imsg->iova;
	iend = istart + imsg->size - 1;
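
	/*
	 * Two closed intervals [istart, iend] and [vstart, vend] overlap
	 * iff each starts no later than the other ends.
	 */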
	vstart = (uintptr_t)vq->desc;
	vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
	if (vstart <= iend && istart <= vend)
		return 1;

	vstart = (uintptr_t)vq->avail;
	vend = vstart + sizeof(struct vring_avail);
	vend += sizeof(uint16_t) * vq->size - 1;
	if (vstart <= iend && istart <= vend)
		return 1;

	vstart = (uintptr_t)vq->used;
	vend = vstart + sizeof(struct vring_used);
	vend += sizeof(struct vring_used_elem) * vq->size - 1;
	if (vstart <= iend && istart <= vend)
		return 1;

	return 0;
}

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
	struct virtio_net *dev = *pdev;
	struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
	uint16_t i;
	uint64_t vva, len;

	switch (imsg->type) {
	case VHOST_IOTLB_UPDATE:
		len = imsg->size;
		vva = qva_to_vva(dev, imsg->uaddr, &len);
		if (!vva)
			return -1;

		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];

			vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
					len, imsg->perm);

			if (is_vring_iotlb_update(vq, imsg))
				*pdev = dev = translate_ring_addresses(dev, i);
		}
		break;
	case VHOST_IOTLB_INVALIDATE:
		for (i = 0; i < dev->nr_vring; i++) {
			struct vhost_virtqueue *vq = dev->virtqueue[i];

			vhost_user_iotlb_cache_remove(vq, imsg->iova,
					imsg->size);

			if (is_vring_iotlb_invalidate(vq, imsg))
				vring_invalidate(dev, vq);
		}
		break;
	default:
		RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
				imsg->type);
		return -1;
	}

	return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
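/*
 * The message arrives in two parts: a fixed-size header (plus any
 * ancillary fds) followed by msg->size bytes of payload, which must fit
 * within the payload union.
 */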
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
	int ret;

	ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
		msg->fds, VHOST_MEMORY_MAX_NREGIONS);
	if (ret <= 0)
		return ret;

	if (msg && msg->size) {
		if (msg->size > sizeof(msg->payload)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"invalid msg size: %u\n", msg->size);
			return -1;
		}
		ret = read(sockfd, &msg->payload, msg->size);
		if (ret <= 0)
			return ret;
		if (ret != (int)msg->size) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"read control message failed\n");
			return -1;
		}
	}

	return ret;
}

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
{
	if (!msg)
		return 0;

	return send_fd_message(sockfd, (char *)msg,
		VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
}

static int
send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
{
	if (!msg)
		return 0;

	msg->flags &= ~VHOST_USER_VERSION_MASK;
	msg->flags &= ~VHOST_USER_NEED_REPLY;
	msg->flags |= VHOST_USER_VERSION;
	msg->flags |= VHOST_USER_REPLY_MASK;

	return send_vhost_message(sockfd, msg, NULL, 0);
}

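/*
 * When a reply is requested, accesses to the slave request channel are
 * serialized: the lock taken here is released once the reply has been
 * read (or below, if sending fails).
 */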
1468 static int
1469 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
1470                          int *fds, int fd_num)
1471 {
1472         int ret;
1473
1474         if (msg->flags & VHOST_USER_NEED_REPLY)
1475                 rte_spinlock_lock(&dev->slave_req_lock);
1476
1477         ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
1478         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
1479                 rte_spinlock_unlock(&dev->slave_req_lock);
1480
1481         return ret;
1482 }
1483
1484 /*
1485  * Allocate a queue pair if it hasn't been allocated yet
1486  */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
                        struct VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request.master) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

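/*
 * The virtqueue array may be sparsely populated, so the walks below
 * count live queues with vq_num and keep scanning slots with i until
 * all nr_vring queues have been locked (or unlocked).
 */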
static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_lock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
        unsigned int i = 0;
        unsigned int vq_num = 0;

        while (vq_num < dev->nr_vring) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq) {
                        rte_spinlock_unlock(&vq->access_lock);
                        vq_num++;
                }
                i++;
        }
}

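/*
 * Handle one request from the vhost-user master: read the message,
 * allocate its vring on demand, lock the virtqueues for requests that
 * change device state, give external pre/post handlers a chance to
 * intercept it, then dispatch it and reply when required.
 */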
int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;
        int ret;
        int unlock_required = 0;
        uint32_t skip_master = 0;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        ret = 0;
        if (msg.request.master != VHOST_USER_IOTLB_MSG)
                RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);
        else
                RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
                        vhost_message_str[msg.request.master]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }
        /*
         * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
         * and VHOST_USER_RESET_OWNER, since they are sent when virtio
         * stops and the device is destroyed.  destroy_device() waits for
         * the queues to become inactive, so this is safe; taking the
         * access_lock here instead would deadlock.
         */
        switch (msg.request.master) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
        case VHOST_USER_SET_MEM_TABLE:
        case VHOST_USER_SET_LOG_BASE:
        case VHOST_USER_SET_LOG_FD:
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_ADDR:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
        case VHOST_USER_SET_VRING_ENABLE:
        case VHOST_USER_SEND_RARP:
        case VHOST_USER_NET_SET_MTU:
        case VHOST_USER_SET_SLAVE_REQ_FD:
                vhost_user_lock_all_queue_pairs(dev);
                unlock_required = 1;
                break;
        default:
                break;
        }

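        /*
         * External backends (vhost-crypto, for instance) can register
         * pre/post handlers to intercept requests; a pre handler may
         * answer by itself and request that the built-in handling below
         * be skipped via skip_master.
         */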
        if (dev->extern_ops.pre_msg_handle) {
                uint32_t need_reply;

                ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
                                (void *)&msg, &need_reply, &skip_master);
                if (ret < 0)
                        goto skip_to_reply;

                if (need_reply)
                        send_vhost_reply(fd, &msg);

                if (skip_master)
                        goto skip_to_post_handle;
        }

        switch (msg.request.master) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                ret = vhost_user_set_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                vhost_user_get_protocol_features(dev, &msg);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                ret = vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                ret = vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                ret = vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(&dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                ret = vhost_user_set_log_base(dev, &msg);
                if (ret)
                        goto skip_to_reply;
                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                ret = vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                ret = vhost_user_set_vring_addr(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                ret = vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                ret = vhost_user_get_vring_base(dev, &msg);
                if (ret)
                        goto skip_to_reply;
                msg.size = sizeof(msg.payload.state);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                ret = vhost_user_set_vring_kick(&dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = (uint64_t)vhost_user_get_queue_num(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                ret = vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                ret = vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        case VHOST_USER_SET_SLAVE_REQ_FD:
                ret = vhost_user_set_req_fd(dev, &msg);
                break;

        case VHOST_USER_IOTLB_MSG:
                ret = vhost_user_iotlb_msg(&dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

skip_to_post_handle:
        if (!ret && dev->extern_ops.post_msg_handle) {
                uint32_t need_reply;

                ret = (*dev->extern_ops.post_msg_handle)(
                                dev->vid, (void *)&msg, &need_reply);
                if (ret < 0)
                        goto skip_to_reply;

                if (need_reply)
                        send_vhost_reply(fd, &msg);
        }

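        /*
         * If the master set VHOST_USER_NEED_REPLY, acknowledge below
         * with the outcome in payload.u64: zero on success, non-zero on
         * failure, as the reply-ack protocol feature expects.
         */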
skip_to_reply:
        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_reply(fd, &msg);
        } else if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "vhost message handling failed.\n");
                return -1;
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && virtio_is_ready(dev) &&
                        !(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
                        msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
                if (vdpa_dev->ops->dev_conf)
                        vdpa_dev->ops->dev_conf(dev->vid);
                dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
                if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "(%d) software relay is used for vDPA, performance may be low.\n",
                                dev->vid);
                }
        }

        return 0;
}

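/*
 * Wait for the master's answer to a slave request sent with
 * VHOST_USER_NEED_REPLY: read one message from the slave channel,
 * check that it echoes the request type, and translate its
 * payload.u64 into success (0) or failure (-1).  Also drops
 * slave_req_lock, taken in send_vhost_slave_message().
 */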
static int
process_slave_message_reply(struct virtio_net *dev,
                            const struct VhostUserMsg *msg)
{
        struct VhostUserMsg msg_reply;
        int ret;

        if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
                return 0;

        if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
                ret = -1;
                goto out;
        }

        if (msg_reply.request.slave != msg->request.slave) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Received unexpected msg type (%u), expected %u\n",
                        msg_reply.request.slave, msg->request.slave);
                ret = -1;
                goto out;
        }

        ret = msg_reply.payload.u64 ? -1 : 0;

out:
        rte_spinlock_unlock(&dev->slave_req_lock);
        return ret;
}

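/*
 * Ask the master to translate a guest IOVA that is missing from the
 * IOTLB cache; the master answers later with an IOTLB update on the
 * main channel, which vhost_user_iotlb_msg() feeds into the cache.
 */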
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
                .flags = VHOST_USER_VERSION,
                .size = sizeof(msg.payload.iotlb),
                .payload.iotlb = {
                        .iova = iova,
                        .perm = perm,
                        .type = VHOST_IOTLB_MISS,
                },
        };

        ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Failed to send IOTLB miss message (%d)\n",
                                ret);
                return ret;
        }

        return 0;
}

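/*
 * Hand the master an mmap()-able doorbell region (fd plus
 * offset/size) for one vring, so that guest notifications can reach
 * the vDPA hardware directly.  Passing fd < 0 sets
 * VHOST_USER_VRING_NOFD_MASK instead, asking the master to unmap the
 * notifier.
 */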
static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
                                         int index, int fd,
                                         uint64_t offset,
                                         uint64_t size)
{
        int *fdp = NULL;
        size_t fd_num = 0;
        int ret;
        struct VhostUserMsg msg = {
                .request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
                .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
                .size = sizeof(msg.payload.area),
                .payload.area = {
                        .u64 = index & VHOST_USER_VRING_IDX_MASK,
                        .size = size,
                        .offset = offset,
                },
        };

        if (fd < 0) {
                msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
        } else {
                fdp = &fd;
                fd_num = 1;
        }

        ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Failed to set host notifier (%d)\n", ret);
                return ret;
        }

        return process_slave_message_reply(dev, &msg);
}

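/*
 * Enable or disable host notifiers for every vring of a vDPA device.
 * This needs the slave channel plus the SLAVE_SEND_FD and
 * HOST_NOTIFIER protocol features; if any vring fails to enable, all
 * notifiers are torn down again so the device falls back to the
 * software relay path.
 */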
int
vhost_user_host_notifier_ctrl(int vid, bool enable)
{
        struct virtio_net *dev;
        struct rte_vdpa_device *vdpa_dev;
        int vfio_device_fd, did, ret = 0;
        uint64_t offset, size;
        unsigned int i;

        dev = get_device(vid);
        if (!dev)
                return -ENODEV;

        did = dev->vdpa_dev_id;
        if (did < 0)
                return -EINVAL;

        if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
            !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
            !(dev->protocol_features &
                        (1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
                return -ENOTSUP;

        vdpa_dev = rte_vdpa_get_device(did);
        if (!vdpa_dev)
                return -ENODEV;

        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

        vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
        if (vfio_device_fd < 0)
                return -ENOTSUP;

        if (enable) {
                for (i = 0; i < dev->nr_vring; i++) {
                        if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
                                        &size) < 0) {
                                ret = -ENOTSUP;
                                goto disable;
                        }

                        if (vhost_user_slave_set_vring_host_notifier(dev, i,
                                        vfio_device_fd, offset, size) < 0) {
                                ret = -EFAULT;
                                goto disable;
                        }
                }
        } else {
disable:
                for (i = 0; i < dev->nr_vring; i++) {
                        vhost_user_slave_set_vring_host_notifier(dev, i, -1,
                                        0, 0);
                }
        }

        return ret;
}