vhost: make message handling functions prepare the reply
[dpdk.git] / lib/librte_vhost/vhost_user.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

/* Security model
 * --------------
 * The vhost-user protocol connection is an external interface, so it must be
 * robust against invalid inputs.
 *
 * This is important because the vhost-user master is only one step removed
 * from the guest.  A malicious guest that has escaped can then launch further
 * attacks from the vhost-user master.
 *
 * Even in deployments where guests are trusted, a bug in the vhost-user master
 * can still cause invalid messages to be sent.  Such messages must not
 * compromise the stability of the DPDK application by causing crashes, memory
 * corruption, or other problematic behavior.
 *
 * Do not assume received VhostUserMsg fields contain sensible values!
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "iotlb.h"
#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
        [VHOST_USER_SET_SLAVE_REQ_FD]  = "VHOST_USER_SET_SLAVE_REQ_FD",
        [VHOST_USER_IOTLB_MSG]  = "VHOST_USER_IOTLB_MSG",
        [VHOST_USER_CRYPTO_CREATE_SESS] = "VHOST_USER_CRYPTO_CREATE_SESS",
        [VHOST_USER_CRYPTO_CLOSE_SESS] = "VHOST_USER_CRYPTO_CLOSE_SESS",
};

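/*
 * Return the filesystem block size reported by fstat() for fd; for a
 * hugetlbfs-backed fd this is the hugepage size. Returns (uint64_t)-1
 * if fstat() fails.
 */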
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }

        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
        }
}

/*
 * This function just returns success at the moment, provided the
 * device has been initialised.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        vhost_destroy_device_notify(dev);

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * The features that we support are requested.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);

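        /* Prepare the reply in place; the caller sends msg back to the master. */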
        msg->payload.u64 = features;
        msg->size = sizeof(msg->payload.u64);

        return features;
}

/*
 * The maximum number of queues we support is requested.
 */
static uint32_t
vhost_user_get_queue_num(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint32_t queue_num = 0;

        rte_vhost_driver_get_queue_num(dev->ifname, &queue_num);

        msg->payload.u64 = (uint64_t)queue_num;
        msg->size = sizeof(msg->payload.u64);

        return queue_num;
}

/*
 * We receive the set of features negotiated between us and the virtio device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if (dev->flags & VIRTIO_DEV_RUNNING) {
                if (dev->features == features)
                        return 0;

                /*
                 * Error out if the master tries to change features while the
                 * device is in running state. The exception is VHOST_F_LOG_ALL,
                 * which is enabled when live migration starts.
                 */
                if ((dev->features ^ features) & ~(1ULL << VHOST_F_LOG_ALL)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) features changed while device is running.\n",
                                dev->vid);
                        return -1;
                }

                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        VHOST_LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        if ((dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET) &&
            !(dev->features & (1ULL << VIRTIO_NET_F_MQ))) {
                /*
                 * Remove all but the first queue pair if MQ hasn't been
                 * negotiated. This is safe because the device is not
                 * running at this stage.
                 */
                while (dev->nr_vring > 2) {
                        struct vhost_virtqueue *vq;

                        vq = dev->virtqueue[--dev->nr_vring];
                        if (!vq)
                                continue;

                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
                        free_vq(dev, vq);
                }
        }

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_features)
                vdpa_dev->ops->set_features(dev->vid);

        return 0;
}

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         struct VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        /* VIRTIO 1.0, 2.4 Virtqueues says:
         *
         *   Queue Size value is always a power of 2. The maximum Queue Size
         *   value is 32768.
         */
        if ((vq->size & (vq->size - 1)) || vq->size > 32768) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid virtqueue size %u\n", vq->size);
                return -1;
        }

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is forcibly disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
                TAILQ_INIT(&vq->zmbuf_list);
        }

        if (vq_is_packed(dev)) {
                vq->shadow_used_packed = rte_malloc(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_packed) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }

        } else {
                vq->shadow_used_split = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
                if (!vq->shadow_used_split) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                        "failed to allocate memory for shadow used ring.\n");
                        return -1;
                }
        }

        vq->batch_copy_elems = rte_malloc(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->batch_copy_elems) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for batching copy.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so they are
 * allocated on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        struct zcopy_mbuf *new_zmbuf;
        struct vring_used_elem *new_shadow_used_split;
        struct vring_used_elem_packed *new_shadow_used_packed;
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

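        /*
         * get_mempolicy() with MPOL_F_NODE | MPOL_F_ADDR returns the NUMA
         * node on which the given address is allocated.
         */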
        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                TAILQ_INIT(&vq->zmbuf_list);

                if (dev->dequeue_zero_copy) {
                        new_zmbuf = rte_malloc_socket(NULL, vq->zmbuf_size *
                                        sizeof(struct zcopy_mbuf), 0, newnode);
                        if (new_zmbuf) {
                                rte_free(vq->zmbufs);
                                vq->zmbufs = new_zmbuf;
                        }
                }

                if (vq_is_packed(dev)) {
                        new_shadow_used_packed = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem_packed),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_packed) {
                                rte_free(vq->shadow_used_packed);
                                vq->shadow_used_packed = new_shadow_used_packed;
                        }
                } else {
                        new_shadow_used_split = rte_malloc_socket(NULL,
                                        vq->size *
                                        sizeof(struct vring_used_elem),
                                        RTE_CACHE_LINE_SIZE,
                                        newnode);
                        if (new_shadow_used_split) {
                                rte_free(vq->shadow_used_split);
                                vq->shadow_used_split = new_shadow_used_split;
                        }
                }

                new_batch_copy_elems = rte_malloc_socket(NULL,
                        vq->size * sizeof(struct batch_copy_elem),
                        RTE_CACHE_LINE_SIZE,
                        newnode);
                if (new_batch_copy_elems) {
                        rte_free(vq->batch_copy_elems);
                        vq->batch_copy_elems = new_batch_copy_elems;
                }

                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        if (old_vq != vq)
                vhost_user_iotlb_init(dev, index);

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif

/* Converts QEMU virtual address to Vhost virtual address. */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (qva >= r->guest_user_addr &&
                    qva <  r->guest_user_addr + r->size) {

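                        /* Clamp *len so the range stays within this region. */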
                        if (unlikely(*len > r->guest_user_addr + r->size - qva))
                                *len = r->guest_user_addr + r->size - qva;

                        return qva - r->guest_user_addr +
                               r->host_user_addr;
                }
        }
        *len = 0;

        return 0;
}


/*
 * Converts ring address to Vhost virtual address.
 * If IOMMU is enabled, the ring address is a guest IO virtual address,
 * else it is a QEMU virtual address.
 */
static uint64_t
ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t ra, uint64_t *size)
{
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;

                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        size, VHOST_ACCESS_RW);
                if (!vva)
                        vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);

                return vva;
        }

        return qva_to_vva(dev, ra, size);
}

static struct virtio_net *
translate_ring_addresses(struct virtio_net *dev, int vq_index)
{
        struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
        struct vhost_vring_addr *addr = &vq->ring_addrs;
        uint64_t len;

        if (vq_is_packed(dev)) {
                len = sizeof(struct vring_packed_desc) * vq->size;
                vq->desc_packed = (struct vring_packed_desc *)(uintptr_t)
                        ring_addr_to_vva(dev, vq, addr->desc_user_addr, &len);
                vq->log_guest_addr = 0;
                if (vq->desc_packed == NULL ||
                                len != sizeof(struct vring_packed_desc) *
                                vq->size) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to map desc_packed ring.\n",
                                dev->vid);
                        return dev;
                }

                dev = numa_realloc(dev, vq_index);
                vq = dev->virtqueue[vq_index];
                addr = &vq->ring_addrs;

                len = sizeof(struct vring_packed_desc_event);
                vq->driver_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->avail_user_addr, &len);
                if (vq->driver_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find driver area address.\n",
                                dev->vid);
                        return dev;
                }

                len = sizeof(struct vring_packed_desc_event);
                vq->device_event = (struct vring_packed_desc_event *)
                                        (uintptr_t)ring_addr_to_vva(dev,
                                        vq, addr->used_user_addr, &len);
                if (vq->device_event == NULL ||
                                len != sizeof(struct vring_packed_desc_event)) {
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "(%d) failed to find device area address.\n",
                                dev->vid);
                        return dev;
                }

                return dev;
        }

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        if (vq->desc && vq->avail && vq->used)
                return dev;

        len = sizeof(struct vring_desc) * vq->size;
        vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->desc_user_addr, &len);
        if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map desc ring.\n",
                        dev->vid);
                return dev;
        }

        dev = numa_realloc(dev, vq_index);
        vq = dev->virtqueue[vq_index];
        addr = &vq->ring_addrs;

        len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
        vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->avail_user_addr, &len);
        if (vq->avail == 0 ||
                        len != sizeof(struct vring_avail) +
                        sizeof(uint16_t) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map avail ring.\n",
                        dev->vid);
                return dev;
        }

        len = sizeof(struct vring_used) +
                sizeof(struct vring_used_elem) * vq->size;
        vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
                        vq, addr->used_user_addr, &len);
        if (vq->used == 0 || len != sizeof(struct vring_used) +
                        sizeof(struct vring_used_elem) * vq->size) {
                RTE_LOG(DEBUG, VHOST_CONFIG,
                        "(%d) failed to map used ring.\n",
                        dev->vid);
                return dev;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) mismatch; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = addr->log_guest_addr;

        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return dev;
}

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
        struct virtio_net *dev = *pdev;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, the rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /*
         * Ring addresses should not be interpreted as long as the ring is not
         * started and enabled.
         */
        memcpy(&vq->ring_addrs, addr, sizeof(*addr));

        vring_invalidate(dev, vq);

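        /*
         * Translate right away only if the ring is already enabled;
         * otherwise the translation is deferred to SET_VRING_KICK.
         */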
        if (vq->enabled && (dev->features &
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return -1;

                *pdev = dev;
        }

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          struct VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

static int
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                dev->max_guest_pages *= 2;
                dev->guest_pages = realloc(dev->guest_pages,
                                        dev->max_guest_pages * sizeof(*page));
                if (!dev->guest_pages) {
                        RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
                        return -1;
                }
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return 0;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;

        return 0;
}

static int
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

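        /*
         * The first chunk may start mid-page: cover only up to the next
         * page boundary, or to the region end if that comes first.
         */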
        host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
                return -1;

        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
                                                  host_user_addr);
                if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
                                size) < 0)
                        return -1;

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }

        return 0;
}

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static bool
vhost_memory_changed(struct VhostUserMemory *new,
                     struct rte_vhost_memory *old)
{
        uint32_t i;

        if (new->nregions != old->nregions)
                return true;

        for (i = 0; i < new->nregions; ++i) {
                VhostUserMemoryRegion *new_r = &new->regions[i];
                struct rte_vhost_mem_region *old_r = &old->regions[i];

                if (new_r->guest_phys_addr != old_r->guest_phys_addr)
                        return true;
                if (new_r->memory_size != old_r->size)
                        return true;
                if (new_r->userspace_addr != old_r->guest_user_addr)
                        return true;
        }

        return false;
}

static int
vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory memory = msg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int populate;
        int fd;

        if (memory.nregions > VHOST_MEMORY_MAX_NREGIONS) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "too many memory regions (%u)\n", memory.nregions);
                return -1;
        }

        if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "(%d) memory regions not changed\n", dev->vid);

                for (i = 0; i < memory.nregions; i++)
                        close(msg->fds[i]);

                return 0;
        }

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        /* Flush IOTLB cache as previous HVAs are now invalid */
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = msg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;

                /* Check for memory_size + mmap_offset overflow */
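                /*
                 * In unsigned arithmetic -reg->size equals 2^64 - reg->size,
                 * so the test below is true iff mmap_offset + reg->size
                 * wraps around.
                 */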
                if (mmap_offset >= -reg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap_offset (%#"PRIx64") and memory_size "
                                "(%#"PRIx64") overflow\n",
                                mmap_offset, reg->size);
                        goto err_mmap;
                }

                mmap_size = reg->size + mmap_offset;

                /* On older long-term kernels, such as 2.6.32 and 3.2.72,
                 * mmap() without MAP_ANONYMOUS must be called with a length
                 * aligned to the hugepage size, or it fails with EINVAL.
                 *
                 * To avoid that failure, make sure the length stays aligned
                 * here.
                 */
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                populate = (dev->dequeue_zero_copy) ? MAP_POPULATE : 0;
                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | populate, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        if (add_guest_pages(dev, reg, alignment) < 0) {
                                RTE_LOG(ERR, VHOST_CONFIG,
                                        "adding guest pages to region %u failed.\n",
                                        i);
                                goto err_mmap;
                        }

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        for (i = 0; i < dev->nr_vring; i++) {
                struct vhost_virtqueue *vq = dev->virtqueue[i];

                if (vq->desc || vq->avail || vq->used) {
                        /*
                         * If the memory table got updated, the ring addresses
                         * need to be translated again as virtual addresses have
                         * changed.
                         */
                        vring_invalidate(dev, vq);

                        dev = translate_ring_addresses(dev, i);
                        if (!dev)
                                return -1;

                        *pdev = dev;
                }
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

static bool
vq_is_ready(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        bool rings_ok;

        if (!vq)
                return false;

        if (vq_is_packed(dev))
                rings_ok = !!vq->desc_packed;
        else
                rings_ok = vq->desc && vq->avail && vq->used;

        return rings_ok &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(dev, vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = msg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static int
vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;
        struct virtio_net *dev = *pdev;

        file.index = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (msg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = msg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        /* Interpret ring addresses only when the ring is started. */
        dev = translate_ring_addresses(dev, file.index);
        if (!dev)
                return -1;

        *pdev = dev;

        vq = dev->virtqueue[file.index];

        /*
         * When VHOST_USER_F_PROTOCOL_FEATURES is not negotiated,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
                vq->enabled = 1;

        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
        return 0;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          struct VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        vhost_destroy_device_notify(dev);

        dev->flags &= ~VIRTIO_DEV_READY;
        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;

        /* At this point it is safe to fetch the last avail index. */
        msg->payload.state.num = vq->last_avail_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current QEMU vhost-user implementation, this message
         * is sent from vhost_vring_stop, and only from there.
         * TODO: clean up the vring; it is not usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        if (vq_is_packed(dev)) {
                rte_free(vq->shadow_used_packed);
                vq->shadow_used_packed = NULL;
        } else {
                rte_free(vq->shadow_used_split);
                vq->shadow_used_split = NULL;
        }

        rte_free(vq->batch_copy_elems);
        vq->batch_copy_elems = NULL;

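        /* The reply payload (the last avail index) was filled in above. */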
        msg->size = sizeof(msg->payload.state);

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            struct VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;
        int index = (int)msg->payload.state.index;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);

        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->set_vring_state)
                vdpa_dev->ops->set_vring_state(dev->vid, index, enable);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);

        dev->virtqueue[index]->enabled = enable;

        return 0;
}

static void
vhost_user_get_protocol_features(struct virtio_net *dev,
                                 struct VhostUserMsg *msg)
{
        uint64_t features, protocol_features;

        rte_vhost_driver_get_features(dev->ifname, &features);
        rte_vhost_driver_get_protocol_features(dev->ifname, &protocol_features);

        /*
         * The REPLY_ACK protocol feature is, for now, only mandatory for the
         * IOMMU feature. If IOMMU is explicitly disabled by the application,
         * also disable the REPLY_ACK feature to work around older buggy QEMU
         * versions (from v2.7.0 to v2.9.0).
         */
        if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK);

        msg->payload.u64 = protocol_features;
        msg->size = sizeof(msg->payload.u64);
}

static int
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid protocol features.\n",
                        dev->vid);
                return -1;
        }

        dev->protocol_features = protocol_features;
        return 0;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;

        /* Don't allow mmap_offset to point outside the mmap region */
        if (off > size) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "log offset %#"PRIx64" exceeds log size %#"PRIx64"\n",
                        off, size);
                return -1;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRId64", offset: %"PRId64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap will
         * fail when the offset is not page-size aligned.
         */
        addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free previously mapped log memory in case multiple
         * VHOST_USER_SET_LOG_BASE messages are received.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        msg->size = sizeof(msg->payload.u64);

        return 0;
}

/*
 * A RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;
        struct rte_vdpa_device *vdpa_dev;
        int did = -1;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() makes sure the mac is copied before
         * the flag is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);
        did = dev->vdpa_dev_id;
        vdpa_dev = rte_vdpa_get_device(did);
        if (vdpa_dev && vdpa_dev->ops->migration_done)
                vdpa_dev->ops->migration_done(dev->vid);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}

static int
vhost_user_set_req_fd(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                                "Invalid file descriptor for slave channel (%d)\n",
                                fd);
                return -1;
        }

        dev->slave_req_fd = fd;

        return 0;
}

static int
is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
{
        struct vhost_vring_addr *ra;
        uint64_t start, end;

        start = imsg->iova;
        end = start + imsg->size;

        ra = &vq->ring_addrs;
        if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
                return 1;
        if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
                return 1;
        if (ra->used_user_addr >= start && ra->used_user_addr < end)
                return 1;

        return 0;
}

static int
is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
                                struct vhost_iotlb_msg *imsg)
{
        uint64_t istart, iend, vstart, vend;

        istart = imsg->iova;
        iend = istart + imsg->size - 1;

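        /*
         * Two inclusive ranges [istart, iend] and [vstart, vend] overlap
         * iff vstart <= iend and istart <= vend.
         */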
        vstart = (uintptr_t)vq->desc;
        vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->avail;
        vend = vstart + sizeof(struct vring_avail);
        vend += sizeof(uint16_t) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        vstart = (uintptr_t)vq->used;
        vend = vstart + sizeof(struct vring_used);
        vend += sizeof(struct vring_used_elem) * vq->size - 1;
        if (vstart <= iend && istart <= vend)
                return 1;

        return 0;
}

static int
vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg)
{
        struct virtio_net *dev = *pdev;
        struct vhost_iotlb_msg *imsg = &msg->payload.iotlb;
        uint16_t i;
        uint64_t vva, len;

        switch (imsg->type) {
        case VHOST_IOTLB_UPDATE:
                len = imsg->size;
                vva = qva_to_vva(dev, imsg->uaddr, &len);
                if (!vva)
                        return -1;

                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        len, imsg->perm);

                        if (is_vring_iotlb_update(vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
        case VHOST_IOTLB_INVALIDATE:
                for (i = 0; i < dev->nr_vring; i++) {
                        struct vhost_virtqueue *vq = dev->virtqueue[i];

                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);

                        if (is_vring_iotlb_invalidate(vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
        default:
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid IOTLB message type (%d)\n",
                                imsg->type);
                return -1;
        }

        return 0;
}

/* Return the number of bytes read on success, or a negative value on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

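        /*
         * First read the fixed-size message header (plus any ancillary fds),
         * then read msg->size bytes of payload.
         */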
1432         ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
1433                 msg->fds, VHOST_MEMORY_MAX_NREGIONS);
1434         if (ret <= 0)
1435                 return ret;
1436
1437         if (msg && msg->size) {
1438                 if (msg->size > sizeof(msg->payload)) {
1439                         RTE_LOG(ERR, VHOST_CONFIG,
1440                                 "invalid msg size: %d\n", msg->size);
1441                         return -1;
1442                 }
1443                 ret = read(sockfd, &msg->payload, msg->size);
1444                 if (ret <= 0)
1445                         return ret;
1446                 if (ret != (int)msg->size) {
1447                         RTE_LOG(ERR, VHOST_CONFIG,
1448                                 "read control message failed\n");
1449                         return -1;
1450                 }
1451         }
1452
1453         return ret;
1454 }
1455
1456 static int
1457 send_vhost_message(int sockfd, struct VhostUserMsg *msg, int *fds, int fd_num)
1458 {
1459         if (!msg)
1460                 return 0;
1461
1462         return send_fd_message(sockfd, (char *)msg,
1463                 VHOST_USER_HDR_SIZE + msg->size, fds, fd_num);
1464 }
1465
1466 static int
1467 send_vhost_reply(int sockfd, struct VhostUserMsg *msg)
1468 {
1469         if (!msg)
1470                 return 0;
1471
1472         msg->flags &= ~VHOST_USER_VERSION_MASK;
1473         msg->flags &= ~VHOST_USER_NEED_REPLY;
1474         msg->flags |= VHOST_USER_VERSION;
1475         msg->flags |= VHOST_USER_REPLY_MASK;
1476
1477         return send_vhost_message(sockfd, msg, NULL, 0);
1478 }
1479
1480 static int
1481 send_vhost_slave_message(struct virtio_net *dev, struct VhostUserMsg *msg,
1482                          int *fds, int fd_num)
1483 {
1484         int ret;
1485
1486         if (msg->flags & VHOST_USER_NEED_REPLY)
1487                 rte_spinlock_lock(&dev->slave_req_lock);
1488
1489         ret = send_vhost_message(dev->slave_req_fd, msg, fds, fd_num);
1490         if (ret < 0 && (msg->flags & VHOST_USER_NEED_REPLY))
1491                 rte_spinlock_unlock(&dev->slave_req_lock);
1492
1493         return ret;
1494 }
1495
1496 /*
1497  * Allocate a queue pair if it hasn't been allocated yet
1498  */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
			struct VhostUserMsg *msg)
{
	uint16_t vring_idx;

	switch (msg->request.master) {
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
		vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
		break;
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_ENABLE:
		vring_idx = msg->payload.state.index;
		break;
	case VHOST_USER_SET_VRING_ADDR:
		vring_idx = msg->payload.addr.index;
		break;
	default:
		return 0;
	}

	if (vring_idx >= VHOST_MAX_VRING) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid vring index: %u\n", vring_idx);
		return -1;
	}

	if (dev->virtqueue[vring_idx])
		return 0;

	return alloc_vring_queue(dev, vring_idx);
}
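
/*
 * Take every virtqueue's access_lock so the datapath (enqueue/dequeue)
 * cannot run while a control message mutates ring state.  The loops
 * below count locked rings rather than array slots, since the
 * virtqueue array may be sparse.
 */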

static void
vhost_user_lock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_lock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}

static void
vhost_user_unlock_all_queue_pairs(struct virtio_net *dev)
{
	unsigned int i = 0;
	unsigned int vq_num = 0;

	while (vq_num < dev->nr_vring) {
		struct vhost_virtqueue *vq = dev->virtqueue[i];

		if (vq) {
			rte_spinlock_unlock(&vq->access_lock);
			vq_num++;
		}
		i++;
	}
}
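
/*
 * Main request dispatcher for the master channel.  The flow is: read
 * and validate the message, allocate the addressed vring if needed,
 * lock the rings for state-changing requests, let external backends
 * pre-handle, dispatch, let them post-handle, send the REPLY_ACK
 * status if requested, and finally start the device (and configure an
 * attached vDPA device) once it becomes ready.
 */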

int
vhost_user_msg_handler(int vid, int fd)
{
	struct virtio_net *dev;
	struct VhostUserMsg msg;
	struct rte_vdpa_device *vdpa_dev;
	int did = -1;
	int ret;
	int unlock_required = 0;
	uint32_t skip_master = 0;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	if (!dev->notify_ops) {
		dev->notify_ops = vhost_driver_callback_get(dev->ifname);
		if (!dev->notify_ops) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"failed to get callback ops for driver %s\n",
				dev->ifname);
			return -1;
		}
	}

	ret = read_vhost_message(fd, &msg);
	if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
		if (ret < 0)
			RTE_LOG(ERR, VHOST_CONFIG,
				"vhost read message failed\n");
		else if (ret == 0)
			RTE_LOG(INFO, VHOST_CONFIG,
				"vhost peer closed\n");
		else
			RTE_LOG(ERR, VHOST_CONFIG,
				"vhost read incorrect message\n");

		return -1;
	}

	ret = 0;
	if (msg.request.master != VHOST_USER_IOTLB_MSG)
		RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
			vhost_message_str[msg.request.master]);
	else
		RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
			vhost_message_str[msg.request.master]);

	ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"failed to alloc queue\n");
		return -1;
	}

	/*
	 * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE
	 * and VHOST_USER_RESET_OWNER, since they are sent when virtio stops
	 * and the device is destroyed.  destroy_device waits for the queues
	 * to become inactive, so it is safe.  Taking the access_lock here
	 * would otherwise cause a deadlock.
	 */
	switch (msg.request.master) {
	case VHOST_USER_SET_FEATURES:
	case VHOST_USER_SET_PROTOCOL_FEATURES:
	case VHOST_USER_SET_OWNER:
	case VHOST_USER_SET_MEM_TABLE:
	case VHOST_USER_SET_LOG_BASE:
	case VHOST_USER_SET_LOG_FD:
	case VHOST_USER_SET_VRING_NUM:
	case VHOST_USER_SET_VRING_ADDR:
	case VHOST_USER_SET_VRING_BASE:
	case VHOST_USER_SET_VRING_KICK:
	case VHOST_USER_SET_VRING_CALL:
	case VHOST_USER_SET_VRING_ERR:
	case VHOST_USER_SET_VRING_ENABLE:
	case VHOST_USER_SEND_RARP:
	case VHOST_USER_NET_SET_MTU:
	case VHOST_USER_SET_SLAVE_REQ_FD:
		vhost_user_lock_all_queue_pairs(dev);
		unlock_required = 1;
		break;
	default:
		break;
	}

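	/*
	 * External backends registered through extern_ops (e.g. the
	 * vhost-crypto backend) may intercept the request before and
	 * after the generic handling below; pre-handlers can also
	 * consume the message entirely by setting skip_master.
	 */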
	if (dev->extern_ops.pre_msg_handle) {
		uint32_t need_reply;

		ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
				(void *)&msg, &need_reply, &skip_master);
		if (ret < 0)
			goto skip_to_reply;

		if (need_reply)
			send_vhost_reply(fd, &msg);

		if (skip_master)
			goto skip_to_post_handle;
	}

	switch (msg.request.master) {
	case VHOST_USER_GET_FEATURES:
		vhost_user_get_features(dev, &msg);
		send_vhost_reply(fd, &msg);
		break;
	case VHOST_USER_SET_FEATURES:
		ret = vhost_user_set_features(dev, msg.payload.u64);
		break;

	case VHOST_USER_GET_PROTOCOL_FEATURES:
		vhost_user_get_protocol_features(dev, &msg);
		send_vhost_reply(fd, &msg);
		break;
	case VHOST_USER_SET_PROTOCOL_FEATURES:
		ret = vhost_user_set_protocol_features(dev, msg.payload.u64);
		break;

	case VHOST_USER_SET_OWNER:
		ret = vhost_user_set_owner();
		break;
	case VHOST_USER_RESET_OWNER:
		ret = vhost_user_reset_owner(dev);
		break;

	case VHOST_USER_SET_MEM_TABLE:
		ret = vhost_user_set_mem_table(&dev, &msg);
		break;

	case VHOST_USER_SET_LOG_BASE:
		ret = vhost_user_set_log_base(dev, &msg);
		if (ret)
			goto skip_to_reply;
		/* it needs a reply */
		send_vhost_reply(fd, &msg);
		break;
	case VHOST_USER_SET_LOG_FD:
		close(msg.fds[0]);
		RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
		break;

	case VHOST_USER_SET_VRING_NUM:
		ret = vhost_user_set_vring_num(dev, &msg);
		break;
	case VHOST_USER_SET_VRING_ADDR:
		ret = vhost_user_set_vring_addr(&dev, &msg);
		break;
	case VHOST_USER_SET_VRING_BASE:
		ret = vhost_user_set_vring_base(dev, &msg);
		break;

	case VHOST_USER_GET_VRING_BASE:
		ret = vhost_user_get_vring_base(dev, &msg);
		if (ret)
			goto skip_to_reply;
		send_vhost_reply(fd, &msg);
		break;

	case VHOST_USER_SET_VRING_KICK:
		ret = vhost_user_set_vring_kick(&dev, &msg);
		break;
	case VHOST_USER_SET_VRING_CALL:
		vhost_user_set_vring_call(dev, &msg);
		break;

	case VHOST_USER_SET_VRING_ERR:
		if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
			close(msg.fds[0]);
		RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
		break;

	case VHOST_USER_GET_QUEUE_NUM:
		vhost_user_get_queue_num(dev, &msg);
		send_vhost_reply(fd, &msg);
		break;

	case VHOST_USER_SET_VRING_ENABLE:
		ret = vhost_user_set_vring_enable(dev, &msg);
		break;
	case VHOST_USER_SEND_RARP:
		ret = vhost_user_send_rarp(dev, &msg);
		break;

	case VHOST_USER_NET_SET_MTU:
		ret = vhost_user_net_set_mtu(dev, &msg);
		break;

	case VHOST_USER_SET_SLAVE_REQ_FD:
		ret = vhost_user_set_req_fd(dev, &msg);
		break;

	case VHOST_USER_IOTLB_MSG:
		ret = vhost_user_iotlb_msg(&dev, &msg);
		break;

	default:
		ret = -1;
		break;
	}

skip_to_post_handle:
	if (!ret && dev->extern_ops.post_msg_handle) {
		uint32_t need_reply;

		ret = (*dev->extern_ops.post_msg_handle)(
				dev->vid, (void *)&msg, &need_reply);
		if (ret < 0)
			goto skip_to_reply;

		if (need_reply)
			send_vhost_reply(fd, &msg);
	}

skip_to_reply:
	if (unlock_required)
		vhost_user_unlock_all_queue_pairs(dev);

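	/*
	 * If the master asked for an acknowledgement (REPLY_ACK protocol
	 * feature), answer with a u64 status: 0 on success, non-zero on
	 * failure.
	 */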
	if (msg.flags & VHOST_USER_NEED_REPLY) {
		msg.payload.u64 = !!ret;
		msg.size = sizeof(msg.payload.u64);
		send_vhost_reply(fd, &msg);
	} else if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"vhost message handling failed.\n");
		return -1;
	}

	if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
		dev->flags |= VIRTIO_DEV_READY;

		if (dev->dequeue_zero_copy) {
			RTE_LOG(INFO, VHOST_CONFIG,
					"dequeue zero copy is enabled\n");
		}

		if (dev->notify_ops->new_device(dev->vid) == 0)
			dev->flags |= VIRTIO_DEV_RUNNING;
	}

	did = dev->vdpa_dev_id;
	vdpa_dev = rte_vdpa_get_device(did);
	if (vdpa_dev && virtio_is_ready(dev) &&
			!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
			msg.request.master == VHOST_USER_SET_VRING_ENABLE) {
		if (vdpa_dev->ops->dev_conf)
			vdpa_dev->ops->dev_conf(dev->vid);
		dev->flags |= VIRTIO_DEV_VDPA_CONFIGURED;
		if (vhost_user_host_notifier_ctrl(dev->vid, true) != 0) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"(%d) software relay is used for vDPA, performance may be low.\n",
				dev->vid);
		}
	}

	return 0;
}
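/*
 * Wait for the master's ack to a slave request that was sent with
 * VHOST_USER_NEED_REPLY, and release the slave_req_lock taken in
 * send_vhost_slave_message().
 */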
static int
process_slave_message_reply(struct virtio_net *dev,
			    const struct VhostUserMsg *msg)
{
	struct VhostUserMsg msg_reply;
	int ret;

	if ((msg->flags & VHOST_USER_NEED_REPLY) == 0)
		return 0;

	if (read_vhost_message(dev->slave_req_fd, &msg_reply) < 0) {
		ret = -1;
		goto out;
	}

	if (msg_reply.request.slave != msg->request.slave) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Received unexpected msg type (%u), expected %u\n",
			msg_reply.request.slave, msg->request.slave);
		ret = -1;
		goto out;
	}

	ret = msg_reply.payload.u64 ? -1 : 0;

out:
	rte_spinlock_unlock(&dev->slave_req_lock);
	return ret;
}
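/*
 * Ask the master to translate an IOVA the backend missed in its IOTLB
 * cache.  The request goes out on the slave channel; the master answers
 * with a VHOST_USER_IOTLB_MSG update handled in vhost_user_iotlb_msg().
 */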
int
vhost_user_iotlb_miss(struct virtio_net *dev, uint64_t iova, uint8_t perm)
{
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_IOTLB_MSG,
		.flags = VHOST_USER_VERSION,
		.size = sizeof(msg.payload.iotlb),
		.payload.iotlb = {
			.iova = iova,
			.perm = perm,
			.type = VHOST_IOTLB_MISS,
		},
	};

	ret = send_vhost_message(dev->slave_req_fd, &msg, NULL, 0);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
				"Failed to send IOTLB miss message (%d)\n",
				ret);
		return ret;
	}

	return 0;
}
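/*
 * Advertise (or revoke, when fd is negative) a host notifier area to
 * the master: an mmap-able (fd, offset, size) region the guest can
 * write to in order to kick the device doorbell directly instead of
 * going through the vhost kick eventfd.
 */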
static int
vhost_user_slave_set_vring_host_notifier(struct virtio_net *dev,
					 int index, int fd,
					 uint64_t offset,
					 uint64_t size)
{
	int *fdp = NULL;
	int fd_num = 0;
	int ret;
	struct VhostUserMsg msg = {
		.request.slave = VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG,
		.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY,
		.size = sizeof(msg.payload.area),
		.payload.area = {
			.u64 = index & VHOST_USER_VRING_IDX_MASK,
			.size = size,
			.offset = offset,
		},
	};

	if (fd < 0) {
		msg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
	} else {
		fdp = &fd;
		fd_num = 1;
	}

	ret = send_vhost_slave_message(dev, &msg, fdp, fd_num);
	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to set host notifier (%d)\n", ret);
		return ret;
	}

	return process_slave_message_reply(dev, &msg);
}
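/*
 * Enable or disable host notifiers for every vring of a vDPA device.
 * Requires VIRTIO_F_VERSION_1 plus the SLAVE_REQ, SLAVE_SEND_FD and
 * HOST_NOTIFIER protocol features; on any per-ring failure during
 * enable, fall back to disabling all notifiers.
 */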
int
vhost_user_host_notifier_ctrl(int vid, bool enable)
{
	struct virtio_net *dev;
	struct rte_vdpa_device *vdpa_dev;
	int vfio_device_fd, did, ret = 0;
	uint64_t offset, size;
	unsigned int i;

	dev = get_device(vid);
	if (!dev)
		return -ENODEV;

	did = dev->vdpa_dev_id;
	if (did < 0)
		return -EINVAL;

	if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
	    !(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) ||
	    !(dev->protocol_features &
			(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
		return -ENOTSUP;

	vdpa_dev = rte_vdpa_get_device(did);
	if (!vdpa_dev)
		return -ENODEV;

	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);

	vfio_device_fd = vdpa_dev->ops->get_vfio_device_fd(vid);
	if (vfio_device_fd < 0)
		return -ENOTSUP;

	if (enable) {
		for (i = 0; i < dev->nr_vring; i++) {
			if (vdpa_dev->ops->get_notify_area(vid, i, &offset,
					&size) < 0) {
				ret = -ENOTSUP;
				goto disable;
			}

			if (vhost_user_slave_set_vring_host_notifier(dev, i,
					vfio_device_fd, offset, size) < 0) {
				ret = -EFAULT;
				goto disable;
			}
		}
	} else {
disable:
		for (i = 0; i < dev->nr_vring; i++) {
			vhost_user_slave_set_vring_host_notifier(dev, i, -1,
					0, 0);
		}
	}

	return ret;
}