/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_log.h>

#include "vhost.h"
#include "vhost_user.h"

#define VIRTIO_MIN_MTU 68
#define VIRTIO_MAX_MTU 65535

static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
        [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES",
        [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER",
        [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER",
        [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE",
        [VHOST_USER_SET_LOG_BASE] = "VHOST_USER_SET_LOG_BASE",
        [VHOST_USER_SET_LOG_FD] = "VHOST_USER_SET_LOG_FD",
        [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM",
        [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR",
        [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE",
        [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE",
        [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
        [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
        [VHOST_USER_SET_VRING_ERR]  = "VHOST_USER_SET_VRING_ERR",
        [VHOST_USER_GET_PROTOCOL_FEATURES]  = "VHOST_USER_GET_PROTOCOL_FEATURES",
        [VHOST_USER_SET_PROTOCOL_FEATURES]  = "VHOST_USER_SET_PROTOCOL_FEATURES",
        [VHOST_USER_GET_QUEUE_NUM]  = "VHOST_USER_GET_QUEUE_NUM",
        [VHOST_USER_SET_VRING_ENABLE]  = "VHOST_USER_SET_VRING_ENABLE",
        [VHOST_USER_SEND_RARP]  = "VHOST_USER_SEND_RARP",
        [VHOST_USER_NET_SET_MTU]  = "VHOST_USER_NET_SET_MTU",
};

static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}
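
/*
 * Note: the memory regions passed by QEMU are normally backed by
 * hugetlbfs files, and for such fds fstat() reports the hugepage size
 * in st_blksize (the error message in vhost_user_set_mem_table() relies
 * on this). The value returned here therefore doubles as the mmap()
 * alignment for the region, e.g. 0x200000 for 2 MB hugepages.
 */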

static void
free_mem_region(struct virtio_net *dev)
{
        uint32_t i;
        struct rte_vhost_mem_region *reg;

        if (!dev || !dev->mem)
                return;

        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
        }
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        free(dev->guest_pages);
        dev->guest_pages = NULL;

        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
                dev->log_addr = 0;
        }
}

/*
 * This function just returns success at the moment; taking ownership
 * requires no action on our side.
 */
static int
vhost_user_set_owner(void)
{
        return 0;
}

static int
vhost_user_reset_owner(struct virtio_net *dev)
{
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        cleanup_device(dev, 0);
        reset_device(dev);
        return 0;
}

/*
 * Return the set of features that we support.
 */
static uint64_t
vhost_user_get_features(struct virtio_net *dev)
{
        uint64_t features = 0;

        rte_vhost_driver_get_features(dev->ifname, &features);
        return features;
}

/*
 * We receive the negotiated features supported by both us and the
 * virtio device.
 */
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
        uint64_t vhost_features = 0;

        rte_vhost_driver_get_features(dev->ifname, &vhost_features);
        if (features & ~vhost_features) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) received invalid negotiated features.\n",
                        dev->vid);
                return -1;
        }

        if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->features != features) {
                if (dev->notify_ops->features_changed)
                        dev->notify_ops->features_changed(dev->vid, features);
        }

        dev->features = features;
        if (dev->features &
                ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
        LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
                (dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
                (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

        return 0;
}
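
/*
 * Illustrative note on the header selection above: with
 * VIRTIO_NET_F_MRG_RXBUF (bit 15) or VIRTIO_F_VERSION_1 (bit 32)
 * negotiated, every packet buffer is prefixed by a 12-byte
 * virtio_net_hdr_mrg_rxbuf (the 10-byte virtio_net_hdr plus a 16-bit
 * num_buffers field); otherwise only the 10-byte header is used.
 * vhost_hlen is what the enqueue/dequeue paths skip over (or fill in)
 * at the start of each buffer.
 */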

/*
 * The virtio device sends us the size of the descriptor ring.
 */
static int
vhost_user_set_vring_num(struct virtio_net *dev,
                         VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        vq->size = msg->payload.state.num;

        if (dev->dequeue_zero_copy) {
                vq->nr_zmbuf = 0;
                vq->last_zmbuf_idx = 0;
                vq->zmbuf_size = vq->size;
                vq->zmbufs = rte_zmalloc(NULL, vq->zmbuf_size *
                                         sizeof(struct zcopy_mbuf), 0);
                if (vq->zmbufs == NULL) {
                        RTE_LOG(WARNING, VHOST_CONFIG,
                                "failed to allocate mem for zero copy; "
                                "zero copy is forcibly disabled\n");
                        dev->dequeue_zero_copy = 0;
                }
        }

        vq->shadow_used_ring = rte_malloc(NULL,
                                vq->size * sizeof(struct vring_used_elem),
                                RTE_CACHE_LINE_SIZE);
        if (!vq->shadow_used_ring) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to allocate memory for shadow used ring.\n");
                return -1;
        }

        return 0;
}

/*
 * Reallocate the virtio_net and vhost_virtqueue data structures so that
 * they are on the same NUMA node as the memory of the vring descriptor.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
        int oldnode, newnode;
        struct virtio_net *old_dev;
        struct vhost_virtqueue *old_vq, *vq;
        int ret;

        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];

        ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
                            MPOL_F_NODE | MPOL_F_ADDR);

        /* check if we need to reallocate vq */
        ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
                             MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get vq numa information.\n");
                return dev;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate vq from %d to %d node\n", oldnode, newnode);
                vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
                if (!vq)
                        return dev;

                memcpy(vq, old_vq, sizeof(*vq));
                rte_free(old_vq);
        }

        /* check if we need to reallocate dev */
        ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
                            MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "Unable to get dev numa information.\n");
                goto out;
        }
        if (oldnode != newnode) {
                RTE_LOG(INFO, VHOST_CONFIG,
                        "reallocate dev from %d to %d node\n",
                        oldnode, newnode);
                dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
                if (!dev) {
                        dev = old_dev;
                        goto out;
                }

                memcpy(dev, old_dev, sizeof(*dev));
                rte_free(old_dev);
        }

out:
        dev->virtqueue[index] = vq;
        vhost_devices[dev->vid] = dev;

        return dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
        return dev;
}
#endif
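
/*
 * Rationale for numa_realloc(): the guest allocates the vring in its own
 * memory, so the descriptor ring may land on any NUMA node. Keeping the
 * vhost_virtqueue (and virtio_net) metadata on the same node as the ring
 * avoids remote-node accesses on every enqueue/dequeue.
 * get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) is used
 * purely as a lookup here: it stores the id of the node backing the page
 * that contains 'addr' into 'node'.
 */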

/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qva)
{
        struct rte_vhost_mem_region *reg;
        uint32_t i;

        /* Find the region where the address lives. */
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];

                if (qva >= reg->guest_user_addr &&
                    qva <  reg->guest_user_addr + reg->size) {
                        return qva - reg->guest_user_addr +
                               reg->host_user_addr;
                }
        }

        return 0;
}
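
/*
 * Illustrative example (hypothetical addresses): if a region was sent
 * with guest_user_addr = 0x7f0000000000 and size = 0x40000000, and we
 * mmap()ed it such that host_user_addr = 0x7f8000000000, then a ring
 * address qva = 0x7f0000001000 translates to
 *   0x7f0000001000 - 0x7f0000000000 + 0x7f8000000000 = 0x7f8000001000.
 * A return value of 0 means the address falls in none of the regions.
 */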

/*
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq;

        if (dev->mem == NULL)
                return -1;

        /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];

        /* The addresses are converted from QEMU virtual to Vhost virtual. */
        vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
                        msg->payload.addr.desc_user_addr);
        if (vq->desc == 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to find desc ring address.\n",
                        dev->vid);
                return -1;
        }

        dev = numa_realloc(dev, msg->payload.addr.index);
        vq = dev->virtqueue[msg->payload.addr.index];

        vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
                        msg->payload.addr.avail_user_addr);
        if (vq->avail == 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to find avail ring address.\n",
                        dev->vid);
                return -1;
        }

        vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
                        msg->payload.addr.used_user_addr);
        if (vq->used == 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to find used ring address.\n",
                        dev->vid);
                return -1;
        }

        if (vq->last_used_idx != vq->used->idx) {
                RTE_LOG(WARNING, VHOST_CONFIG,
                        "last_used_idx (%u) and vq->used->idx (%u) do not match; "
                        "some packets may be resent for Tx and dropped for Rx\n",
                        vq->last_used_idx, vq->used->idx);
                vq->last_used_idx  = vq->used->idx;
                vq->last_avail_idx = vq->used->idx;
        }

        vq->log_guest_addr = msg->payload.addr.log_guest_addr;

        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
                        dev->vid, vq->avail);
        LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
                        dev->vid, vq->used);
        LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
                        dev->vid, vq->log_guest_addr);

        return 0;
}

/*
 * The virtio device sends us the available ring last used index.
 */
static int
vhost_user_set_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        dev->virtqueue[msg->payload.state.index]->last_used_idx  =
                        msg->payload.state.num;
        dev->virtqueue[msg->payload.state.index]->last_avail_idx =
                        msg->payload.state.num;

        return 0;
}

static void
add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
                   uint64_t host_phys_addr, uint64_t size)
{
        struct guest_page *page, *last_page;

        if (dev->nr_guest_pages == dev->max_guest_pages) {
                struct guest_page *pages;

                pages = realloc(dev->guest_pages,
                                dev->max_guest_pages * 2 * sizeof(*page));
                if (pages == NULL) {
                        /* keep the old array on failure instead of
                         * dereferencing NULL below; this page is dropped
                         */
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to realloc guest_pages\n");
                        return;
                }
                dev->guest_pages = pages;
                dev->max_guest_pages *= 2;
        }

        if (dev->nr_guest_pages > 0) {
                last_page = &dev->guest_pages[dev->nr_guest_pages - 1];
                /* merge if the two pages are contiguous */
                if (host_phys_addr == last_page->host_phys_addr +
                                      last_page->size) {
                        last_page->size += size;
                        return;
                }
        }

        page = &dev->guest_pages[dev->nr_guest_pages++];
        page->guest_phys_addr = guest_phys_addr;
        page->host_phys_addr  = host_phys_addr;
        page->size = size;
}

static void
add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
                uint64_t page_size)
{
        uint64_t reg_size = reg->size;
        uint64_t host_user_addr  = reg->host_user_addr;
        uint64_t guest_phys_addr = reg->guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;

        host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)host_user_addr);
        size = page_size - (guest_phys_addr & (page_size - 1));
        size = RTE_MIN(size, reg_size);

        add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
        host_user_addr  += size;
        guest_phys_addr += size;
        reg_size -= size;

        while (reg_size > 0) {
                size = RTE_MIN(reg_size, page_size);
                host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
                                                  host_user_addr);
                add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);

                host_user_addr  += size;
                guest_phys_addr += size;
                reg_size -= size;
        }
}
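
/*
 * Illustrative example with a 4 KB page size: a region of size 0x2800
 * starting at guest_phys_addr 0x1800 is split into chunks of 0x800 (up
 * to the first page boundary), 0x1000 and 0x1000, each translated with
 * rte_mem_virt2phy() separately, since the host physical pages backing
 * one virtually-contiguous region need not be contiguous themselves.
 * add_one_guest_page() then merges chunks whose host physical addresses
 * happen to be adjacent.
 */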

#ifdef RTE_LIBRTE_VHOST_DEBUG
/* TODO: enable it only in debug mode? */
static void
dump_guest_pages(struct virtio_net *dev)
{
        uint32_t i;
        struct guest_page *page;

        for (i = 0; i < dev->nr_guest_pages; i++) {
                page = &dev->guest_pages[i];

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest physical page region %u\n"
                        "\t guest_phys_addr: %" PRIx64 "\n"
                        "\t host_phys_addr : %" PRIx64 "\n"
                        "\t size           : %" PRIx64 "\n",
                        i,
                        page->guest_phys_addr,
                        page->host_phys_addr,
                        page->size);
        }
}
#else
#define dump_guest_pages(dev)
#endif

static int
vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct rte_vhost_mem_region *reg;
        void *mmap_addr;
        uint64_t mmap_size;
        uint64_t mmap_offset;
        uint64_t alignment;
        uint32_t i;
        int fd;

        if (dev->mem) {
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
        }

        dev->nr_guest_pages = 0;
        if (!dev->guest_pages) {
                dev->max_guest_pages = 8;
                dev->guest_pages = malloc(dev->max_guest_pages *
                                                sizeof(struct guest_page));
                if (dev->guest_pages == NULL) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "(%d) failed to allocate memory "
                                "for dev->guest_pages\n",
                                dev->vid);
                        return -1;
                }
        }

        dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
                sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%d) failed to allocate memory for dev->mem\n",
                        dev->vid);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        for (i = 0; i < memory.nregions; i++) {
                fd  = pmsg->fds[i];
                reg = &dev->mem->regions[i];

                reg->guest_phys_addr = memory.regions[i].guest_phys_addr;
                reg->guest_user_addr = memory.regions[i].userspace_addr;
                reg->size            = memory.regions[i].memory_size;
                reg->fd              = fd;

                mmap_offset = memory.regions[i].mmap_offset;
                mmap_size   = reg->size + mmap_offset;

                /* On older long-term kernels (e.g. 2.6.32 and 3.2.72),
                 * mmap() without the MAP_ANONYMOUS flag must be called
                 * with a length aligned to the hugepage size, or it
                 * fails with EINVAL.
                 *
                 * To avoid that failure, make sure here in the caller
                 * that the length stays aligned.
                 */
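                /*
                 * Example: with 2 MB hugepages (alignment = 0x200000),
                 * a request of mmap_size = 0x201000 is rounded up by
                 * RTE_ALIGN_CEIL() below to 0x400000.
                 */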
                alignment = get_blk_size(fd);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }
                mmap_size = RTE_ALIGN_CEIL(mmap_size, alignment);

                mmap_addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_POPULATE, fd, 0);

                if (mmap_addr == MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap region %u failed.\n", i);
                        goto err_mmap;
                }

                reg->mmap_addr = mmap_addr;
                reg->mmap_size = mmap_size;
                reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
                                      mmap_offset;

                if (dev->dequeue_zero_copy)
                        add_guest_pages(dev, reg, alignment);

                RTE_LOG(INFO, VHOST_CONFIG,
                        "guest memory region %u, size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
                        "\t guest virtual  addr: 0x%" PRIx64 "\n"
                        "\t host  virtual  addr: 0x%" PRIx64 "\n"
                        "\t mmap addr : 0x%" PRIx64 "\n"
                        "\t mmap size : 0x%" PRIx64 "\n"
                        "\t mmap align: 0x%" PRIx64 "\n"
                        "\t mmap off  : 0x%" PRIx64 "\n",
                        i, reg->size,
                        reg->guest_phys_addr,
                        reg->guest_user_addr,
                        reg->host_user_addr,
                        (uint64_t)(uintptr_t)mmap_addr,
                        mmap_size,
                        alignment,
                        mmap_offset);
        }

        dump_guest_pages(dev);

        return 0;

err_mmap:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
        return -1;
}

static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc   &&
               vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
               vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *vq;
        uint32_t i;

        if (dev->nr_vring == 0)
                return 0;

        for (i = 0; i < dev->nr_vring; i++) {
                vq = dev->virtqueue[i];

                if (!vq_is_ready(vq))
                        return 0;
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

static void
vhost_user_set_vring_call(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->callfd >= 0)
                close(vq->callfd);

        vq->callfd = file.fd;
}

static void
vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct vhost_virtqueue *vq;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = VIRTIO_INVALID_EVENTFD;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);

        vq = dev->virtqueue[file.index];
        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
}

static void
free_zmbufs(struct vhost_virtqueue *vq)
{
        struct zcopy_mbuf *zmbuf, *next;

        for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
             zmbuf != NULL; zmbuf = next) {
                next = TAILQ_NEXT(zmbuf, next);

                rte_pktmbuf_free(zmbuf->mbuf);
                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
        }

        rte_free(vq->zmbufs);
}

/*
 * When virtio is stopped, QEMU will send us the GET_VRING_BASE message.
 */
static int
vhost_user_get_vring_base(struct virtio_net *dev,
                          VhostUserMsg *msg)
{
        struct vhost_virtqueue *vq = dev->virtqueue[msg->payload.state.index];

        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING) {
                dev->flags &= ~VIRTIO_DEV_RUNNING;
                dev->notify_ops->destroy_device(dev->vid);
        }

        dev->flags &= ~VIRTIO_DEV_READY;

        /* Here we are safe to get the last used index */
        msg->payload.state.num = vq->last_used_idx;

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", msg->payload.state.index,
                msg->payload.state.num);
        /*
         * Based on the current QEMU vhost-user implementation, this
         * message is sent from, and only from, vhost_vring_stop.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (vq->kickfd >= 0)
                close(vq->kickfd);

        vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        rte_free(vq->shadow_used_ring);
        vq->shadow_used_ring = NULL;

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message
 * to enable the virtio queue pair.
 */
static int
vhost_user_set_vring_enable(struct virtio_net *dev,
                            VhostUserMsg *msg)
{
        int enable = (int)msg->payload.state.num;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, msg->payload.state.index);

        if (dev->notify_ops->vring_state_changed)
                dev->notify_ops->vring_state_changed(dev->vid,
                                msg->payload.state.index, enable);

        dev->virtqueue[msg->payload.state.index]->enabled = enable;

        return 0;
}

static void
vhost_user_set_protocol_features(struct virtio_net *dev,
                                 uint64_t protocol_features)
{
        if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}

static int
vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        int fd = msg->fds[0];
        uint64_t size, off;
        void *addr;

        if (fd < 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
                return -1;
        }

        if (msg->size != sizeof(VhostUserLog)) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid log base msg size: %"PRId32" != %d\n",
                        msg->size, (int)sizeof(VhostUserLog));
                return -1;
        }

        size = msg->payload.log.mmap_size;
        off  = msg->payload.log.mmap_offset;
        RTE_LOG(INFO, VHOST_CONFIG,
                "log mmap size: %"PRId64", offset: %"PRId64"\n",
                size, off);

        /*
         * mmap from offset 0 to work around a hugepage mmap bug: mmap
         * will fail when the offset is not page-size aligned.
         */
        addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (addr == MAP_FAILED) {
                RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
                return -1;
        }

        /*
         * Free any previously mapped log memory; VHOST_USER_SET_LOG_BASE
         * may occasionally be sent more than once.
         */
        if (dev->log_addr) {
                munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
        }
        dev->log_addr = (uint64_t)(uintptr_t)addr;
        dev->log_base = dev->log_addr + off;
        dev->log_size = size;

        return 0;
}
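
/*
 * Background note: the area mapped above is the dirty-page log used
 * during live migration. It is a plain bitmap with one bit per
 * VHOST_LOG_PAGE of guest memory; the enqueue path sets the bit for
 * every page it writes, so that QEMU knows which pages to re-send to
 * the destination host.
 */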

/*
 * A RARP packet is constructed and broadcast to notify switches about
 * the new location of the migrated VM, so that packets from outside will
 * not be lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
static int
vhost_user_send_rarp(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        uint8_t *mac = (uint8_t *)&msg->payload.u64;

        RTE_LOG(DEBUG, VHOST_CONFIG,
                ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);

        /*
         * Set the flag to inject a RARP broadcast packet at
         * rte_vhost_dequeue_burst().
         *
         * rte_smp_wmb() makes sure the mac is copied before the flag
         * is set.
         */
        rte_smp_wmb();
        rte_atomic16_set(&dev->broadcast_rarp, 1);

        return 0;
}

static int
vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
{
        if (msg->payload.u64 < VIRTIO_MIN_MTU ||
                        msg->payload.u64 > VIRTIO_MAX_MTU) {
                RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
                                msg->payload.u64);

                return -1;
        }

        dev->mtu = msg->payload.u64;

        return 0;
}
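
/*
 * Note on the bounds checked above: 68 is the minimum link MTU required
 * by IPv4 (RFC 791), and 65535 is the largest value representable in the
 * 16-bit virtio-net config MTU field.
 */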

/* Return the number of bytes read on success, or a negative value on
 * failure.
 */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        ret = read_fd_message(sockfd, (char *)msg, VHOST_USER_HDR_SIZE,
                msg->fds, VHOST_MEMORY_MAX_NREGIONS);
        if (ret <= 0)
                return ret;

        if (msg && msg->size) {
                if (msg->size > sizeof(msg->payload)) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "invalid msg size: %d\n", msg->size);
                        return -1;
                }
                ret = read(sockfd, &msg->payload, msg->size);
                if (ret <= 0)
                        return ret;
                if (ret != (int)msg->size) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "read control message failed\n");
                        return -1;
                }
        }

        return ret;
}
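
/*
 * Wire format sketch: every vhost-user message starts with a fixed
 * header (request, flags, payload size) of VHOST_USER_HDR_SIZE bytes,
 * optionally followed by a payload of msg->size bytes. File descriptors
 * (memory region fds, kickfd, callfd, ...) ride along as SCM_RIGHTS
 * ancillary data on the header read, which is why read_fd_message() is
 * used for the header while a plain read() suffices for the payload.
 */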

static int
send_vhost_message(int sockfd, struct VhostUserMsg *msg)
{
        int ret;

        if (!msg)
                return 0;

        msg->flags &= ~VHOST_USER_VERSION_MASK;
        msg->flags &= ~VHOST_USER_NEED_REPLY;
        msg->flags |= VHOST_USER_VERSION;
        msg->flags |= VHOST_USER_REPLY_MASK;

        ret = send_fd_message(sockfd, (char *)msg,
                VHOST_USER_HDR_SIZE + msg->size, NULL, 0);

        return ret;
}

/*
 * Allocate a queue pair if it hasn't been allocated yet
 */
static int
vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
{
        uint16_t vring_idx;

        switch (msg->request) {
        case VHOST_USER_SET_VRING_KICK:
        case VHOST_USER_SET_VRING_CALL:
        case VHOST_USER_SET_VRING_ERR:
                vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vring_idx = msg->payload.addr.index;
                break;
        default:
                return 0;
        }

        if (vring_idx >= VHOST_MAX_VRING) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "invalid vring index: %u\n", vring_idx);
                return -1;
        }

        if (dev->virtqueue[vring_idx])
                return 0;

        return alloc_vring_queue(dev, vring_idx);
}

int
vhost_user_msg_handler(int vid, int fd)
{
        struct virtio_net *dev;
        struct VhostUserMsg msg;
        int ret;

        dev = get_device(vid);
        if (dev == NULL)
                return -1;

        if (!dev->notify_ops) {
                dev->notify_ops = vhost_driver_callback_get(dev->ifname);
                if (!dev->notify_ops) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to get callback ops for driver %s\n",
                                dev->ifname);
                        return -1;
                }
        }

        ret = read_vhost_message(fd, &msg);
        if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
                else if (ret == 0)
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
                else
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read incorrect message\n");

                return -1;
        }

        RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
                vhost_message_str[msg.request]);

        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "failed to alloc queue\n");
                return -1;
        }

        switch (msg.request) {
        case VHOST_USER_GET_FEATURES:
                msg.payload.u64 = vhost_user_get_features(dev);
                msg.size = sizeof(msg.payload.u64);
                send_vhost_message(fd, &msg);
                break;
        case VHOST_USER_SET_FEATURES:
                vhost_user_set_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_GET_PROTOCOL_FEATURES:
                msg.payload.u64 = VHOST_USER_PROTOCOL_FEATURES;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_message(fd, &msg);
                break;
        case VHOST_USER_SET_PROTOCOL_FEATURES:
                vhost_user_set_protocol_features(dev, msg.payload.u64);
                break;

        case VHOST_USER_SET_OWNER:
                vhost_user_set_owner();
                break;
        case VHOST_USER_RESET_OWNER:
                vhost_user_reset_owner(dev);
                break;

        case VHOST_USER_SET_MEM_TABLE:
                ret = vhost_user_set_mem_table(dev, &msg);
                break;

        case VHOST_USER_SET_LOG_BASE:
                vhost_user_set_log_base(dev, &msg);

                /* it needs a reply */
                msg.size = sizeof(msg.payload.u64);
                send_vhost_message(fd, &msg);
                break;
        case VHOST_USER_SET_LOG_FD:
                close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented.\n");
                break;

        case VHOST_USER_SET_VRING_NUM:
                vhost_user_set_vring_num(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_ADDR:
                vhost_user_set_vring_addr(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_BASE:
                vhost_user_set_vring_base(dev, &msg);
                break;

        case VHOST_USER_GET_VRING_BASE:
                vhost_user_get_vring_base(dev, &msg);
                msg.size = sizeof(msg.payload.state);
                send_vhost_message(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_KICK:
                vhost_user_set_vring_kick(dev, &msg);
                break;
        case VHOST_USER_SET_VRING_CALL:
                vhost_user_set_vring_call(dev, &msg);
                break;

        case VHOST_USER_SET_VRING_ERR:
                if (!(msg.payload.u64 & VHOST_USER_VRING_NOFD_MASK))
                        close(msg.fds[0]);
                RTE_LOG(INFO, VHOST_CONFIG, "not implemented\n");
                break;

        case VHOST_USER_GET_QUEUE_NUM:
                msg.payload.u64 = VHOST_MAX_QUEUE_PAIRS;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_message(fd, &msg);
                break;

        case VHOST_USER_SET_VRING_ENABLE:
                vhost_user_set_vring_enable(dev, &msg);
                break;
        case VHOST_USER_SEND_RARP:
                vhost_user_send_rarp(dev, &msg);
                break;

        case VHOST_USER_NET_SET_MTU:
                ret = vhost_user_net_set_mtu(dev, &msg);
                break;

        default:
                ret = -1;
                break;
        }

        if (msg.flags & VHOST_USER_NEED_REPLY) {
                msg.payload.u64 = !!ret;
                msg.size = sizeof(msg.payload.u64);
                send_vhost_message(fd, &msg);
        }

        if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
                dev->flags |= VIRTIO_DEV_READY;

                if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
                        if (dev->dequeue_zero_copy) {
                                RTE_LOG(INFO, VHOST_CONFIG,
                                                "dequeue zero copy is enabled\n");
                        }

                        if (dev->notify_ops->new_device(dev->vid) == 0)
                                dev->flags |= VIRTIO_DEV_RUNNING;
                }
        }

        return 0;
}