vhost: handle dirty pages logging request
lib/librte_vhost/vhost_user/virtio-net-user.c (dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_log.h>

#include "virtio-net.h"
#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"

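/*
 * For each guest memory region we keep the original mmap() result so the
 * mapping can be torn down later: the address and size actually passed to
 * mmap(), the block size used for alignment, and the fd received from QEMU.
 * These records are stored right after the virtio_memory_regions array in
 * the same dev->mem allocation; orig_region() below computes that offset.
 */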
struct orig_region_map {
	int fd;
	uint64_t mapped_address;
	uint64_t mapped_size;
	uint64_t blksz;
};

#define orig_region(ptr, nregions) \
	((struct orig_region_map *)RTE_PTR_ADD((ptr), \
		sizeof(struct virtio_memory) + \
		sizeof(struct virtio_memory_regions) * (nregions)))

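/*
 * Return the block size reported by fstat() for the fd QEMU sent us, or
 * (uint64_t)-1 if fstat() fails.  For a hugetlbfs-backed region this is the
 * hugepage size, which is what the subsequent mmap() length must be
 * aligned to.
 */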
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;
	int ret;

	ret = fstat(fd, &stat);
	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

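/*
 * Undo user_set_mem_table(): munmap() every region that was successfully
 * mapped and close the fd QEMU passed for it.  The dev->mem allocation
 * itself is freed by the caller.
 */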
static void
free_mem_region(struct virtio_net *dev)
{
	struct orig_region_map *region;
	unsigned int idx;

	if (!dev || !dev->mem)
		return;

	region = orig_region(dev->mem, dev->mem->nregions);
	for (idx = 0; idx < dev->mem->nregions; idx++) {
		if (region[idx].mapped_address) {
			munmap((void *)(uintptr_t)region[idx].mapped_address,
					region[idx].mapped_size);
			close(region[idx].fd);
		}
	}
}

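/*
 * VHOST_USER_SET_MEM_TABLE handler.  Any previous memory table is released
 * first (stopping the data plane if it is running), then each region in the
 * message is mmap()ed from the fd QEMU supplied and the guest-physical to
 * host-virtual offset is recorded so the data path can translate addresses.
 */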
int
user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct VhostUserMemory memory = pmsg->payload.memory;
	struct virtio_memory_regions *pregion;
	uint64_t mapped_address, mapped_size;
	struct virtio_net *dev;
	unsigned int idx = 0;
	struct orig_region_map *pregion_orig;
	uint64_t alignment;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Remove from the data plane. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	/* Unmap the old memory regions one by one. */
	if (dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}

	dev->mem = calloc(1,
		sizeof(struct virtio_memory) +
		sizeof(struct virtio_memory_regions) * memory.nregions +
		sizeof(struct orig_region_map) * memory.nregions);
	if (dev->mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev->mem\n",
			dev->device_fh);
		return -1;
	}
	dev->mem->nregions = memory.nregions;

	pregion_orig = orig_region(dev->mem, memory.nregions);
	for (idx = 0; idx < memory.nregions; idx++) {
		pregion = &dev->mem->regions[idx];
		pregion->guest_phys_address =
			memory.regions[idx].guest_phys_addr;
		pregion->guest_phys_address_end =
			memory.regions[idx].guest_phys_addr +
			memory.regions[idx].memory_size;
		pregion->memory_size =
			memory.regions[idx].memory_size;
		pregion->userspace_address =
			memory.regions[idx].userspace_addr;

		/*
		 * The file is mapped from offset 0, so the region's
		 * mmap_offset has to be included in the mapping length.
		 */
		mapped_size = memory.regions[idx].memory_size +
			memory.regions[idx].mmap_offset;

		/* On older longterm Linux kernels (e.g. 2.6.32 and 3.2.72),
		 * mmap() without MAP_ANONYMOUS must be called with a length
		 * aligned to the hugepage size, otherwise it fails with
		 * EINVAL.  Align the length here to avoid that failure.
		 */
		alignment = get_blk_size(pmsg->fds[idx]);
		if (alignment == (uint64_t)-1) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"couldn't get hugepage size through fstat\n");
			goto err_mmap;
		}
		mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);

		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
			mapped_size,
			PROT_READ | PROT_WRITE, MAP_SHARED,
			pmsg->fds[idx],
			0);

		RTE_LOG(INFO, VHOST_CONFIG,
			"mapped region %d fd:%d to:%p sz:0x%"PRIx64" "
			"off:0x%"PRIx64" align:0x%"PRIx64"\n",
			idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
			mapped_size, memory.regions[idx].mmap_offset,
			alignment);

		if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap of qemu guest memory region failed.\n");
			goto err_mmap;
		}

		pregion_orig[idx].mapped_address = mapped_address;
		pregion_orig[idx].mapped_size = mapped_size;
		pregion_orig[idx].blksz = alignment;
		pregion_orig[idx].fd = pmsg->fds[idx];

		mapped_address += memory.regions[idx].mmap_offset;

		pregion->address_offset = mapped_address -
			pregion->guest_phys_address;

		if (memory.regions[idx].guest_phys_addr == 0) {
			dev->mem->base_address =
				memory.regions[idx].userspace_addr;
			dev->mem->mapped_address =
				pregion->address_offset;
		}

		LOG_DEBUG(VHOST_CONFIG,
			"REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n",
			idx,
			(void *)(uintptr_t)pregion->guest_phys_address,
			(void *)(uintptr_t)pregion->userspace_address,
			pregion->memory_size);
	}

	return 0;

err_mmap:
	while (idx--) {
		munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address,
				pregion_orig[idx].mapped_size);
		close(pregion_orig[idx].fd);
	}
	free(dev->mem);
	dev->mem = NULL;
	return -1;
}

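/*
 * A virtqueue is usable once its descriptor table is set up and both the
 * kick and call eventfds have been received; the device as a whole is ready
 * when this holds for every RX/TX queue pair it exposes.
 */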
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
	return vq && vq->desc   &&
	       vq->kickfd != -1 &&
	       vq->callfd != -1;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *rvq, *tvq;
	uint32_t i;

	for (i = 0; i < dev->virt_qp_nb; i++) {
		rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
		tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];

		if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"virtio is not ready for processing.\n");
			return 0;
		}
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio is now ready for processing.\n");
	return 1;
}

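/*
 * VHOST_USER_SET_VRING_CALL handler.  Per the vhost-user protocol, the low
 * bits of payload.u64 select the vring and VHOST_USER_VRING_NOFD_MASK means
 * no eventfd accompanies the message, in which case -1 is passed down.
 */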
void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring call idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_call(ctx, &file);
}

/*
 * In vhost-user, receiving a kick message is the point at which we test
 * whether the virtio device is ready for packet processing.
 */
void
user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;
	struct virtio_net *dev = get_device(ctx);

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring kick idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_kick(ctx, &file);

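	/* If every ring is now ready, hand the device over to the data plane. */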
	if (virtio_is_ready(dev) &&
		!(dev->flags & VIRTIO_DEV_RUNNING))
			notify_ops->new_device(dev);
}

/*
 * When virtio is stopped, qemu sends us the GET_VRING_BASE message.
 */
int
user_get_vring_base(struct vhost_device_ctx ctx,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;
	/* We have to stop the queue (virtio) if it is running. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	/* Here we are safe to get the last used index */
	ops->get_vring_base(ctx, state->index, state);

	RTE_LOG(INFO, VHOST_CONFIG,
		"vring base idx:%d file:%d\n", state->index, state->num);
	/*
	 * In the current qemu vhost-user implementation, this message is
	 * sent only from vhost_vring_stop.
	 * TODO: clean up the vring; it is no longer usable from this point on.
	 */
	if (dev->virtqueue[state->index]->kickfd >= 0) {
		close(dev->virtqueue[state->index]->kickfd);
		dev->virtqueue[state->index]->kickfd = -1;
	}

	return 0;
}

/*
 * When the virtio queues are ready to work, qemu sends us a message to
 * enable the virtio queue pair.
 */
int
user_set_vring_enable(struct vhost_device_ctx ctx,
		      struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);
	int enable = (int)state->num;

	RTE_LOG(INFO, VHOST_CONFIG,
		"set queue enable: %d to qp idx: %d\n",
		enable, state->index);

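	/*
	 * Let the application adjust its use of the ring before the enabled
	 * flag is updated; vring_state_changed() is optional for the
	 * application, hence the NULL check.
	 */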
	if (notify_ops->vring_state_changed)
		notify_ops->vring_state_changed(dev, state->index, enable);

	dev->virtqueue[state->index]->enabled = enable;

	return 0;
}

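/*
 * Tear the device down: pull it out of the data plane if it is still
 * running, then release the guest memory mappings set up in
 * user_set_mem_table().
 */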
void
user_destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev && (dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->destroy_device(dev);

	if (dev && dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}
}

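/*
 * VHOST_USER_SET_PROTOCOL_FEATURES handler.  Requests carrying bits outside
 * VHOST_USER_PROTOCOL_FEATURES are silently ignored; otherwise the
 * negotiated protocol feature set is recorded on the device.
 */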
void
user_set_protocol_features(struct vhost_device_ctx ctx,
			   uint64_t protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
		return;

	dev->protocol_features = protocol_features;
}

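/*
 * VHOST_USER_SET_LOG_BASE handler: the dirty pages logging request.  During
 * live migration QEMU passes an fd for a shared log buffer, together with
 * its size and the offset of the log within the mapping.  The backend is
 * expected to mark every guest page it writes in this log so QEMU can
 * re-send dirtied pages to the destination.
 */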
int
user_set_log_base(struct vhost_device_ctx ctx,
		 struct VhostUserMsg *msg)
{
	struct virtio_net *dev;
	int fd = msg->fds[0];
	uint64_t size, off;
	void *addr;

	dev = get_device(ctx);
	if (!dev)
		return -1;

	if (fd < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
		return -1;
	}

	if (msg->size != sizeof(VhostUserLog)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid log base msg size: %"PRId32" != %d\n",
			msg->size, (int)sizeof(VhostUserLog));
		return -1;
	}

	size = msg->payload.log.mmap_size;
	off  = msg->payload.log.mmap_offset;
	RTE_LOG(INFO, VHOST_CONFIG,
		"log mmap size: %"PRId64", offset: %"PRId64"\n",
		size, off);

	/*
	 * mmap from offset 0 to work around a hugepage mmap bug: mmap will
	 * fail when the offset is not page-size aligned.
	 */
	addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
		return -1;
	}

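	/*
	 * log_base points at the start of the log itself (the mmap address
	 * plus the requested offset).  In the vhost log format each bit of
	 * this buffer is assumed to track one page of guest memory; the code
	 * that sets those bits lives in the data path, not here.
	 */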
	/* TODO: unmap on stop */
	dev->log_base = (uint64_t)(uintptr_t)addr + off;
	dev->log_size = size;

	return 0;
}