vhost: fix crash with multiqueue enabled
[dpdk.git] / lib/librte_vhost/vhost_user/virtio-net-user.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_log.h>

#include "virtio-net.h"
#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"

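/*
 * Bookkeeping for the mmap()ed regions backing the guest memory table.
 * One orig_region_map entry per region is stored right after the
 * virtio_memory region array, so the mappings can be torn down later;
 * the orig_region() macro below locates that trailing array.
 */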
struct orig_region_map {
        int fd;
        uint64_t mapped_address;
        uint64_t mapped_size;
        uint64_t blksz;
};

#define orig_region(ptr, nregions) \
        ((struct orig_region_map *)RTE_PTR_ADD((ptr), \
                sizeof(struct virtio_memory) + \
                sizeof(struct virtio_memory_regions) * (nregions)))

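/*
 * Return the block size reported by fstat() for the given fd (the
 * hugepage size for hugetlbfs-backed fds), used as the mmap alignment,
 * or (uint64_t)-1 if fstat() fails.
 */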
static uint64_t
get_blk_size(int fd)
{
        struct stat stat;
        int ret;

        ret = fstat(fd, &stat);
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

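/*
 * Unmap every region recorded in the device's memory table, widening
 * each mapping back to the block-size alignment it was created with,
 * and close the backing fds.
 */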
static void
free_mem_region(struct virtio_net *dev)
{
        struct orig_region_map *region;
        unsigned int idx;
        uint64_t alignment;

        if (!dev || !dev->mem)
                return;

        region = orig_region(dev->mem, dev->mem->nregions);
        for (idx = 0; idx < dev->mem->nregions; idx++) {
                if (region[idx].mapped_address) {
                        alignment = region[idx].blksz;
                        munmap((void *)(uintptr_t)
                                RTE_ALIGN_FLOOR(
                                        region[idx].mapped_address, alignment),
                                RTE_ALIGN_CEIL(
                                        region[idx].mapped_size, alignment));
                        close(region[idx].fd);
                }
        }
}

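/*
 * Handle VHOST_USER_SET_MEM_TABLE: replace the device's memory table
 * with the regions described in the message, mmap()ing each region
 * through the fd passed alongside it.
 */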
int
user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
        struct VhostUserMemory memory = pmsg->payload.memory;
        struct virtio_memory_regions *pregion;
        uint64_t mapped_address, mapped_size;
        struct virtio_net *dev;
        unsigned int idx = 0;
        struct orig_region_map *pregion_orig;
        uint64_t alignment;

        dev = get_device(ctx);
        if (dev == NULL)
                return -1;

        /* Remove from the data plane. */
        if (dev->flags & VIRTIO_DEV_RUNNING)
                notify_ops->destroy_device(dev);

        /* Unmap any old memory regions, one by one. */
        if (dev->mem) {
                free_mem_region(dev);
                free(dev->mem);
                dev->mem = NULL;
        }

        dev->mem = calloc(1,
                sizeof(struct virtio_memory) +
                sizeof(struct virtio_memory_regions) * memory.nregions +
                sizeof(struct orig_region_map) * memory.nregions);
        if (dev->mem == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "(%"PRIu64") Failed to allocate memory for dev->mem\n",
                        dev->device_fh);
                return -1;
        }
        dev->mem->nregions = memory.nregions;

        pregion_orig = orig_region(dev->mem, memory.nregions);
        for (idx = 0; idx < memory.nregions; idx++) {
                pregion = &dev->mem->regions[idx];
                pregion->guest_phys_address =
                        memory.regions[idx].guest_phys_addr;
                pregion->guest_phys_address_end =
                        memory.regions[idx].guest_phys_addr +
                        memory.regions[idx].memory_size;
                pregion->memory_size =
                        memory.regions[idx].memory_size;
                pregion->userspace_address =
                        memory.regions[idx].userspace_addr;

                /*
                 * This is ugly: we map memory_size + mmap_offset bytes
                 * from file offset 0, so the usable part of the mapping
                 * starts mmap_offset bytes in.
                 */
                mapped_size = memory.regions[idx].memory_size +
                        memory.regions[idx].mmap_offset;

                /*
                 * Fetch the block size before mapping, so that a failed
                 * fstat() leaves nothing extra to clean up.
                 */
                alignment = get_blk_size(pmsg->fds[idx]);
                if (alignment == (uint64_t)-1) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "couldn't get hugepage size through fstat\n");
                        goto err_mmap;
                }

                mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
                        mapped_size,
                        PROT_READ | PROT_WRITE, MAP_SHARED,
                        pmsg->fds[idx],
                        0);

                RTE_LOG(INFO, VHOST_CONFIG,
                        "mapped region %d fd:%d to %p sz:0x%"PRIx64" off:0x%"PRIx64"\n",
                        idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
                        mapped_size, memory.regions[idx].mmap_offset);

                if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "mmap of qemu guest memory region failed.\n");
                        goto err_mmap;
                }

                pregion_orig[idx].mapped_address = mapped_address;
                pregion_orig[idx].mapped_size = mapped_size;
                pregion_orig[idx].blksz = alignment;
                pregion_orig[idx].fd = pmsg->fds[idx];

                mapped_address += memory.regions[idx].mmap_offset;

                pregion->address_offset = mapped_address -
                        pregion->guest_phys_address;

                if (memory.regions[idx].guest_phys_addr == 0) {
                        dev->mem->base_address =
                                memory.regions[idx].userspace_addr;
                        dev->mem->mapped_address =
                                pregion->address_offset;
                }

                LOG_DEBUG(VHOST_CONFIG,
                        "REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n",
                        idx,
                        (void *)(uintptr_t)pregion->guest_phys_address,
                        (void *)(uintptr_t)pregion->userspace_address,
                        pregion->memory_size);
        }

        return 0;

err_mmap:
        while (idx--) {
                alignment = pregion_orig[idx].blksz;
                munmap((void *)(uintptr_t)RTE_ALIGN_FLOOR(
                        pregion_orig[idx].mapped_address, alignment),
                        RTE_ALIGN_CEIL(pregion_orig[idx].mapped_size,
                                        alignment));
                close(pregion_orig[idx].fd);
        }
        free(dev->mem);
        dev->mem = NULL;
        return -1;
}

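/* A virtqueue is ready when its desc table and both eventfds are set. */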
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
        return vq && vq->desc &&
               vq->kickfd != -1 &&
               vq->callfd != -1;
}

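/*
 * The device is ready for processing only when every allocated queue
 * pair has both its RX and TX virtqueues ready.
 */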
static int
virtio_is_ready(struct virtio_net *dev)
{
        struct vhost_virtqueue *rvq, *tvq;
        uint32_t i;

        for (i = 0; i < dev->virt_qp_nb; i++) {
                rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
                tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];

                if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "virtio is not ready for processing.\n");
                        return 0;
                }
        }

        RTE_LOG(INFO, VHOST_CONFIG,
                "virtio is now ready for processing.\n");
        return 1;
}

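/*
 * Handle VHOST_USER_SET_VRING_CALL: record the eventfd (or -1 when the
 * NOFD flag is set) used to notify the guest for the given vring.
 */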
void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = -1;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring call idx:%d file:%d\n", file.index, file.fd);
        ops->set_vring_call(ctx, &file);
}

/*
 * In vhost-user, when we receive a kick message we also test whether
 * the virtio device has become ready for packet processing.
 */
void
user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
        struct vhost_vring_file file;
        struct virtio_net *dev = get_device(ctx);

        if (dev == NULL)
                return;

        file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
        if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
                file.fd = -1;
        else
                file.fd = pmsg->fds[0];
        RTE_LOG(INFO, VHOST_CONFIG,
                "vring kick idx:%d file:%d\n", file.index, file.fd);
        ops->set_vring_kick(ctx, &file);

        if (virtio_is_ready(dev) &&
            !(dev->flags & VIRTIO_DEV_RUNNING))
                notify_ops->new_device(dev);
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
int
user_get_vring_base(struct vhost_device_ctx ctx,
        struct vhost_vring_state *state)
{
        struct virtio_net *dev = get_device(ctx);

        if (dev == NULL)
                return -1;
        /* We have to stop the queue (virtio) if it is running. */
        if (dev->flags & VIRTIO_DEV_RUNNING)
                notify_ops->destroy_device(dev);

        /* Here we are safe to get the last used index. */
        ops->get_vring_base(ctx, state->index, state);

        RTE_LOG(INFO, VHOST_CONFIG,
                "vring base idx:%d file:%d\n", state->index, state->num);
        /*
         * In the current QEMU vhost-user implementation, this message
         * is sent in, and only in, vhost_vring_stop.
         * TODO: clean up the vring; it isn't usable from this point on.
         */
        if (dev->virtqueue[state->index]->kickfd >= 0) {
                close(dev->virtqueue[state->index]->kickfd);
                dev->virtqueue[state->index]->kickfd = -1;
        }

        return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message
 * to enable or disable a virtio queue pair.
 */
int
user_set_vring_enable(struct vhost_device_ctx ctx,
                      struct vhost_vring_state *state)
{
        struct virtio_net *dev = get_device(ctx);
        uint16_t base_idx = state->index;
        int enable = (int)state->num;

        if (dev == NULL)
                return -1;

        RTE_LOG(INFO, VHOST_CONFIG,
                "set queue enable: %d to qp idx: %d\n",
                enable, state->index);

        if (notify_ops->vring_state_changed) {
                notify_ops->vring_state_changed(dev, base_idx / VIRTIO_QNUM,
                                                enable);
        }

        dev->virtqueue[base_idx + VIRTIO_RXQ]->enabled = enable;
        dev->virtqueue[base_idx + VIRTIO_TXQ]->enabled = enable;

        return 0;
}

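/*
 * Stop the device if it is still running and release its memory table
 * mappings.
 */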
void
user_destroy_device(struct vhost_device_ctx ctx)
{
        struct virtio_net *dev = get_device(ctx);

        if (dev && (dev->flags & VIRTIO_DEV_RUNNING))
                notify_ops->destroy_device(dev);

        if (dev && dev->mem) {
                free_mem_region(dev);
                free(dev->mem);
                dev->mem = NULL;
        }
}

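/*
 * Handle VHOST_USER_SET_PROTOCOL_FEATURES: accept the requested feature
 * bits only if they are a subset of what we advertise.
 */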
void
user_set_protocol_features(struct vhost_device_ctx ctx,
                           uint64_t protocol_features)
{
        struct virtio_net *dev;

        dev = get_device(ctx);
        if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
                return;

        dev->protocol_features = protocol_features;
}