/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_log.h>

#include "virtio-net.h"
#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"
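
/*
 * Bookkeeping for each mapped guest memory region: the fd received from
 * qemu and the original mmap() address/size, kept so the region can be
 * munmap()'d and the fd closed on teardown.
 */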
struct orig_region_map {
	int fd;
	uint64_t mapped_address;
	uint64_t mapped_size;
	uint64_t blksz;
};

#define orig_region(ptr, nregions) \
	((struct orig_region_map *)RTE_PTR_ADD((ptr), \
		sizeof(struct virtio_memory) + \
		sizeof(struct virtio_memory_regions) * (nregions)))
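
/*
 * Return the block size reported by fstat() for the region fd; for a
 * hugetlbfs-backed fd this is the huge page size.
 */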
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;

	fstat(fd, &stat);
	return (uint64_t)stat.st_blksize;
}
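
/*
 * Unmap every guest memory region and close its fd. The mapped address is
 * aligned down, and the mapped size up, to the region's block size so the
 * whole hugepage-backed mapping is released.
 */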
static void
free_mem_region(struct virtio_net *dev)
{
	struct orig_region_map *region;
	unsigned int idx;
	uint64_t alignment;

	if (!dev || !dev->mem)
		return;

	region = orig_region(dev->mem, dev->mem->nregions);
	for (idx = 0; idx < dev->mem->nregions; idx++) {
		if (region[idx].mapped_address) {
			alignment = region[idx].blksz;
			munmap((void *)(uintptr_t)
				RTE_ALIGN_FLOOR(
					region[idx].mapped_address, alignment),
				RTE_ALIGN_CEIL(
					region[idx].mapped_size, alignment));
			close(region[idx].fd);
		}
	}
}
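
/*
 * Handle VHOST_USER_SET_MEM_TABLE: tear down any previous mapping of guest
 * memory, then mmap() each region announced by qemu using the fds passed
 * in ancillary data.
 */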
int
user_set_mem_table(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct VhostUserMemory memory = pmsg->payload.memory;
	struct virtio_memory_regions *pregion;
	uint64_t mapped_address, mapped_size;
	struct virtio_net *dev;
	unsigned int idx = 0;
	struct orig_region_map *pregion_orig;
	uint64_t alignment;

	/* Unmap old memory regions one by one. */
	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Remove from the data plane. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	if (dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}

	dev->mem = calloc(1,
		sizeof(struct virtio_memory) +
		sizeof(struct virtio_memory_regions) * memory.nregions +
		sizeof(struct orig_region_map) * memory.nregions);
	if (dev->mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev->mem\n",
			dev->device_fh);
		return -1;
	}
	dev->mem->nregions = memory.nregions;
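
	/*
	 * Copy the region table out of the message and mmap() each region.
	 * The per-region bookkeeping lives in the same allocation, right
	 * after the regions array (see the orig_region() macro above).
	 */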
	pregion_orig = orig_region(dev->mem, memory.nregions);
	for (idx = 0; idx < memory.nregions; idx++) {
		pregion = &dev->mem->regions[idx];
		pregion->guest_phys_address =
			memory.regions[idx].guest_phys_addr;
		pregion->guest_phys_address_end =
			memory.regions[idx].guest_phys_addr +
			memory.regions[idx].memory_size;
		pregion->memory_size =
			memory.regions[idx].memory_size;
		pregion->userspace_address =
			memory.regions[idx].userspace_addr;
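
		/*
		 * Map the whole file from offset 0 (region size plus the
		 * mmap offset into the file); the offset is added back to
		 * the returned address below.
		 */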
		mapped_size = memory.regions[idx].memory_size +
			memory.regions[idx].mmap_offset;
		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
			mapped_size,
			PROT_READ | PROT_WRITE, MAP_SHARED,
			pmsg->fds[idx],
			0);

		RTE_LOG(INFO, VHOST_CONFIG,
			"mapped region %d fd:%d to %p sz:0x%"PRIx64" off:0x%"PRIx64"\n",
			idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
			mapped_size, memory.regions[idx].mmap_offset);

		if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap qemu guest failed.\n");
			goto err_mmap;
		}

		pregion_orig[idx].mapped_address = mapped_address;
		pregion_orig[idx].mapped_size = mapped_size;
		pregion_orig[idx].blksz = get_blk_size(pmsg->fds[idx]);
		pregion_orig[idx].fd = pmsg->fds[idx];
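
		/*
		 * Skip past the mmap offset; address_offset then converts a
		 * guest physical address into a host virtual address.
		 */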
		mapped_address += memory.regions[idx].mmap_offset;

		pregion->address_offset = mapped_address -
			pregion->guest_phys_address;

		if (memory.regions[idx].guest_phys_addr == 0) {
			dev->mem->base_address =
				memory.regions[idx].userspace_addr;
			dev->mem->mapped_address =
				pregion->address_offset;
		}

		LOG_DEBUG(VHOST_CONFIG,
			"REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n",
			idx,
			(void *)(uintptr_t)pregion->guest_phys_address,
			(void *)(uintptr_t)pregion->userspace_address,
			pregion->memory_size);
	}

	return 0;
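
/* Unwind on mmap failure: release every region mapped so far. */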
err_mmap:
	while (idx--) {
		alignment = pregion_orig[idx].blksz;
		munmap((void *)(uintptr_t)RTE_ALIGN_FLOOR(
			pregion_orig[idx].mapped_address, alignment),
			RTE_ALIGN_CEIL(pregion_orig[idx].mapped_size,
					alignment));
		close(pregion_orig[idx].fd);
	}
	free(dev->mem);
	dev->mem = NULL;
	return -1;
}
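
/*
 * A device is ready when both the RX and TX virtqueues have been set up:
 * descriptor rings present and kick/call eventfds valid.
 */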
static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *rvq, *tvq;

	/* Multiqueue support is left for the future. */
	rvq = dev->virtqueue[VIRTIO_RXQ];
	tvq = dev->virtqueue[VIRTIO_TXQ];
	if (rvq && tvq && rvq->desc && tvq->desc &&
		(rvq->kickfd != (eventfd_t)-1) &&
		(rvq->callfd != (eventfd_t)-1) &&
		(tvq->kickfd != (eventfd_t)-1) &&
		(tvq->callfd != (eventfd_t)-1)) {
		RTE_LOG(INFO, VHOST_CONFIG,
			"virtio is now ready for processing.\n");
		return 1;
	}
	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio isn't ready for processing.\n");
	return 0;
}
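
/*
 * Handle VHOST_USER_SET_VRING_CALL: take the interrupt eventfd for the
 * given vring. The NOFD mask in the payload means qemu passed no fd.
 */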
void
user_set_vring_call(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring call idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_call(ctx, &file);
}

/*
 * In vhost-user, when we receive a kick message, test whether the virtio
 * device is ready for packet processing.
 */
void
user_set_vring_kick(struct vhost_device_ctx ctx, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;
	struct virtio_net *dev = get_device(ctx);

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = -1;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring kick idx:%d file:%d\n", file.index, file.fd);
	ops->set_vring_kick(ctx, &file);

	if (virtio_is_ready(dev) &&
		!(dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->new_device(dev);
}

/*
 * When virtio is stopped, qemu will send us the GET_VRING_BASE message.
 */
int
user_get_vring_base(struct vhost_device_ctx ctx,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	/* We have to stop the queue (virtio) if it is running. */
	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	/* Here we are safe to get the last used index. */
	ops->get_vring_base(ctx, state->index, state);

	RTE_LOG(INFO, VHOST_CONFIG,
		"vring base idx:%d file:%d\n", state->index, state->num);
	/*
	 * Based on the current qemu vhost-user implementation, this message
	 * is sent, and only sent, from vhost_vring_stop.
	 * TODO: clean up the vring; it isn't usable from here on.
	 */
	if (((int)dev->virtqueue[VIRTIO_RXQ]->kickfd) >= 0) {
		close(dev->virtqueue[VIRTIO_RXQ]->kickfd);
		dev->virtqueue[VIRTIO_RXQ]->kickfd = (eventfd_t)-1;
	}

	if (((int)dev->virtqueue[VIRTIO_TXQ]->kickfd) >= 0) {
		close(dev->virtqueue[VIRTIO_TXQ]->kickfd);
		dev->virtqueue[VIRTIO_TXQ]->kickfd = (eventfd_t)-1;
	}

	return 0;
}
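
/*
 * Called when qemu tears the device down: remove it from the data plane
 * and release the guest memory mappings.
 */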
void
user_destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev && (dev->flags & VIRTIO_DEV_RUNNING))
		notify_ops->destroy_device(dev);

	if (dev && dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}
}