/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_log.h>

#include "virtio-net-user.h"
#include "vhost-net-user.h"
#include "vhost-net.h"

/* Bookkeeping for the original mmap()ed regions, for later munmap()/close(). */
struct orig_region_map {
	int fd;
	uint64_t mapped_address;
	uint64_t mapped_size;
	uint64_t blksz;
};

#define orig_region(ptr, nregions) \
	((struct orig_region_map *)RTE_PTR_ADD((ptr), \
		sizeof(struct virtio_memory) + \
		sizeof(struct virtio_memory_regions) * (nregions)))

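/*
 * Layout of the single dev->mem allocation as built by user_set_mem_table()
 * below; orig_region() skips past the parts used by the virtio data path to
 * reach the bookkeeping this file needs for later munmap()/close():
 *
 *	struct virtio_memory                     header (nregions, ...)
 *	struct virtio_memory_regions[nregions]   guest memory regions
 *	struct orig_region_map[nregions]         original mmap address/size/fd
 */
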
static uint64_t
get_blk_size(int fd)
{
	struct stat stat;
	int ret;

	ret = fstat(fd, &stat);
	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
}

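/*
 * The memory region fds passed by QEMU are expected to be backed by
 * hugetlbfs; for such files fstat() reports the huge page size in
 * st_blksize, which is what user_set_mem_table() uses below to align
 * the mmap() length.
 */
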
static void
free_mem_region(struct virtio_net *dev)
{
	struct orig_region_map *region;
	unsigned int idx;

	if (!dev || !dev->mem)
		return;

	region = orig_region(dev->mem, dev->mem->nregions);
	for (idx = 0; idx < dev->mem->nregions; idx++) {
		if (region[idx].mapped_address) {
			munmap((void *)(uintptr_t)region[idx].mapped_address,
					region[idx].mapped_size);
			close(region[idx].fd);
		}
	}
}

void
vhost_backend_cleanup(struct virtio_net *dev)
{
	if (dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}
	if (dev->log_addr) {
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
		dev->log_addr = 0;
	}
}

int
user_set_mem_table(int vid, struct VhostUserMsg *pmsg)
{
	struct VhostUserMemory memory = pmsg->payload.memory;
	struct virtio_memory_regions *pregion;
	uint64_t mapped_address, mapped_size;
	struct virtio_net *dev;
	unsigned int idx = 0;
	struct orig_region_map *pregion_orig;
	uint64_t alignment;

	/* Unmap old memory regions one by one. */
	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	/* Remove from the data plane. */
	if (dev->flags & VIRTIO_DEV_RUNNING) {
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		notify_ops->destroy_device(vid);
	}

	if (dev->mem) {
		free_mem_region(dev);
		free(dev->mem);
		dev->mem = NULL;
	}

	dev->mem = calloc(1,
		sizeof(struct virtio_memory) +
		sizeof(struct virtio_memory_regions) * memory.nregions +
		sizeof(struct orig_region_map) * memory.nregions);
	if (dev->mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%d) failed to allocate memory for dev->mem\n",
			vid);
		return -1;
	}
	dev->mem->nregions = memory.nregions;

	pregion_orig = orig_region(dev->mem, memory.nregions);
	for (idx = 0; idx < memory.nregions; idx++) {
		pregion = &dev->mem->regions[idx];
		pregion->guest_phys_address =
			memory.regions[idx].guest_phys_addr;
		pregion->guest_phys_address_end =
			memory.regions[idx].guest_phys_addr +
			memory.regions[idx].memory_size;
		pregion->memory_size =
			memory.regions[idx].memory_size;
		pregion->userspace_address =
			memory.regions[idx].userspace_addr;

		mapped_size = memory.regions[idx].memory_size +
			memory.regions[idx].mmap_offset;

		/* mmap(), when not given MAP_ANONYMOUS, must be called with a
		 * length aligned to the hugepage size on older longterm Linux
		 * kernels (e.g. 2.6.32 and 3.2.72), or it fails with EINVAL.
		 *
		 * To avoid that failure, keep the length aligned here in the
		 * caller.
		 */
		alignment = get_blk_size(pmsg->fds[idx]);
		if (alignment == (uint64_t)-1) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"couldn't get hugepage size through fstat\n");
			goto err_mmap;
		}
		mapped_size = RTE_ALIGN_CEIL(mapped_size, alignment);

		mapped_address = (uint64_t)(uintptr_t)mmap(NULL,
			mapped_size,
			PROT_READ | PROT_WRITE, MAP_SHARED,
			pmsg->fds[idx],
			0);

		RTE_LOG(INFO, VHOST_CONFIG,
			"mapped region %d fd:%d to:%p sz:0x%"PRIx64" "
			"off:0x%"PRIx64" align:0x%"PRIx64"\n",
			idx, pmsg->fds[idx], (void *)(uintptr_t)mapped_address,
			mapped_size, memory.regions[idx].mmap_offset,
			alignment);

		if (mapped_address == (uint64_t)(uintptr_t)MAP_FAILED) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"mmap qemu guest failed.\n");
			goto err_mmap;
		}

		pregion_orig[idx].mapped_address = mapped_address;
		pregion_orig[idx].mapped_size = mapped_size;
		pregion_orig[idx].blksz = alignment;
		pregion_orig[idx].fd = pmsg->fds[idx];

		mapped_address += memory.regions[idx].mmap_offset;

		pregion->address_offset = mapped_address -
			pregion->guest_phys_address;

		if (memory.regions[idx].guest_phys_addr == 0) {
			dev->mem->base_address =
				memory.regions[idx].userspace_addr;
			dev->mem->mapped_address =
				pregion->address_offset;
		}

		LOG_DEBUG(VHOST_CONFIG,
			"REGION: %u GPA: %p QEMU VA: %p SIZE (%"PRIu64")\n",
			idx,
			(void *)(uintptr_t)pregion->guest_phys_address,
			(void *)(uintptr_t)pregion->userspace_address,
			pregion->memory_size);
	}

	return 0;

err_mmap:
	while (idx--) {
		munmap((void *)(uintptr_t)pregion_orig[idx].mapped_address,
				pregion_orig[idx].mapped_size);
		close(pregion_orig[idx].fd);
	}
	free(dev->mem);
	dev->mem = NULL;
	return -1;
}

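/*
 * A sketch of how the offsets recorded above are meant to be consumed (the
 * translation itself lives outside this file): for a guest physical address
 * gpa inside region r,
 *
 *	vva = gpa + dev->mem->regions[r].address_offset;
 *
 * yields the host virtual address, since the region was mapped contiguously
 * and mmap_offset is already folded into address_offset.
 */
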
static int
vq_is_ready(struct vhost_virtqueue *vq)
{
	return vq && vq->desc &&
	       vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
	       vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
}

static int
virtio_is_ready(struct virtio_net *dev)
{
	struct vhost_virtqueue *rvq, *tvq;
	uint32_t i;

	for (i = 0; i < dev->virt_qp_nb; i++) {
		rvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ];
		tvq = dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ];

		if (!vq_is_ready(rvq) || !vq_is_ready(tvq)) {
			RTE_LOG(INFO, VHOST_CONFIG,
				"virtio is not ready for processing.\n");
			return 0;
		}
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio is now ready for processing.\n");
	return 1;
}

void
user_set_vring_call(int vid, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = VIRTIO_INVALID_EVENTFD;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring call idx:%d file:%d\n", file.index, file.fd);
	vhost_set_vring_call(vid, &file);
}

/*
 * In vhost-user, when we receive a kick message, test whether the virtio
 * device is ready for packet processing.
 */
void
user_set_vring_kick(int vid, struct VhostUserMsg *pmsg)
{
	struct vhost_vring_file file;
	struct virtio_net *dev = get_device(vid);

	if (!dev)
		return;

	file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
	if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
		file.fd = VIRTIO_INVALID_EVENTFD;
	else
		file.fd = pmsg->fds[0];
	RTE_LOG(INFO, VHOST_CONFIG,
		"vring kick idx:%d file:%d\n", file.index, file.fd);
	vhost_set_vring_kick(vid, &file);

	if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (notify_ops->new_device(vid) == 0)
			dev->flags |= VIRTIO_DEV_RUNNING;
	}
}

/*
 * When virtio is stopped, QEMU sends us the GET_VRING_BASE message.
 */
int
user_get_vring_base(int vid, struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(vid);

	if (dev == NULL)
		return -1;

	/* We have to stop the queue (virtio) if it is running. */
	if (dev->flags & VIRTIO_DEV_RUNNING) {
		dev->flags &= ~VIRTIO_DEV_RUNNING;
		notify_ops->destroy_device(vid);
	}

	/* Here we are safe to get the last used index. */
	vhost_get_vring_base(vid, state->index, state);

	RTE_LOG(INFO, VHOST_CONFIG,
		"vring base idx:%d file:%d\n", state->index, state->num);
	/*
	 * Based on the current QEMU vhost-user implementation, this message
	 * is sent only from vhost_vring_stop.
	 * TODO: clean up the vring; it is not usable from this point on.
	 */
	if (dev->virtqueue[state->index]->kickfd >= 0)
		close(dev->virtqueue[state->index]->kickfd);

	dev->virtqueue[state->index]->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;

	return 0;
}

/*
 * When the virtio queues are ready to work, QEMU sends us a message to
 * enable the virtio queue pair.
 */
int
user_set_vring_enable(int vid, struct vhost_vring_state *state)
{
	struct virtio_net *dev;
	int enable = (int)state->num;

	dev = get_device(vid);
	if (dev == NULL)
		return -1;

	RTE_LOG(INFO, VHOST_CONFIG,
		"set queue enable: %d to qp idx: %d\n",
		enable, state->index);

	if (notify_ops->vring_state_changed)
		notify_ops->vring_state_changed(vid, state->index, enable);

	dev->virtqueue[state->index]->enabled = enable;

	return 0;
}

void
user_set_protocol_features(int vid, uint64_t protocol_features)
{
	struct virtio_net *dev;

	dev = get_device(vid);
	if (dev == NULL || protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
		return;

	dev->protocol_features = protocol_features;
}

int
user_set_log_base(int vid, struct VhostUserMsg *msg)
{
	struct virtio_net *dev;
	int fd = msg->fds[0];
	uint64_t size, off;
	void *addr;

	dev = get_device(vid);
	if (!dev)
		return -1;

	if (fd < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "invalid log fd: %d\n", fd);
		return -1;
	}

	if (msg->size != sizeof(VhostUserLog)) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"invalid log base msg size: %"PRId32" != %d\n",
			msg->size, (int)sizeof(VhostUserLog));
		return -1;
	}

	size = msg->payload.log.mmap_size;
	off = msg->payload.log.mmap_offset;
	RTE_LOG(INFO, VHOST_CONFIG,
		"log mmap size: %"PRId64", offset: %"PRId64"\n",
		size, off);

	/*
	 * mmap from 0 to work around a hugepage mmap bug: mmap will
	 * fail when the offset is not page size aligned.
	 */
	addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (addr == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
		return -1;
	}

	/*
	 * Free any previously mapped log memory; VHOST_USER_SET_LOG_BASE
	 * can occasionally be received more than once.
	 */
	if (dev->log_addr)
		munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);

	dev->log_addr = (uint64_t)(uintptr_t)addr;
	dev->log_base = dev->log_addr + off;
	dev->log_size = size;

	return 0;
}

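/*
 * From this point dev->log_base points into the log region shared by QEMU;
 * the data path is expected to mark the guest pages it dirties there so that
 * live migration can track memory changes. This is a note on intent only:
 * the dirty-page logging itself happens elsewhere.
 */
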
/*
 * A RARP packet is constructed and broadcast to notify switches of the new
 * location of the migrated VM, so that packets from the outside are not
 * lost after migration.
 *
 * However, we don't actually "send" a RARP packet here; instead, we set
 * the 'broadcast_rarp' flag to let rte_vhost_dequeue_burst() inject it.
 */
int
user_send_rarp(int vid, struct VhostUserMsg *msg)
{
	struct virtio_net *dev;
	uint8_t *mac = (uint8_t *)&msg->payload.u64;

	dev = get_device(vid);
	if (!dev)
		return -1;

	RTE_LOG(DEBUG, VHOST_CONFIG,
		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	memcpy(dev->mac.addr_bytes, mac, 6);

	/*
	 * Set the flag to inject a RARP broadcast packet at
	 * rte_vhost_dequeue_burst().
	 *
	 * rte_smp_wmb() makes sure the MAC is copied before the flag is set.
	 */
	rte_smp_wmb();
	rte_atomic16_set(&dev->broadcast_rarp, 1);

	return 0;
}
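
/*
 * The consumer side is rte_vhost_dequeue_burst(): presumably it checks
 * broadcast_rarp, injects one RARP frame built from dev->mac, and clears
 * the flag again (see the comment above user_send_rarp()).
 */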