/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse/cuse_lowlevel.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_branch_prediction.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>

#include "virtio-net.h"
#include "vhost-net-cdev.h"
#include "eventfd_link/eventfd_link.h"
const char eventfd_cdev[] = "/dev/eventfd-link";

extern uint32_t num_devices;
static uint32_t num_cur_devices = 0;

/* Device ops to add/remove device to data core. */
static struct virtio_net_device_ops const *notify_ops;
/* Root address of the linked list in the configuration core. */
static struct virtio_net_config_ll *ll_root = NULL;

/* Features supported by this application. RX merge buffers are disabled by default. */
uint64_t VHOST_FEATURES = (0ULL << VIRTIO_NET_F_MRG_RXBUF);
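/*
 * Note: the 0ULL shift above yields an all-zero feature mask, i.e. no optional
 * features are offered. To offer mergeable RX buffers instead, the bit would
 * be set rather than cleared: (1ULL << VIRTIO_NET_F_MRG_RXBUF).
 */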
/* Line size for reading maps file. */
const uint32_t BUFSIZE = PATH_MAX;

/* Size of prot char array in procmap. */
#define PROT_SZ 5

/* Number of elements in procmap struct. */
#define PROCMAP_SZ 8

/* Structure containing information gathered from maps file. */
struct procmap {
	uint64_t va_start;        /* Start virtual address in file. */
	uint64_t len;             /* Size of file. */
	uint64_t pgoff;           /* Not used. */
	uint32_t maj;             /* Not used. */
	uint32_t min;             /* Not used. */
	uint32_t ino;             /* Not used. */
	char     prot[PROT_SZ];   /* Not used. */
	char     fname[PATH_MAX]; /* File name. */
};
/*
 * Converts a QEMU virtual address to a vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = dev->mem->mapped_address + qemu_va - dev->mem->base_address;
			break;
		}
	}
	return vhost_va;
}
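/*
 * Worked example with illustrative values (not from a real trace): if the
 * QEMU memory file has base_address 0x2aaaaac00000 and was mapped into this
 * process at mapped_address 0x7f2840000000, then a ring address
 * qemu_va = 0x2aaaaac10000 inside a matching region translates to
 * 0x7f2840000000 + 0x2aaaaac10000 - 0x2aaaaac00000 = 0x7f2840010000.
 */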
/*
 * Locate the file containing QEMU's memory space and map it to our address space.
 */
static int
host_memory_map(struct virtio_net *dev, struct virtio_memory *mem, pid_t pid, uint64_t addr)
{
	struct dirent *dptr = NULL;
	struct procmap procmap;
	DIR *dp = NULL;
	int fd;
	int i;
	char memfile[PATH_MAX];
	char mapfile[PATH_MAX];
	char procdir[PATH_MAX];
	char resolved_path[PATH_MAX];
	FILE *fmap;
	void *map;
	uint8_t found = 0;
	char line[BUFSIZE];
	char dlm[] = "-   :   ";
	char *str, *sp, *in[PROCMAP_SZ];
	char *end = NULL;

	/* Path where mem files are located. */
	snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
	/* Maps file used to locate mem file. */
	snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);

	fmap = fopen(mapfile, "r");
	if (fmap == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
		return -1;
	}

	/* Read through maps file until we find our base_address. */
	while (fgets(line, BUFSIZE, fmap) != 0) {
		str = line;
		errno = 0;
		/* Split line into fields. */
		for (i = 0; i < PROCMAP_SZ; i++) {
			if (((in[i] = strtok_r(str, &dlm[i], &sp)) == NULL) || (errno != 0)) {
				fclose(fmap);
				return -1;
			}
			str = NULL;
		}

		/* Convert/Copy each field as needed. */
		procmap.va_start = strtoull(in[0], &end, 16);
		if ((in[0][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		/* Field 1 holds the region end address; it is turned into a length below. */
		procmap.len = strtoull(in[1], &end, 16);
		if ((in[1][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.pgoff = strtoull(in[3], &end, 16);
		if ((in[3][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.maj = strtoul(in[4], &end, 16);
		if ((in[4][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.min = strtoul(in[5], &end, 16);
		if ((in[5][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.ino = strtoul(in[6], &end, 16);
		if ((in[6][0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		strncpy(procmap.prot, in[2], PROT_SZ - 1);
		procmap.prot[PROT_SZ - 1] = '\0';
		strncpy(procmap.fname, in[7], PATH_MAX - 1);
		procmap.fname[PATH_MAX - 1] = '\0';

		if (procmap.va_start == addr) {
			procmap.len = procmap.len - procmap.va_start;
			found = 1;
			break;
		}
	}
	fclose(fmap);

	if (!found) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
		return -1;
	}

	/* Find the guest memory file among the process fds. */
	dp = opendir(procdir);
	if (dp == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
		return -1;
	}

	found = 0;

	/* Read the fd directory contents. */
	while (NULL != (dptr = readdir(dp))) {
		snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
		if (realpath(memfile, resolved_path) == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
			closedir(dp);
			return -1;
		}
		if (strncmp(resolved_path, procmap.fname,
			strnlen(procmap.fname, PATH_MAX)) == 0) {
			found = 1;
			break;
		}
	}

	closedir(dp);

	if (found == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
		return -1;
	}

	/* Open the shared memory file and map the memory into this process. */
	fd = open(memfile, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
		return -1;
	}

	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE, MAP_POPULATE|MAP_SHARED, fd, 0);
	close(fd);

	if (map == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
		return -1;
	}

	/* Store the memory address and size in the device data structure */
	mem->mapped_address = (uint64_t)(uintptr_t)map;
	mem->mapped_size = procmap.len;

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
		memfile, resolved_path, (long long unsigned)mem->mapped_size, map);

	return 0;
}
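/*
 * For reference, a maps line for the guest memory file looks roughly like
 * (illustrative, field widths vary):
 *
 *   2aaaaac00000-2aaeaac00000 rw-s 00000000 00:0e 22030 /dev/hugepages/...
 *
 * which the strtok_r() pass above splits into va_start, end address, prot,
 * pgoff, maj, min, inode and fname; passing &dlm[i] means field i is split on
 * whichever delimiters remain from position i onwards in "-   :   ".
 */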
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;

		ll_dev = ll_dev->next;
	}

	return NULL;
}
/*
 * Searches the configuration core linked list and retrieves the device if it exists.
 */
static struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);

	/* If a matching entry is found in the linked list, return the device in that entry. */
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev is NULL this is the first device, handled in the else branch. */
	if (ll_dev != NULL) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/* Increment through the ll until we find an unused device_fh. Insert the device at that entry. */
			while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
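/*
 * Example (hypothetical state): with entries holding fh 0, 1 and 3, the walk
 * above stops at the fh 1 entry (1 != 3 - 1), so the new device is assigned
 * fh 2 and is linked between the fh 1 and fh 3 entries, reusing the hole left
 * by a previously destroyed device.
 */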
/*
 * Unmap any memory, close any file descriptors and free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
	/* Unmap QEMU memory file if mapped. */
	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
		if (dev->mem->regions_hpa)
			free(dev->mem->regions_hpa);
		free(dev->mem);
	}

	/* Close any event notifiers opened by device. */
	if (dev->virtqueue[VIRTIO_RXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
	if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
	if (dev->virtqueue[VIRTIO_TXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
	if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	/* Free any malloc'd memory. */
	free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
	free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_root;
	} else {
		if (likely(ll_dev_last != NULL)) {
			ll_dev_last->next = ll_dev->next;
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			return ll_dev_last->next;
		} else {
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from config_ll failed\n");
			return NULL;
		}
	}
}
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
	uint64_t vq_offset;

	/* Virtqueues have already been malloced so we don't want to set them to NULL. */
	vq_offset = offsetof(struct virtio_net, mem);

	/* Set everything else to 0. */
	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
		(sizeof(struct virtio_net) - (size_t)vq_offset));
	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));

	/* Backends are set to -1 indicating an inactive device. */
	dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
	dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;
	struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;

	/* Check the number of devices in the system. */
	if (num_cur_devices == num_devices) {
		RTE_LOG(ERR, VHOST_CONFIG, "() Max num devices (%u) exceeded\n", num_devices);
		return -1;
	}

	/* Setup device and virtqueues. */
	new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
		return -1;
	}

	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_rx == NULL) {
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
		return -1;
	}

	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_tx == NULL) {
		free(virtqueue_rx);
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
		return -1;
	}

	new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
	new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;

	/* Initialise device and virtqueues. */
	init_device(&new_ll_dev->dev);

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	/* Increment the number of devices in the system. */
	num_cur_devices++;

	return new_ll_dev->dev.device_fh;
}
/*
 * Function is called from the CUSE release function. This function will cleanup
 * the device and remove it from the device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/* If the entry to be removed is found, remove it and clean it up. */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call the function to remove it from
			 * the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}

	/* Decrement the number of devices in the system. */
	num_cur_devices--;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success at the moment unless the device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);

	cleanup_device(&ll_dev->dev);
	init_device(&ll_dev->dev);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated set of features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	/* Store the negotiated feature list for the device. */
	dev->features = *pu;

	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
	}

	return 0;
}
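/*
 * For reference, with the standard virtio-net header layouts this selects a
 * 12 byte per-packet header (struct virtio_net_hdr_mrg_rxbuf, which appends a
 * 16-bit num_buffers field) when merging is negotiated, and a 10 byte header
 * (struct virtio_net_hdr) otherwise.
 */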
/*
 * Calculate, for one region whose vhost virtual address range is contiguous,
 * how many breaks in physical contiguity it contains. The region starts at
 * vva_start and is 'size' bytes long; each break adds one physically
 * contiguous sub-region.
 */
static uint32_t check_hpa_regions(uint64_t vva_start, uint64_t size)
{
	uint32_t i, nregions = 0, page_size = PAGE_SIZE;
	uint64_t cur_phys_addr = 0, next_phys_addr = 0;
	if (vva_start % page_size) {
		LOG_DEBUG(VHOST_CONFIG,
			"in check_hpa_regions: vva start(%p) mod page_size(%d) "
			"has remainder\n",
			(void *)(uintptr_t)vva_start, page_size);
		return 0;
	}
	if (size % page_size) {
		LOG_DEBUG(VHOST_CONFIG,
			"in check_hpa_regions: "
			"size((%"PRIu64")) mod page_size(%d) has remainder\n",
			size, page_size);
		return 0;
	}
	for (i = 0; i < size - page_size; i = i + page_size) {
		cur_phys_addr
			= rte_mem_virt2phy((void *)(uintptr_t)(vva_start + i));
		next_phys_addr = rte_mem_virt2phy(
			(void *)(uintptr_t)(vva_start + i + page_size));
		if ((cur_phys_addr + page_size) != next_phys_addr) {
			++nregions;
			LOG_DEBUG(VHOST_CONFIG,
				"in check_hpa_regions: hva addr:(%p) is not "
				"contiguous with hva addr:(%p), diff:%d\n",
				(void *)(uintptr_t)(vva_start + (uint64_t)i),
				(void *)(uintptr_t)(vva_start + (uint64_t)i
				+ page_size), page_size);
			LOG_DEBUG(VHOST_CONFIG,
				"in check_hpa_regions: hpa addr:(%p) is not "
				"contiguous with hpa addr:(%p), "
				"diff:(%"PRIu64")\n",
				(void *)(uintptr_t)cur_phys_addr,
				(void *)(uintptr_t)next_phys_addr,
				(next_phys_addr - cur_phys_addr));
		}
	}
	return nregions;
}
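/*
 * Example (illustrative): scanning a 4-page region whose second and third
 * pages are physically discontiguous finds one break and returns 1. The
 * caller adds this to a count that starts at one per region, so each break
 * contributes one extra physically contiguous sub-region.
 */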
/*
 * Divide each region whose vhost virtual address range is contiguous into
 * sub-regions within which the physical addresses are also contiguous, and
 * fill the offset (to GPA), size etc. of each sub-region into regions_hpa.
 * Returns the number of sub-regions produced.
 */
static uint32_t fill_hpa_memory_regions(void *memory)
{
	uint32_t regionidx, regionidx_hpa = 0, i, k, page_size = PAGE_SIZE;
	uint64_t cur_phys_addr = 0, next_phys_addr = 0, vva_start;
	struct virtio_memory *virtio_memory = (struct virtio_memory *)memory;
	struct virtio_memory_regions_hpa *mem_region_hpa
		= virtio_memory->regions_hpa;

	if (mem_region_hpa == NULL)
		return 0;

	for (regionidx = 0; regionidx < virtio_memory->nregions; regionidx++) {
		vva_start = virtio_memory->regions[regionidx].guest_phys_address
			+ virtio_memory->regions[regionidx].address_offset;
		mem_region_hpa[regionidx_hpa].guest_phys_address
			= virtio_memory->regions[regionidx].guest_phys_address;
		mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
			rte_mem_virt2phy((void *)(uintptr_t)(vva_start))
			- mem_region_hpa[regionidx_hpa].guest_phys_address;
		LOG_DEBUG(VHOST_CONFIG,
			"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
			regionidx_hpa,
			(void *)(uintptr_t)
			(mem_region_hpa[regionidx_hpa].guest_phys_address));
		LOG_DEBUG(VHOST_CONFIG,
			"in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
			regionidx_hpa,
			(void *)(uintptr_t)
			(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
		for (i = 0, k = 0;
			i < virtio_memory->regions[regionidx].memory_size
				- page_size;
			i += page_size) {
			cur_phys_addr = rte_mem_virt2phy(
				(void *)(uintptr_t)(vva_start + i));
			next_phys_addr = rte_mem_virt2phy(
				(void *)(uintptr_t)(vva_start
				+ i + page_size));
			if ((cur_phys_addr + page_size) != next_phys_addr) {
				mem_region_hpa[regionidx_hpa].guest_phys_address_end =
					mem_region_hpa[regionidx_hpa].guest_phys_address
					+ k + page_size;
				mem_region_hpa[regionidx_hpa].memory_size
					= k + page_size;
				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
					"phys addr end [%d]:(%p)\n",
					regionidx_hpa,
					(void *)(uintptr_t)
					(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
				LOG_DEBUG(VHOST_CONFIG,
					"in fill_hpa_regions: guest phys addr "
					"size [%d]:(%p)\n",
					regionidx_hpa,
					(void *)(uintptr_t)
					(mem_region_hpa[regionidx_hpa].memory_size));
				mem_region_hpa[regionidx_hpa + 1].guest_phys_address
					= mem_region_hpa[regionidx_hpa].guest_phys_address_end;
				++regionidx_hpa;
				mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
					next_phys_addr
					- mem_region_hpa[regionidx_hpa].guest_phys_address;
				LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
					" phys addr start[%d]:(%p)\n",
					regionidx_hpa,
					(void *)(uintptr_t)
					(mem_region_hpa[regionidx_hpa].guest_phys_address));
				LOG_DEBUG(VHOST_CONFIG,
					"in fill_hpa_regions: host phys addr "
					"start[%d]:(%p)\n",
					regionidx_hpa,
					(void *)(uintptr_t)
					(mem_region_hpa[regionidx_hpa].host_phys_addr_offset));
				k = 0;
			} else {
				k += page_size;
			}
		}
		mem_region_hpa[regionidx_hpa].guest_phys_address_end
			= mem_region_hpa[regionidx_hpa].guest_phys_address
			+ k + page_size;
		mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
			"[%d]:(%p)\n", regionidx_hpa,
			(void *)(uintptr_t)
			(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
		LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
			"[%d]:(%p)\n", regionidx_hpa,
			(void *)(uintptr_t)
			(mem_region_hpa[regionidx_hpa].memory_size));
		++regionidx_hpa;
	}
	return regionidx_hpa;
}
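#if 0
/*
 * Illustrative sketch of how regions_hpa can be consumed (assumed helper, not
 * part of this file): translate a guest physical address to a host physical
 * address by finding the enclosing sub-region and applying its offset.
 */
static uint64_t
gpa_to_hpa(struct virtio_memory *mem, uint64_t guest_pa)
{
	uint32_t regionidx;
	struct virtio_memory_regions_hpa *region;

	for (regionidx = 0; regionidx < mem->nregions_hpa; regionidx++) {
		region = &mem->regions_hpa[regionidx];
		if ((guest_pa >= region->guest_phys_address) &&
			(guest_pa < region->guest_phys_address_end))
			return guest_pa + region->host_phys_addr_offset;
	}
	return 0;
}
#endif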
/*
 * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
 * This function creates and populates the memory structure for the device. This includes
 * storing offsets used to translate buffer addresses.
 */
static int
set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr, uint32_t nregions)
{
	struct virtio_net *dev;
	struct vhost_memory_region *mem_regions;
	struct virtio_memory *mem;
	uint64_t size = offsetof(struct vhost_memory, regions);
	uint32_t regionidx, valid_regions;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address, (size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Malloc the memory structure depending on the number of regions. */
	mem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));
	if (mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
		return -1;
	}

	mem->nregions = nregions;

	mem_regions = (void *)(uintptr_t)((uint64_t)(uintptr_t)mem_regions_addr + size);

	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		/* Populate the region structure for each region. */
		mem->regions[regionidx].guest_phys_address = mem_regions[regionidx].guest_phys_addr;
		mem->regions[regionidx].guest_phys_address_end = mem->regions[regionidx].guest_phys_address +
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;
		mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;

		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
			regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
			mem->regions[regionidx].memory_size);

		/* Set the base address mapping. */
		if (mem->regions[regionidx].guest_phys_address == 0x0) {
			mem->base_address = mem->regions[regionidx].userspace_address;
			/* Map VM memory file. */
			if (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {
				free(mem);
				return -1;
			}
		}
	}

	/* Check that we have a valid base address. */
	if (mem->base_address == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
		free(mem);
		return -1;
	}

	/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */
	valid_regions = mem->nregions;
	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
			(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))
			valid_regions--;
	}

	/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
	if (valid_regions != mem->nregions) {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
			dev->device_fh);

		/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
		valid_regions = 0;

		for (regionidx = mem->nregions; 0 != regionidx--;) {
			if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
				(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {
				memmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],
					sizeof(struct virtio_memory_regions) * valid_regions);
			} else {
				valid_regions++;
			}
		}
	}
	mem->nregions = valid_regions;
	mem->nregions_hpa = mem->nregions;
	dev->mem = mem;

	/*
	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
	 * corresponding to a QEMU guest physical address.
	 */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		dev->mem->regions[regionidx].address_offset = dev->mem->regions[regionidx].userspace_address - dev->mem->base_address
			+ dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;

		dev->mem->nregions_hpa
			+= check_hpa_regions(
				dev->mem->regions[regionidx].guest_phys_address
				+ dev->mem->regions[regionidx].address_offset,
				dev->mem->regions[regionidx].memory_size);
	}
	if (dev->mem->regions_hpa != NULL) {
		free(dev->mem->regions_hpa);
		dev->mem->regions_hpa = NULL;
	}

	dev->mem->regions_hpa = (struct virtio_memory_regions_hpa *) calloc(1,
		(sizeof(struct virtio_memory_regions_hpa)
		* dev->mem->nregions_hpa));
	if (dev->mem->regions_hpa == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for "
			"dev->mem->regions_hpa.\n", dev->device_fh);
		return -1;
	}
	if (fill_hpa_memory_regions(
		(void *)dev->mem) != dev->mem->nregions_hpa) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"in set_mem_table: hpa memory regions number mismatch: "
			"[%d]\n", dev->mem->nregions_hpa);
		return -1;
	}

	return 0;
}
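/*
 * Worked example with illustrative values: for a region whose
 * guest_phys_address is 0x100000 and whose userspace_address equals
 * base_address + 0x100000, the formula above gives
 * address_offset = (base_address + 0x100000) - base_address
 *                  + mapped_address - 0x100000 = mapped_address,
 * so a guest physical address G in that region maps to the vhost virtual
 * address mapped_address + G.
 */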
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[state->index]->size = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses. This function
 * then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
		return -1;
	}

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;

	return 0;
}
/*
 * This function uses the eventfd_link kernel module to copy an eventfd file descriptor
 * provided by QEMU into our process space.
 */
static int
eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
{
	int eventfd_link, ret;

	/* Open the character device to the kernel module. */
	eventfd_link = open(eventfd_cdev, O_RDWR);
	if (eventfd_link < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n", dev->device_fh);
		return -1;
	}

	/* Call the IOCTL to copy the eventfd. */
	ret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);
	close(eventfd_link);

	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n", dev->device_fh);
		return -1;
	}

	return 0;
}
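/*
 * Assumed semantics of the EVENTFD_COPY ioctl (see eventfd_link.h): it makes
 * source_fd in this process refer to the same eventfd object that target_fd
 * identifies inside the target_pid process. The callers below therefore
 * create a placeholder eventfd first and then overwrite it with QEMU's via
 * this call.
 */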
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets copied into
 * our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_kick;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->kickfd)
		close((int)vq->kickfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_kick.source_fd = vq->kickfd;
	eventfd_kick.target_fd = file->fd;
	eventfd_kick.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_kick))
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us. This fd gets copied into
 * our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_call;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->callfd)
		close((int)vq->callfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_call.source_fd = vq->callfd;
	eventfd_call.target_fd = file->fd;
	eventfd_call.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_call))
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded we are provided with a
 * valid fd for a tap device (not used by us). If this happens then we can add the device to a
 * data core. When the virtio driver is removed we get fd=-1. At that point we remove the device
 * from the data core. The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/* If the device isn't already running and both backend fds are set we add the device. */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
			return notify_ops->new_device(dev);
	/* Otherwise we remove it. */
	} else {
		if (file->fd == VIRTIO_DEV_STOPPED) {
			notify_ops->destroy_device(dev);
		}
	}

	return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call functions
 * when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops =
{
	.new_device = new_device,
	.destroy_device = destroy_device,

	.get_features = get_features,
	.set_features = set_features,

	.set_mem_table = set_mem_table,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};
/*
 * Called by main to setup callbacks when registering the CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}
/*
 * Register ops so that we can add/remove devices to/from a data core.
 */
int
init_virtio_net(struct virtio_net_device_ops const * const ops)
{
	notify_ops = ops;

	return 0;
}

/*
 * Currently not used as we use Ctrl+c to exit the application.
 */
void
deinit_virtio_net(void)
{
}