/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse/cuse_lowlevel.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <linux/if_tun.h>
#include <net/if.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_virtio_net.h>

#include "vhost-net-cdev.h"
#include "eventfd_link/eventfd_link.h"
/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
	struct virtio_net dev;			/* Virtio device. */
	struct virtio_net_config_ll *next;	/* Next dev on linked list. */
};

const char eventfd_cdev[] = "/dev/eventfd-link";

/* device ops to add/remove device to/from data core. */
static struct virtio_net_device_ops const *notify_ops;
/* root address of the linked list of managed virtio devices */
static struct virtio_net_config_ll *ll_root;

/* Features supported by this lib. */
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
	(1ULL << VIRTIO_NET_F_CTRL_RX))

static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
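
/*
 * Note: VHOST_FEATURES starts as the full supported set; an application can
 * trim or restore individual bits at runtime through
 * rte_vhost_feature_disable()/rte_vhost_feature_enable() defined below.
 */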
/* Line size for reading maps file. */
static const uint32_t BUFSIZE = PATH_MAX;

/* Size of prot char array in procmap. */
#define PROT_SZ 5

/* Number of elements in procmap struct. */
#define PROCMAP_SZ 8

/* Structure containing information gathered from maps file. */
struct procmap {
	uint64_t va_start;	/* Start virtual address in file. */
	uint64_t len;		/* Size of file. */
	uint64_t pgoff;		/* Not used. */
	uint32_t maj;		/* Not used. */
	uint32_t min;		/* Not used. */
	uint32_t ino;		/* Not used. */
	char prot[PROT_SZ];	/* Not used. */
	char fname[PATH_MAX];	/* File name. */
};
/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = dev->mem->mapped_address + qemu_va -
				dev->mem->base_address;
			break;
		}
	}

	return vhost_va;
}
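
/*
 * Example of the translation above (illustrative values only): QEMU's guest
 * memory is mapped into this process as one contiguous area starting at
 * mapped_address, while the same area starts at base_address in QEMU's
 * address space, so vhost_va = mapped_address + (qemu_va - base_address),
 * i.e. the same offset into the shared mapping created by host_memory_map().
 */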
/*
 * Locate the file containing QEMU's memory space and
 * map it to our address space.
 */
static int
host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
	pid_t pid, uint64_t addr)
{
	struct dirent *dptr = NULL;
	struct procmap procmap;
	DIR *dp = NULL;
	int fd, i, found = 0;
	char memfile[PATH_MAX];
	char mapfile[PATH_MAX];
	char procdir[PATH_MAX];
	char resolved_path[PATH_MAX];
	char *path = NULL, *end = NULL;
	FILE *fmap;
	void *map;
	char line[BUFSIZE];
	char dlm[] = "-   :   ";
	char *str, *sp, *in[PROCMAP_SZ];

	/* Path where mem files are located. */
	snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
	/* Maps file used to locate mem file. */
	snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);

	fmap = fopen(mapfile, "r");
	if (fmap == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to open maps file for pid %d\n",
			dev->device_fh, pid);
		return -1;
	}

	/* Read through maps file until we find out base_address. */
	while (fgets(line, BUFSIZE, fmap) != 0) {
		str = line;
		errno = 0;
		/* Split line into fields. */
		for (i = 0; i < PROCMAP_SZ; i++) {
			in[i] = strtok_r(str, &dlm[i], &sp);
			if ((in[i] == NULL) || (errno != 0)) {
				fclose(fmap);
				return -1;
			}
			str = NULL;
		}

		/* Convert/Copy each field as needed. */
		errno = 0;
		procmap.va_start = strtoull(in[0], &end, 16);
		if ((in[0] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.len = strtoull(in[1], &end, 16);
		if ((in[1] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.pgoff = strtoull(in[3], &end, 16);
		if ((in[3] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.maj = strtoul(in[4], &end, 16);
		if ((in[4] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.min = strtoul(in[5], &end, 16);
		if ((in[5] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.ino = strtoul(in[6], &end, 16);
		if ((in[6] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}
		memcpy(&procmap.prot, in[2], PROT_SZ);
		memcpy(&procmap.fname, in[7], PATH_MAX);

		if (procmap.va_start == addr) {
			procmap.len = procmap.len - procmap.va_start;
			found = 1;
			break;
		}
	}
	fclose(fmap);

	if (!found) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find memory file in pid %d maps file\n",
			dev->device_fh, pid);
		return -1;
	}

	/* Find the guest memory file among the process fds. */
	dp = opendir(procdir);
	if (dp == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Cannot open pid %d process directory\n",
			dev->device_fh, pid);
		return -1;
	}

	found = 0;
	/* Read the fd directory contents. */
	while (NULL != (dptr = readdir(dp))) {
		snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
				pid, dptr->d_name);
		path = realpath(memfile, resolved_path);
		if ((path == NULL) && (strlen(resolved_path) == 0)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"(%"PRIu64") Failed to resolve fd directory\n",
				dev->device_fh);
			closedir(dp);
			return -1;
		}
		if (strncmp(resolved_path, procmap.fname,
			strnlen(procmap.fname, PATH_MAX)) == 0) {
			found = 1;
			break;
		}
	}
	closedir(dp);

	if (found == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find memory file for pid %d\n",
			dev->device_fh, pid);
		return -1;
	}

	/* Open the shared memory file and map the memory into this process. */
	fd = open(memfile, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to open %s for pid %d\n",
			dev->device_fh, memfile, pid);
		return -1;
	}

	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
			MAP_POPULATE|MAP_SHARED, fd, 0);
	close(fd);

	if (map == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Error mapping the file %s for pid %d\n",
			dev->device_fh, memfile, pid);
		return -1;
	}

	/* Store the memory address and size in the device data structure */
	mem->mapped_address = (uint64_t)(uintptr_t)map;
	mem->mapped_size = procmap.len;

	LOG_DEBUG(VHOST_CONFIG,
		"(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
		dev->device_fh,
		memfile, resolved_path,
		(unsigned long long)mem->mapped_size, map);

	return 0;
}
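
/*
 * Summary of the lookup above: the /proc/<pid>/maps entry whose start address
 * matches the region's QEMU virtual address names the file backing guest
 * memory; /proc/<pid>/fd is then scanned for a descriptor that resolves to
 * that file, which is mmap'd MAP_SHARED|MAP_POPULATE into this process. This
 * relies on QEMU backing guest RAM with a file (e.g. started with -mem-path).
 */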
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;
		ll_dev = ll_dev->next;
	}
	return NULL;
}

/*
 * Searches the configuration core linked list and
 * retrieves the device if it exists.
 */
static struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG,
		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev == NULL then this is the first device so go to else */
	if (ll_dev != NULL) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/*
			 * Increment through the ll until we find an unused
			 * device_fh. Insert the device at that entry.
			 */
			while ((ll_dev->next != NULL) &&
				(ll_dev->dev.device_fh ==
					(ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
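
/*
 * The list above is kept ordered by device_fh, so a new device is assigned
 * the smallest file handle not already in use (0 if the list is empty or the
 * first entry's handle is non-zero).
 */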
/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
	/* Unmap QEMU memory file if mapped. */
	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Close any event notifiers opened by device. */
	if (dev->virtqueue[VIRTIO_RXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
	if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
	if (dev->virtqueue[VIRTIO_TXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
	if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	/* Free any malloc'd memory */
	free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
	free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
	struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_root;
	}

	if (likely(ll_dev_last != NULL)) {
		ll_dev_last->next = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_dev_last->next;
	}

	cleanup_device(&ll_dev->dev);
	free_device(ll_dev);
	RTE_LOG(ERR, VHOST_CONFIG,
		"Remove entry from config_ll failed\n");
	return NULL;
}
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
	uint64_t vq_offset;

	/*
	 * Virtqueues have already been malloc'd so
	 * we don't want to set them to NULL.
	 */
	vq_offset = offsetof(struct virtio_net, mem);

	/* Set everything to 0. */
	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
		(sizeof(struct virtio_net) - (size_t)vq_offset));
	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));

	/* Backends are set to -1 indicating an inactive device. */
	dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
	dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;
	struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;

	/* Setup device and virtqueues. */
	new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_rx == NULL) {
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for rxq.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_tx == NULL) {
		free(virtqueue_rx);
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for txq.\n",
			ctx.fh);
		return -1;
	}

	new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
	new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;

	/* Initialise device and virtqueues. */
	init_device(&new_ll_dev->dev);

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	return new_ll_dev->dev.device_fh;
}
/*
 * Function is called from the CUSE release function. This function will
 * cleanup the device and remove it from device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/*
		 * If the device is found or
		 * a device that doesn't exist is found then it is removed.
		 */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call
			 * the function to remove it from the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
					ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success at the moment unless
 * the device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;
	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);
	cleanup_device(&ll_dev->dev);
	init_device(&ll_dev->dev);
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	/* Store the negotiated feature list for the device. */
	dev->features = *pu;

	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Mergeable RX buffers enabled\n",
			dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Mergeable RX buffers disabled\n",
			dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
	}
	return 0;
}
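
/*
 * vhost_hlen set above is the length of the virtio-net header that prefixes
 * every packet buffer: struct virtio_net_hdr_mrg_rxbuf (12 bytes) when
 * mergeable RX buffers were negotiated, plain struct virtio_net_hdr
 * (10 bytes) otherwise.
 */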
/*
 * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
 * This function creates and populates the memory structure for the device.
 * This includes storing offsets used to translate buffer addresses.
 */
static int
set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
	uint32_t nregions)
{
	struct virtio_net *dev;
	struct vhost_memory_region *mem_regions;
	struct virtio_memory *mem;
	uint64_t size = offsetof(struct vhost_memory, regions);
	uint32_t regionidx, valid_regions;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Malloc the memory structure depending on the number of regions. */
	mem = calloc(1, sizeof(struct virtio_memory) +
		(sizeof(struct virtio_memory_regions) * nregions));
	if (mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev->mem.\n",
			dev->device_fh);
		return -1;
	}

	mem->nregions = nregions;

	mem_regions = (void *)(uintptr_t)
		((uint64_t)(uintptr_t)mem_regions_addr + size);

	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		/* Populate the region structure for each region. */
		mem->regions[regionidx].guest_phys_address =
			mem_regions[regionidx].guest_phys_addr;
		mem->regions[regionidx].guest_phys_address_end =
			mem->regions[regionidx].guest_phys_address +
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].memory_size =
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].userspace_address =
			mem_regions[regionidx].userspace_addr;

		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n",
			dev->device_fh, regionidx,
			(void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
			mem->regions[regionidx].memory_size);

		/* Set the base address mapping. */
		if (mem->regions[regionidx].guest_phys_address == 0x0) {
			mem->base_address =
				mem->regions[regionidx].userspace_address;
			/* Map VM memory file */
			if (host_memory_map(dev, mem, ctx.pid,
				mem->base_address) != 0) {
				free(mem);
				return -1;
			}
		}
	}

	/* Check that we have a valid base address. */
	if (mem->base_address == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find base address of qemu memory file.\n",
			dev->device_fh);
		free(mem);
		return -1;
	}

	/*
	 * Check if all of our regions have valid mappings.
	 * Usually one does not exist in the QEMU memory file.
	 */
	valid_regions = mem->nregions;
	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		if ((mem->regions[regionidx].userspace_address <
			mem->base_address) ||
			(mem->regions[regionidx].userspace_address >
			(mem->base_address + mem->mapped_size)))
			valid_regions--;
	}

	/*
	 * If a region does not have a valid mapping,
	 * we rebuild our memory struct to contain only valid entries.
	 */
	if (valid_regions != mem->nregions) {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
			dev->device_fh);

		/*
		 * Re-populate the memory structure with only valid regions.
		 * Invalid regions are over-written with memmove.
		 */
		valid_regions = 0;
		for (regionidx = mem->nregions; 0 != regionidx--;) {
			if ((mem->regions[regionidx].userspace_address <
				mem->base_address) ||
				(mem->regions[regionidx].userspace_address >
				(mem->base_address + mem->mapped_size))) {
				memmove(&mem->regions[regionidx],
					&mem->regions[regionidx + 1],
					sizeof(struct virtio_memory_regions) *
						valid_regions);
			} else {
				valid_regions++;
			}
		}
	}
	mem->nregions = valid_regions;
	dev->mem = mem;

	/*
	 * Calculate the address offset for each region.
	 * This offset is used to identify the vhost virtual address
	 * corresponding to a QEMU guest physical address.
	 */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		dev->mem->regions[regionidx].address_offset =
			dev->mem->regions[regionidx].userspace_address -
			dev->mem->base_address +
			dev->mem->mapped_address -
			dev->mem->regions[regionidx].guest_phys_address;
	}

	return 0;
}
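
/*
 * With address_offset populated above, the data path can translate a guest
 * physical address gpa that falls inside region r with a single addition:
 * vhost_va = gpa + r.address_offset.
 */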
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->size = state->num;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
			addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find desc ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
			addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find avail ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
			addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find used ring address.\n",
			dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
			dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
			dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
			dev->device_fh, vq->used);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev = get_device(ctx);

	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;
	return 0;
}
/*
 * This function uses the eventfd_link kernel module to copy an eventfd file
 * descriptor provided by QEMU in to our process space.
 */
static int
eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
{
	int eventfd_link, ret;

	/* Open the character device to the kernel module. */
	eventfd_link = open(eventfd_cdev, O_RDWR);
	if (eventfd_link < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") eventfd_link module is not loaded\n",
			dev->device_fh);
		return -1;
	}

	/* Call the IOCTL to copy the eventfd. */
	ret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);
	close(eventfd_link);

	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") EVENTFD_COPY ioctl failed\n",
			dev->device_fh);
		return -1;
	}
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets
 * copied into our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_kick;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->kickfd)
		close((int)vq->kickfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_kick.source_fd = vq->kickfd;
	eventfd_kick.target_fd = file->fd;
	eventfd_kick.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_kick))
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us.
 * This fd gets copied into our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_call;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->callfd)
		close((int)vq->callfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_call.source_fd = vq->callfd;
	eventfd_call.target_fd = file->fd;
	eventfd_call.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_call))
		return -1;

	return 0;
}
/*
 * Function to get the tap device name from the provided file descriptor and
 * save it in the device structure.
 */
static int
get_ifname(struct virtio_net *dev, int tap_fd, int pid)
{
	struct eventfd_copy fd_tap;
	struct ifreq ifr;
	uint32_t size, ifr_size;
	int ret;

	fd_tap.source_fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	fd_tap.target_fd = tap_fd;
	fd_tap.target_pid = pid;

	if (eventfd_copy(dev, &fd_tap))
		return -1;

	ret = ioctl(fd_tap.source_fd, TUNGETIFF, &ifr);

	if (close(fd_tap.source_fd) < 0)
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") fd close failed\n",
			dev->device_fh);

	if (ret >= 0) {
		ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
		size = ifr_size > sizeof(dev->ifname) ?
				sizeof(dev->ifname) : ifr_size;

		strncpy(dev->ifname, ifr.ifr_name, size);
	} else {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") TUNGETIFF ioctl failed\n",
			dev->device_fh);
	}

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded,
 * we are provided with a valid fd for a tap device (not used by us).
 * If this happens then we can add the device to a data core.
 * When the virtio driver is removed we get fd = -1.
 * At that point we remove the device from the data core.
 * The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/*
	 * If the device isn't already running and both backend fds are set,
	 * we add the device.
	 */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
			get_ifname(dev, file->fd, ctx.pid);
			return notify_ops->new_device(dev);
		}
	/* Otherwise we remove it. */
	} else if (file->fd == VIRTIO_DEV_STOPPED) {
		notify_ops->destroy_device(dev);
	}

	return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call
 * functions when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,

	.get_features = get_features,
	.set_features = set_features,

	.set_mem_table = set_mem_table,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};

/*
 * Called by main to setup callbacks when registering CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}
int rte_vhost_enable_guest_notification(struct virtio_net *dev,
	uint16_t queue_id, int enable)
{
	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags =
		enable ? 0 : VRING_USED_F_NO_NOTIFY;
	return 0;
}
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}

int rte_vhost_feature_disable(uint64_t feature_mask)
{
	VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
	return 0;
}

int rte_vhost_feature_enable(uint64_t feature_mask)
{
	if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
		VHOST_FEATURES = VHOST_FEATURES | feature_mask;
		return 0;
	}
	return -1;
}
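
/*
 * Typical usage (sketch): an application that does not want mergeable RX
 * buffers can call
 *	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 * before any guest connects; the reduced set is then what get_features()
 * offers during negotiation.
 */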
/*
 * Register ops so that we can add/remove device to data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
	notify_ops = ops;
	return 0;
}