/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fuse/cuse_lowlevel.h>
#include <inttypes.h>
#include <limits.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_virtio_net.h>

#include "vhost-net-cdev.h"
#include "eventfd_link/eventfd_link.h"
/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
	struct virtio_net dev;			/* Virtio device. */
	struct virtio_net_config_ll *next;	/* Next entry on linked list. */
};

const char eventfd_cdev[] = "/dev/eventfd-link";

/* Device ops used to add/remove a device to/from the data core. */
static struct virtio_net_device_ops const *notify_ops;
/* Root address of the linked list in the configuration core. */
static struct virtio_net_config_ll *ll_root;

/* Features supported by this application. RX merge buffers are enabled by default. */
#define VHOST_SUPPORTED_FEATURES (1ULL << VIRTIO_NET_F_MRG_RXBUF)
static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
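
/*
 * Illustrative use only (not part of this file): an application that cannot
 * handle mergeable RX buffers could clear the bit before the driver is
 * started, using the API implemented further below:
 *
 *	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 */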
/* Line size for reading maps file. */
const uint32_t BUFSIZE = PATH_MAX;

/* Size of prot char array in procmap. */
#define PROT_SZ 5

/* Number of elements in procmap struct. */
#define PROCMAP_SZ 8

/* Structure containing information gathered from maps file. */
struct procmap {
	uint64_t va_start;	/* Start virtual address in file. */
	uint64_t len;		/* Size of file. */
	uint64_t pgoff;		/* Not used. */
	uint32_t maj;		/* Not used. */
	uint32_t min;		/* Not used. */
	uint32_t ino;		/* Not used. */
	char prot[PROT_SZ];	/* Not used. */
	char fname[PATH_MAX];	/* File name. */
};
/*
 * Converts a QEMU virtual address to a Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = dev->mem->mapped_address + qemu_va -
					dev->mem->base_address;
			break;
		}
	}
	return vhost_va;
}
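
/*
 * Worked example with made-up addresses (not taken from a real trace): if the
 * region QEMU reports at guest physical 0 has userspace_address (QEMU VA)
 * 0x2aaaaac00000, and host_memory_map() later mmap()s that guest memory file
 * at mapped_address 0x7f2000000000, then base_address = 0x2aaaaac00000 and a
 * ring address qemu_va = 0x2aaaaac01000 translates to
 * vhost_va = 0x7f2000000000 + 0x2aaaaac01000 - 0x2aaaaac00000 = 0x7f2000001000.
 */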
/*
 * Locate the file containing QEMU's memory space and map it to our address space.
 */
static int
host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
	pid_t pid, uint64_t addr)
{
	struct dirent *dptr = NULL;
	struct procmap procmap;
	DIR *dp = NULL;
	int fd, i;
	char memfile[PATH_MAX];
	char mapfile[PATH_MAX];
	char procdir[PATH_MAX];
	char resolved_path[PATH_MAX];
	FILE *fmap;
	void *map;
	uint8_t found = 0;
	char line[BUFSIZE];
	char dlm[] = "-   :   ";
	char *str, *sp, *in[PROCMAP_SZ];
	char *end = NULL;

	/* Path where mem files are located. */
	snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
	/* Maps file used to locate mem file. */
	snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);

	fmap = fopen(mapfile, "r");
	if (fmap == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to open maps file for pid %d\n",
			dev->device_fh, pid);
		return -1;
	}

	/* Read through maps file until we find our base_address. */
	while (fgets(line, BUFSIZE, fmap) != 0) {
		str = line;
		errno = 0;
		/* Split line into fields. */
		for (i = 0; i < PROCMAP_SZ; i++) {
			in[i] = strtok_r(str, &dlm[i], &sp);
			if ((in[i] == NULL) || (errno != 0)) {
				fclose(fmap);
				return -1;
			}
			str = NULL;
		}

		/* Convert/Copy each field as needed. */
		procmap.va_start = strtoull(in[0], &end, 16);
		if ((*in[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.len = strtoull(in[1], &end, 16);
		if ((*in[1] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.pgoff = strtoull(in[3], &end, 16);
		if ((*in[3] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.maj = strtoul(in[4], &end, 16);
		if ((*in[4] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.min = strtoul(in[5], &end, 16);
		if ((*in[5] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}
		procmap.ino = strtoul(in[6], &end, 16);
		if ((*in[6] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0)) {
			fclose(fmap);
			return -1;
		}

		memcpy(&procmap.prot, in[2], PROT_SZ);
		memcpy(&procmap.fname, in[7], PATH_MAX);

		if (procmap.va_start == addr) {
			procmap.len = procmap.len - procmap.va_start;
			found = 1;
			break;
		}
	}
	fclose(fmap);

	if (!found) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
		return -1;
	}

	/* Find the guest memory file among the process fds. */
	dp = opendir(procdir);
	if (dp == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory\n", dev->device_fh, pid);
		return -1;
	}

	found = 0;

	/* Read the fd directory contents. */
	while (NULL != (dptr = readdir(dp))) {
		snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
			pid, dptr->d_name);
		if (realpath(memfile, resolved_path) == NULL) {
			RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
			closedir(dp);
			return -1;
		}
		if (strncmp(resolved_path, procmap.fname,
			strnlen(procmap.fname, PATH_MAX)) == 0) {
			found = 1;
			break;
		}
	}
	closedir(dp);

	if (found == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
		return -1;
	}

	/* Open the shared memory file and map the memory into this process. */
	fd = open(memfile, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
		return -1;
	}

	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
		MAP_POPULATE|MAP_SHARED, fd, 0);
	close(fd);

	if (map == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
		return -1;
	}

	/* Store the memory address and size in the device data structure. */
	mem->mapped_address = (uint64_t)(uintptr_t)map;
	mem->mapped_size = procmap.len;

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
		memfile, resolved_path, (long long unsigned)mem->mapped_size, map);

	return 0;
}
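
/*
 * For reference, a fabricated /proc/<pid>/maps line of the form this parser
 * expects, split by the "-   :   " delimiter set into PROCMAP_SZ fields:
 *
 *	2aaaaac00000-2aaaeac00000 rw-s 00000000 00:0e 12345  /dev/hugepages/...
 *
 * i.e. start, end, prot, pgoff, major, minor, inode and file name; only the
 * start address, the length (end - start) and the file name are actually used.
 */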
/*
 * Retrieves an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;
		ll_dev = ll_dev->next;
	}

	return NULL;
}
/*
 * Searches the configuration core linked list and retrieves the device if it exists.
 */
static struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);

	/* If a matching entry is found in the linked list, return the device in that entry. */
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev == NULL then this is the first device, so go to else. */
	if (ll_dev) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/* Increment through the list until we find an unused device_fh. Insert the device at that entry. */
			while ((ll_dev->next != NULL) &&
				(ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
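
/*
 * Example of the fh assignment above (hypothetical list states): with an empty
 * list the new device gets fh 0; with devices 0 and 1 present it gets fh 2;
 * if device 1 was released so the list holds 0 and 2, the walk stops at 0 and
 * the new device re-uses fh 1, keeping handles dense.
 */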
/*
 * Unmap any memory, close any file descriptors and free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
	/* Unmap QEMU memory file if mapped. */
	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Close any event notifiers opened by device. */
	if (dev->virtqueue[VIRTIO_RXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
	if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
	if (dev->virtqueue[VIRTIO_TXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
	if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	/* Free any malloc'd memory. */
	free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
	free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
	struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_root;
	} else {
		if (likely(ll_dev_last != NULL)) {
			ll_dev_last->next = ll_dev->next;
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			return ll_dev_last->next;
		} else {
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			RTE_LOG(ERR, VHOST_CONFIG,
				"Remove entry from config_ll failed\n");
			return NULL;
		}
	}
}
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
	uint64_t vq_offset;

	/* Virtqueues have already been malloc'd so we don't want to set them to NULL. */
	vq_offset = offsetof(struct virtio_net, mem);

	/* Set everything to 0. */
	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
		(sizeof(struct virtio_net) - (size_t)vq_offset));
	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));

	/* Backends are set to -1 indicating an inactive device. */
	dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
	dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;
	struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;

	/* Setup device and virtqueues. */
	new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_rx == NULL) {
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for rxq.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_tx == NULL) {
		free(virtqueue_rx);
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for txq.\n",
			ctx.fh);
		return -1;
	}

	new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
	new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;

	/* Initialise device and virtqueues. */
	init_device(&new_ll_dev->dev);

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	return new_ll_dev->dev.device_fh;
}
/*
 * Function is called from the CUSE release function. This function will clean up
 * the device and remove it from the device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/* If the device is found then it is removed. */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call the function to remove it from
			 * the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * Currently this simply returns success, unless the device has not been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 * The device is cleaned up and returned to its freshly initialised state.
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);

	cleanup_device(&ll_dev->dev);
	init_device(&ll_dev->dev);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated set of features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	/* Store the negotiated feature list for the device. */
	dev->features = *pu;

	/* Set the vhost_hlen depending on whether VIRTIO_NET_F_MRG_RXBUF was negotiated. */
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
	}
	return 0;
}
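
/*
 * For reference, with the standard virtio-net header layout this selects a
 * 12 byte per-packet header (struct virtio_net_hdr_mrg_rxbuf, which appends a
 * 16-bit num_buffers field) when mergeable RX buffers are negotiated, and a
 * 10 byte header (struct virtio_net_hdr) otherwise.
 */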
/*
 * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
 * This function creates and populates the memory structure for the device. This includes
 * storing offsets used to translate buffer addresses.
 */
static int
set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
	uint32_t nregions)
{
	struct virtio_net *dev;
	struct vhost_memory_region *mem_regions;
	struct virtio_memory *mem;
	uint64_t size = offsetof(struct vhost_memory, regions);
	uint32_t regionidx, valid_regions;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Malloc the memory structure depending on the number of regions. */
	mem = calloc(1, sizeof(struct virtio_memory) +
		(sizeof(struct virtio_memory_regions) * nregions));
	if (mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
		return -1;
	}

	mem->nregions = nregions;

	mem_regions = (void *)(uintptr_t)
		((uint64_t)(uintptr_t)mem_regions_addr + size);

	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		/* Populate the region structure for each region. */
		mem->regions[regionidx].guest_phys_address =
			mem_regions[regionidx].guest_phys_addr;
		mem->regions[regionidx].guest_phys_address_end =
			mem->regions[regionidx].guest_phys_address +
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].memory_size =
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].userspace_address =
			mem_regions[regionidx].userspace_addr;

		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
			regionidx, (void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
			mem->regions[regionidx].memory_size);

		/* Set the base address mapping. */
		if (mem->regions[regionidx].guest_phys_address == 0x0) {
			mem->base_address = mem->regions[regionidx].userspace_address;
			/* Map VM memory file. */
			if (host_memory_map(dev, mem, ctx.pid, mem->base_address) != 0) {
				free(mem);
				return -1;
			}
		}
	}

	/* Check that we have a valid base address. */
	if (mem->base_address == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
		free(mem);
		return -1;
	}

	/* Check if all of our regions have valid mappings. Usually one does not exist in the QEMU memory file. */
	valid_regions = mem->nregions;
	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
			(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size)))
			valid_regions--;
	}

	/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
	if (valid_regions != mem->nregions) {
		LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
			dev->device_fh);

		/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
		valid_regions = 0;
		for (regionidx = mem->nregions; 0 != regionidx--;) {
			if ((mem->regions[regionidx].userspace_address < mem->base_address) ||
				(mem->regions[regionidx].userspace_address > (mem->base_address + mem->mapped_size))) {
				memmove(&mem->regions[regionidx], &mem->regions[regionidx + 1],
					sizeof(struct virtio_memory_regions) * valid_regions);
			} else {
				valid_regions++;
			}
		}
	}
	mem->nregions = valid_regions;
	dev->mem = mem;

	/*
	 * Calculate the address offset for each region. This offset is used to identify the vhost virtual address
	 * corresponding to a QEMU guest physical address.
	 */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		dev->mem->regions[regionidx].address_offset =
			dev->mem->regions[regionidx].userspace_address - dev->mem->base_address +
			dev->mem->mapped_address - dev->mem->regions[regionidx].guest_phys_address;
	}

	return 0;
}
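
/*
 * Worked example of the offset above, with made-up addresses: for the region
 * at guest physical address 0 with userspace_address == base_address ==
 * 0x2aaaaac00000 and mapped_address 0x7f2000000000, address_offset reduces to
 * mapped_address (0x7f2000000000), so a guest physical buffer address such as
 * 0x1000 corresponds to vhost virtual address 0x7f2000001000.
 */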
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[state->index]->size = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses. This function
 * then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
		return -1;
	}

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;

	return 0;
}
/*
 * This function uses the eventfd_link kernel module to copy an eventfd file descriptor
 * provided by QEMU into our process space.
 */
static int
eventfd_copy(struct virtio_net *dev, struct eventfd_copy *eventfd_copy)
{
	int eventfd_link, ret;

	/* Open the character device to the kernel module. */
	eventfd_link = open(eventfd_cdev, O_RDWR);
	if (eventfd_link < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n", dev->device_fh);
		return -1;
	}

	/* Call the IOCTL to copy the eventfd. */
	ret = ioctl(eventfd_link, EVENTFD_COPY, eventfd_copy);
	close(eventfd_link);

	if (ret < 0) {
		RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n", dev->device_fh);
		return -1;
	}

	return 0;
}
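
/*
 * Note on the mechanism: there is no unix-socket channel here over which QEMU
 * could pass file descriptors directly, so the eventfd_link module is used to
 * duplicate the eventfd identified by target_pid/target_fd in the QEMU
 * process onto our locally created eventfd (source_fd) via the EVENTFD_COPY
 * ioctl.
 */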
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets copied into
 * our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_kick;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->kickfd)
		close((int)vq->kickfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->kickfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_kick.source_fd = vq->kickfd;
	eventfd_kick.target_fd = file->fd;
	eventfd_kick.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_kick))
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us. This fd gets copied into
 * our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct eventfd_copy eventfd_call;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->callfd)
		close((int)vq->callfd);

	/* Populate the eventfd_copy structure and call eventfd_copy. */
	vq->callfd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
	eventfd_call.source_fd = vq->callfd;
	eventfd_call.target_fd = file->fd;
	eventfd_call.target_pid = ctx.pid;

	if (eventfd_copy(dev, &eventfd_call))
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded, we are provided with a
 * valid fd for a tap device (not used by us). If this happens then we can add the device to a
 * data core. When the virtio driver is removed we get fd = -1. At that point we remove the device
 * from the data core. The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The TX queue is 1, RX queue is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/* If the device isn't already running and both backend fds are set we add the device. */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
			return notify_ops->new_device(dev);
	/* Otherwise we remove it. */
	} else if (file->fd == VIRTIO_DEV_STOPPED) {
		notify_ops->destroy_device(dev);
	}

	return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call functions
 * when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,

	.get_features = get_features,
	.set_features = set_features,

	.set_mem_table = set_mem_table,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};
/*
 * Called by main to setup callbacks when registering the CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}
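
/*
 * Disable guest notifications for a queue: clearing 'enable' sets
 * VRING_USED_F_NO_NOTIFY in the used ring flags; enabling notifications is
 * not supported by this implementation.
 */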
int rte_vhost_enable_guest_notification(struct virtio_net *dev,
	uint16_t queue_id, int enable)
{
	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG, "guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags = enable ? 0 : VRING_USED_F_NO_NOTIFY;
	return 0;
}
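
/* Return the feature bits this vhost implementation currently advertises. */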
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}
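
/* Clear the given bits from the advertised feature set. Always succeeds. */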
int rte_vhost_feature_disable(uint64_t feature_mask)
{
	VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
	return 0;
}
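
/*
 * Re-enable feature bits, which must be a subset of VHOST_SUPPORTED_FEATURES;
 * returns -1 if any requested bit is not supported.
 */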
int rte_vhost_feature_enable(uint64_t feature_mask)
{
	if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
		VHOST_FEATURES = VHOST_FEATURES | feature_mask;
		return 0;
	}
	return -1;
}
/*
 * Register ops so that we can add/remove devices to/from the data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)