/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <fuse/cuse_lowlevel.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/if_tun.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_virtio_net.h>

#include "vhost_cuse/eventfd_copy.h"
#include "vhost-net.h"
/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
	struct virtio_net dev;			/* Virtio device. */
	struct virtio_net_config_ll *next;	/* Next dev on linked list. */
};
/* device ops to add/remove device to/from data core. */
static struct virtio_net_device_ops const *notify_ops;
/* root address of the linked list of managed virtio devices */
static struct virtio_net_config_ll *ll_root;
/* Features supported by this lib. */
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX))
static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
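/*
 * Example (sketch): an application that cannot handle mergeable RX buffers
 * can clear that bit before any negotiation happens, using
 * rte_vhost_feature_disable() defined near the end of this file:
 *
 *	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 *
 * Since get_features() reports VHOST_FEATURES to QEMU, a bit cleared here
 * is never negotiated by the guest.
 */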
/* Line size for reading maps file. */
static const uint32_t BUFSIZE = PATH_MAX;

/* Size of prot char array in procmap. */
#define PROT_SZ 5

/* Number of elements in procmap struct. */
#define PROCMAP_SZ 8

/* Structure containing information gathered from maps file. */
struct procmap {
	uint64_t va_start;	/* Start virtual address in file. */
	uint64_t len;		/* Size of file. */
	uint64_t pgoff;		/* Not used. */
	uint32_t maj;		/* Not used. */
	uint32_t min;		/* Not used. */
	uint32_t ino;		/* Not used. */
	char prot[PROT_SZ];	/* Not used. */
	char fname[PATH_MAX];	/* File name. */
};
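/*
 * For reference, a /proc/<pid>/maps line has the form (illustrative values):
 *
 *	2aaaaac00000-2aaaeac00000 rw-p 00000000 00:0f 1479 /dev/hugepages/<file>
 *
 * The delimiter string used below splits it into eight fields: start, end,
 * prot, pgoff, maj, min, ino and fname. The end address is initially stored
 * in procmap.len and only turned into a length once the mapping that starts
 * at the requested address has been found.
 */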
/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = dev->mem->mapped_address + qemu_va -
				dev->mem->base_address;
			break;
		}
	}
	return vhost_va;
}
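/*
 * The translation relies on the whole guest memory file being mapped
 * contiguously in both processes: QEMU sees it starting at
 * mem->base_address, while this process sees it at mem->mapped_address.
 * For instance (illustrative numbers only), with base_address =
 * 0x2aaaaac00000 and mapped_address = 0x7f0000000000, a ring address of
 * 0x2aaaaac10000 translates to 0x7f0000010000.
 */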
/*
 * Locate the file containing QEMU's memory space and
 * map it to our address space.
 */
static int
host_memory_map(struct virtio_net *dev, struct virtio_memory *mem,
	pid_t pid, uint64_t addr)
{
	struct dirent *dptr = NULL;
	struct procmap procmap;
	DIR *dp = NULL;
	int fd;
	int i;
	char memfile[PATH_MAX];
	char mapfile[PATH_MAX];
	char procdir[PATH_MAX];
	char resolved_path[PATH_MAX];
	char *path = NULL;
	FILE *fmap;
	void *map;
	uint8_t found = 0;
	char line[BUFSIZE];
	char dlm[] = "-   :   ";
	char *str, *sp, *in[PROCMAP_SZ];
	char *end = NULL;

	/* Path where mem files are located. */
	snprintf(procdir, PATH_MAX, "/proc/%u/fd/", pid);
	/* Maps file used to locate mem file. */
	snprintf(mapfile, PATH_MAX, "/proc/%u/maps", pid);

	fmap = fopen(mapfile, "r");
	if (fmap == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to open maps file for pid %d\n",
			dev->device_fh, pid);
		return -1;
	}

	/* Read through maps file until we find our base_address. */
	while (fgets(line, BUFSIZE, fmap) != 0) {
		str = line;
		errno = 0;
		/* Split line into fields. */
		for (i = 0; i < PROCMAP_SZ; i++) {
			in[i] = strtok_r(str, &dlm[i], &sp);
			if ((in[i] == NULL) || (errno != 0)) {
				fclose(fmap);
				return -1;
			}
			str = NULL;
		}
		/* Convert/Copy each field as needed. */
		procmap.va_start = strtoull(in[0], &end, 16);
		if ((*in[0] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.len = strtoull(in[1], &end, 16);
		if ((*in[1] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.pgoff = strtoull(in[3], &end, 16);
		if ((*in[3] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.maj = strtoul(in[4], &end, 16);
		if ((*in[4] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.min = strtoul(in[5], &end, 16);
		if ((*in[5] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		procmap.ino = strtoul(in[6], &end, 16);
		if ((*in[6] == '\0') || (end == NULL) || (*end != '\0') ||
			(errno != 0)) {
			fclose(fmap);
			return -1;
		}

		memcpy(&procmap.prot, in[2], PROT_SZ);
		memcpy(&procmap.fname, in[7], PATH_MAX);

		if (procmap.va_start == addr) {
			procmap.len = procmap.len - procmap.va_start;
			found = 1;
			break;
		}
	}
	fclose(fmap);

	if (!found) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find memory file in pid %d maps file\n",
			dev->device_fh, pid);
		return -1;
	}
	/* Find the guest memory file among the process fds. */
	dp = opendir(procdir);
	if (dp == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Cannot open pid %d process directory\n",
			dev->device_fh, pid);
		return -1;
	}

	found = 0;

	/* Read the fd directory contents. */
	while (NULL != (dptr = readdir(dp))) {
		snprintf(memfile, PATH_MAX, "/proc/%u/fd/%s",
				pid, dptr->d_name);
		path = realpath(memfile, resolved_path);
		if ((path == NULL) && (strlen(resolved_path) == 0)) {
			RTE_LOG(ERR, VHOST_CONFIG,
				"(%"PRIu64") Failed to resolve fd directory\n",
				dev->device_fh);
			closedir(dp);
			return -1;
		}
		if (strncmp(resolved_path, procmap.fname,
			strnlen(procmap.fname, PATH_MAX)) == 0) {
			found = 1;
			break;
		}
	}

	closedir(dp);

	if (found == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find memory file for pid %d\n",
			dev->device_fh, pid);
		return -1;
	}
	/* Open the shared memory file and map the memory into this process. */
	fd = open(memfile, O_RDWR);
	if (fd == -1) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to open %s for pid %d\n",
			dev->device_fh, memfile, pid);
		return -1;
	}

	map = mmap(0, (size_t)procmap.len, PROT_READ|PROT_WRITE,
		MAP_POPULATE|MAP_SHARED, fd, 0);
	close(fd);

	if (map == MAP_FAILED) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Error mapping the file %s for pid %d\n",
			dev->device_fh, memfile, pid);
		return -1;
	}

	/* Store the memory address and size in the device data structure. */
	mem->mapped_address = (uint64_t)(uintptr_t)map;
	mem->mapped_size = procmap.len;

	LOG_DEBUG(VHOST_CONFIG,
		"(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n",
		dev->device_fh,
		memfile, resolved_path,
		(unsigned long long)mem->mapped_size, map);

	return 0;
}
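/*
 * Note: this lookup only succeeds when the guest's memory is backed by a
 * file the QEMU process still holds open, e.g. when QEMU is started with
 * hugepage-backed memory (-mem-path pointing at a hugetlbfs mount and
 * -mem-prealloc). Anonymous guest memory has no entry under /proc/<pid>/fd
 * and cannot be mapped this way.
 */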
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;
		ll_dev = ll_dev->next;
	}

	return NULL;
}
/*
 * Searches the configuration core linked list and
 * retrieves the device if it exists.
 */
static struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG,
		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev == NULL then this is the first device so go to else. */
	if (ll_dev) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/*
			 * Increment through the ll until we find an unused
			 * device_fh. Insert the device at that entry.
			 */
			while ((ll_dev->next != NULL) &&
				(ll_dev->dev.device_fh ==
				(ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
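/*
 * Worked example: with existing device_fh values 0, 1 and 3, the walk above
 * stops at the entry with fh 1 (because 1 != 3 - 1), so the new device
 * receives fh 2 and is linked between 1 and 3. If fh 0 is missing from the
 * list, the new device takes fh 0 and becomes the new list head instead.
 */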
/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
	/* Unmap QEMU memory file if mapped. */
	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Close any event notifiers opened by device. */
	if (dev->virtqueue[VIRTIO_RXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->callfd);
	if (dev->virtqueue[VIRTIO_RXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_RXQ]->kickfd);
	if (dev->virtqueue[VIRTIO_TXQ]->callfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->callfd);
	if (dev->virtqueue[VIRTIO_TXQ]->kickfd)
		close((int)dev->virtqueue[VIRTIO_TXQ]->kickfd);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	/* Free any malloc'd memory. */
	free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
	free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
	free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
	struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_root;
	} else {
		if (likely(ll_dev_last != NULL)) {
			ll_dev_last->next = ll_dev->next;
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			return ll_dev_last->next;
		} else {
			cleanup_device(&ll_dev->dev);
			free_device(ll_dev);
			RTE_LOG(ERR, VHOST_CONFIG,
				"Remove entry from config_ll failed\n");
			return NULL;
		}
	}
}
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
	uint64_t vq_offset;

	/*
	 * Virtqueues have already been malloced so
	 * we don't want to set them to NULL.
	 */
	vq_offset = offsetof(struct virtio_net, mem);

	/* Set everything to 0. */
	memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
		(sizeof(struct virtio_net) - (size_t)vq_offset));
	memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
	memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));

	/* Backends are set to -1 indicating an inactive device. */
	dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
	dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;
	struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;

	/* Setup device and virtqueues. */
	new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_rx == NULL) {
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for rxq.\n",
			ctx.fh);
		return -1;
	}

	virtqueue_tx = malloc(sizeof(struct vhost_virtqueue));
	if (virtqueue_tx == NULL) {
		free(virtqueue_rx);
		free(new_ll_dev);
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for txq.\n",
			ctx.fh);
		return -1;
	}

	new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
	new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;

	/* Initialise device and virtqueues. */
	init_device(&new_ll_dev->dev);

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	return new_ll_dev->dev.device_fh;
}
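/*
 * The device_fh returned here is the handle that later callbacks receive as
 * ctx.fh; get_config_ll_entry() matches it against the linked list to find
 * the same device again on every subsequent IOCTL.
 */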
/*
 * Function is called from the CUSE release function. This function will
 * cleanup the device and remove it from device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/*
		 * If the device is found or
		 * a device that doesn't exist is found then it is removed.
		 */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call
			 * the function to remove it from the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
					ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success at the moment unless
 * the device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);

	cleanup_device(&ll_dev->dev);
	init_device(&ll_dev->dev);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	/* Store the negotiated feature list for the device. */
	dev->features = *pu;

	/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Mergeable RX buffers enabled\n",
			dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Mergeable RX buffers disabled\n",
			dev->device_fh);
		dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
		dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
			sizeof(struct virtio_net_hdr);
	}
	return 0;
}
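/*
 * vhost_hlen is the length of the virtio-net header that precedes every
 * packet in the ring: struct virtio_net_hdr is 10 bytes, while the
 * mergeable-RX variant appends a 16-bit num_buffers field for a total of
 * 12 bytes, so the data-path code skips the right amount per descriptor.
 */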
/*
 * Called from CUSE IOCTL: VHOST_SET_MEM_TABLE
 * This function creates and populates the memory structure for the device.
 * This includes storing offsets used to translate buffer addresses.
 */
static int
set_mem_table(struct vhost_device_ctx ctx, const void *mem_regions_addr,
	uint32_t nregions)
{
	struct virtio_net *dev;
	struct vhost_memory_region *mem_regions;
	struct virtio_memory *mem;
	uint64_t size = offsetof(struct vhost_memory, regions);
	uint32_t regionidx, valid_regions;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Malloc the memory structure depending on the number of regions. */
	mem = calloc(1, sizeof(struct virtio_memory) +
		(sizeof(struct virtio_memory_regions) * nregions));
	if (mem == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev->mem.\n",
			dev->device_fh);
		return -1;
	}

	mem->nregions = nregions;

	mem_regions = (void *)(uintptr_t)
		((uint64_t)(uintptr_t)mem_regions_addr + size);

	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		/* Populate the region structure for each region. */
		mem->regions[regionidx].guest_phys_address =
			mem_regions[regionidx].guest_phys_addr;
		mem->regions[regionidx].guest_phys_address_end =
			mem->regions[regionidx].guest_phys_address +
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].memory_size =
			mem_regions[regionidx].memory_size;
		mem->regions[regionidx].userspace_address =
			mem_regions[regionidx].userspace_addr;

		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n",
			dev->device_fh, regionidx,
			(void *)(uintptr_t)mem->regions[regionidx].guest_phys_address,
			(void *)(uintptr_t)mem->regions[regionidx].userspace_address,
			mem->regions[regionidx].memory_size);

		/* Set the base address mapping. */
		if (mem->regions[regionidx].guest_phys_address == 0x0) {
			mem->base_address =
				mem->regions[regionidx].userspace_address;
			/* Map VM memory file. */
			if (host_memory_map(dev, mem, ctx.pid,
				mem->base_address) != 0) {
				free(mem);
				return -1;
			}
		}
	}

	/* Check that we have a valid base address. */
	if (mem->base_address == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find base address of qemu memory file.\n",
			dev->device_fh);
		free(mem);
		return -1;
	}

	/*
	 * Check if all of our regions have valid mappings.
	 * Usually one does not exist in the QEMU memory file.
	 */
	valid_regions = mem->nregions;
	for (regionidx = 0; regionidx < mem->nregions; regionidx++) {
		if ((mem->regions[regionidx].userspace_address <
			mem->base_address) ||
			(mem->regions[regionidx].userspace_address >
			(mem->base_address + mem->mapped_size)))
			valid_regions--;
	}

	/*
	 * If a region does not have a valid mapping,
	 * we rebuild our memory struct to contain only valid entries.
	 */
	if (valid_regions != mem->nregions) {
		LOG_DEBUG(VHOST_CONFIG,
			"(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
			dev->device_fh);

		/*
		 * Re-populate the memory structure with only valid regions.
		 * Invalid regions are over-written with memmove.
		 */
		valid_regions = 0;
		for (regionidx = mem->nregions; 0 != regionidx--;) {
			if ((mem->regions[regionidx].userspace_address <
				mem->base_address) ||
				(mem->regions[regionidx].userspace_address >
				(mem->base_address + mem->mapped_size))) {
				memmove(&mem->regions[regionidx],
					&mem->regions[regionidx + 1],
					sizeof(struct virtio_memory_regions) *
						valid_regions);
			} else {
				valid_regions++;
			}
		}
	}
	mem->nregions = valid_regions;
	dev->mem = mem;

	/*
	 * Calculate the address offset for each region.
	 * This offset is used to identify the vhost virtual address
	 * corresponding to a QEMU guest physical address.
	 */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		dev->mem->regions[regionidx].address_offset =
			dev->mem->regions[regionidx].userspace_address -
			dev->mem->base_address +
			dev->mem->mapped_address -
			dev->mem->regions[regionidx].guest_phys_address;
	}

	return 0;
}
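/*
 * With address_offset precomputed, translating a guest physical address gpa
 * that falls inside a region reduces to a single addition:
 *
 *	vva = gpa + region->address_offset;
 *
 * which expands to mapped_address + (userspace_address - base_address)
 * + (gpa - guest_phys_address), i.e. the region's position inside the
 * mapped QEMU memory file plus the offset of gpa within that region.
 */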
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->size = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
			addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find desc ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
			addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find avail ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
			addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find used ring address.\n",
			dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
			dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
			dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
			dev->device_fh, vq->used);

	return 0;
}
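/*
 * The three addresses delivered by VHOST_SET_VRING_ADDR point into the vring
 * the guest driver allocated in its own memory: an array of 16-byte
 * descriptors, the avail ring (flags, idx and one 16-bit entry per
 * descriptor) and the used ring (flags, idx and one 8-byte element per
 * descriptor). Only their locations differ per queue; the layout itself is
 * fixed by the virtio specification.
 */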
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets
 * copied into our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->kickfd)
		close((int)vq->kickfd);

	vq->kickfd = file->fd;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us.
 * This fd gets copied into our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->callfd)
		close((int)vq->callfd);

	vq->callfd = file->fd;

	return 0;
}
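/*
 * In vhost terminology the "kick" eventfd is the one the guest signals after
 * placing buffers on the avail ring, and the "call" eventfd is the one the
 * host signals to raise a guest interrupt after updating the used ring.
 * Both arrive here already copied into this process's fd space.
 */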
/*
 * Function to get the tap device name from the provided file descriptor and
 * save it in the device structure.
 */
static int
get_ifname(struct virtio_net *dev, int tap_fd, int pid)
{
	struct ifreq ifr;
	uint32_t size, ifr_size;
	int fd_tap, ret;

	/* Duplicate the tap fd from the QEMU process into our process. */
	fd_tap = eventfd_copy(tap_fd, pid);
	if (fd_tap < 0)
		return -1;

	ret = ioctl(fd_tap, TUNGETIFF, &ifr);

	if (close(fd_tap) < 0)
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") fd close failed\n",
			dev->device_fh);

	if (ret >= 0) {
		ifr_size = strnlen(ifr.ifr_name, sizeof(ifr.ifr_name));
		size = ifr_size > sizeof(dev->ifname) ?
				sizeof(dev->ifname) : ifr_size;

		strncpy(dev->ifname, ifr.ifr_name, size);
	} else
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") TUNGETIFF ioctl failed\n",
			dev->device_fh);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded,
 * we are provided with a valid fd for a tap device (not used by us).
 * If this happens then we can add the device to a data core.
 * When the virtio driver is removed we get fd=-1.
 * At that point we remove the device from the data core.
 * The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/*
	 * If the device isn't already running and both backend fds are set,
	 * we add the device.
	 */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
			get_ifname(dev, file->fd, ctx.pid);
			return notify_ops->new_device(dev);
		}
	/* Otherwise we remove it. */
	} else if (file->fd == VIRTIO_DEV_STOPPED)
		notify_ops->destroy_device(dev);

	return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call
 * functions when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,

	.get_features = get_features,
	.set_features = set_features,

	.set_mem_table = set_mem_table,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};
/*
 * Called by main to setup callbacks when registering CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}
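/*
 * Example (sketch): the CUSE character-device code retrieves this table once
 * and dispatches each IOCTL through it, roughly:
 *
 *	static struct vhost_net_device_ops const *ops;
 *
 *	ops = get_virtio_net_callbacks();
 *	...
 *	ops->get_features(ctx, &features);
 *
 * The actual dispatch lives in the CUSE cdev source, not in this file.
 */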
int rte_vhost_enable_guest_notification(struct virtio_net *dev,
	uint16_t queue_id, int enable)
{
	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags =
		enable ? 0 : VRING_USED_F_NO_NOTIFY;
	return 0;
}
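/*
 * VRING_USED_F_NO_NOTIFY in the used ring's flags tells the guest driver it
 * may skip kicking the host after adding buffers to the avail ring; clearing
 * it would re-enable those notifications. This implementation only supports
 * leaving notifications suppressed, hence the error when enable is requested.
 */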
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}

int rte_vhost_feature_disable(uint64_t feature_mask)
{
	VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
	return 0;
}

int rte_vhost_feature_enable(uint64_t feature_mask)
{
	if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
		VHOST_FEATURES = VHOST_FEATURES | feature_mask;
		return 0;
	}
	return -1;
}

/*
 * Register ops so that we can add/remove device to data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
	notify_ops = ops;

	return 0;
}