/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <sys/socket.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "vhost-net.h"
#include "virtio-net.h"

/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
	struct virtio_net dev;			/* Virtio device. */
	struct virtio_net_config_ll *next;	/* Next dev on linked list. */
};

/* Device ops to add/remove device to/from data core. */
struct virtio_net_device_ops const *notify_ops;
/* Root address of the linked list of managed virtio devices. */
static struct virtio_net_config_ll *ll_root;

#define VHOST_USER_F_PROTOCOL_FEATURES	30

/* Features supported by this lib. */
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(VHOST_SUPPORTS_MQ)            | \
				(1ULL << VIRTIO_F_VERSION_1)   | \
				(1ULL << VHOST_F_LOG_ALL)      | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))

static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
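
/*
 * Note: the default set above can be trimmed by the application before any
 * device connects. Illustrative sketch using the public APIs defined at the
 * bottom of this file:
 *
 *	rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 *	uint64_t feats = rte_vhost_feature_get();
 */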
/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = qemu_va + region->guest_phys_address +
				region->address_offset -
				region->userspace_address;
			break;
		}
	}

	return vhost_va;
}
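
/*
 * Worked example (illustrative values; assumes address_offset was set up so
 * that guest_phys_address + address_offset equals the start of our local
 * mapping of the region): with userspace_address = 0x7f0000000000 and
 * qemu_va = 0x7f0000001000, the result is the local mapping start plus
 * 0x1000, i.e. the same offset into the region as seen from our side.
 */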
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;
		ll_dev = ll_dev->next;
	}

	return NULL;
}

/*
 * Searches the configuration core linked list and
 * retrieves the device if it exists.
 */
struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG,
		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}

/*
 * Add an entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev == NULL the list is empty: fall through to the else. */
	if (ll_dev != NULL) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/*
			 * Increment through the ll until we find an unused
			 * device_fh. Insert the device at that entry.
			 */
			while ((ll_dev->next != NULL) &&
				(ll_dev->dev.device_fh ==
					(ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
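
/*
 * Example of the fh allocation behaviour above (sketch): device_fh values
 * are handed out as the smallest unused value, so three consecutive adds
 * yield fh 0, 1, 2; if fh 1 is then removed, the next add re-uses fh 1
 * rather than taking 3.
 */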
static void
cleanup_vq(struct vhost_virtqueue *vq)
{
	if (vq->callfd >= 0)
		close(vq->callfd);
	if (vq->kickfd >= 0)
		close(vq->kickfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
	uint32_t i;

	/* Unmap QEMU memory file if mapped. */
	if (dev->mem) {
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);
		free(dev->mem);
	}

	/* Close any event notifiers opened by the device. */
	for (i = 0; i < dev->virt_qp_nb; i++) {
		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ]);
		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ]);
	}
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	uint32_t i;

	for (i = 0; i < ll_dev->dev.virt_qp_nb; i++)
		rte_free(ll_dev->dev.virtqueue[i * VIRTIO_QNUM]);

	rte_free(ll_dev);
}

/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
	struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_root;
	}

	if (likely(ll_dev_last != NULL)) {
		ll_dev_last->next = ll_dev->next;
		cleanup_device(&ll_dev->dev);
		free_device(ll_dev);
		return ll_dev_last->next;
	}

	cleanup_device(&ll_dev->dev);
	free_device(ll_dev);
	RTE_LOG(ERR, VHOST_CONFIG,
		"Remove entry from config_ll failed\n");
	return NULL;
}

static void
init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
{
	memset(vq, 0, sizeof(struct vhost_virtqueue));

	vq->kickfd = -1;
	vq->callfd = -1;

	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	/* Always set the default vq pair to enabled. */
	if (qp_idx == 0)
		vq->enabled = 1;
}

static void
init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
	uint32_t base_idx = qp_idx * VIRTIO_QNUM;

	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
}

static int
alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
	struct vhost_virtqueue *virtqueue = NULL;
	uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
	uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;

	/* One allocation backs both queues of the pair. */
	virtqueue = rte_malloc(NULL,
		sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
	if (virtqueue == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for virt qp:%d.\n", qp_idx);
		return -1;
	}

	dev->virtqueue[virt_rx_q_idx] = virtqueue;
	dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;

	init_vring_queue_pair(dev, qp_idx);

	dev->virt_qp_nb += 1;

	return 0;
}
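
/*
 * Layout sketch: queues live two-per-pair in a flat array, so for queue
 * pair N (with VIRTIO_QNUM == 2, VIRTIO_RXQ == 0, VIRTIO_TXQ == 1):
 *
 *	dev->virtqueue[N * VIRTIO_QNUM + VIRTIO_RXQ]	RX queue
 *	dev->virtqueue[N * VIRTIO_QNUM + VIRTIO_TXQ]	TX queue
 *
 * e.g. pair 1 occupies slots 2 and 3, both backed by one allocation.
 */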
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
	uint64_t vq_offset;
	uint32_t i;

	/*
	 * Virtqueues have already been malloced so
	 * we don't want to set them to NULL.
	 */
	vq_offset = offsetof(struct virtio_net, virtqueue);
	memset(dev, 0, vq_offset);

	for (i = 0; i < dev->virt_qp_nb; i++)
		init_vring_queue_pair(dev, i);
}
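
/*
 * Note on the partial memset above: only the bytes that precede the
 * virtqueue member of struct virtio_net are cleared, so the virtqueue
 * pointer array (and anything laid out after it) survives a re-init of
 * an already-allocated device.
 */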
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;

	/* Set up device and virtqueues. */
	new_ll_dev = rte_malloc(NULL, sizeof(struct virtio_net_config_ll), 0);
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	/* Initialise device and virtqueues. */
	init_device(&new_ll_dev->dev);

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	return new_ll_dev->dev.device_fh;
}

/*
 * Function is called from the CUSE release function. This function will
 * clean up the device and remove it from the device configuration linked
 * list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/*
		 * If the device is found or
		 * a device that doesn't exist is found then it is removed.
		 */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call
			 * the function to remove it from the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
					ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}
}

static void
set_ifname(struct vhost_device_ctx ctx,
	const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(ctx);
	if (dev == NULL)
		return;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);
	/* Guarantee NUL termination; strncpy does not when len is hit. */
	dev->ifname[sizeof(dev->ifname) - 1] = '\0';
}

/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success at the moment unless
 * the device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;
	uint64_t device_fh;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	device_fh = dev->device_fh;
	cleanup_device(dev);
	init_device(dev);
	dev->device_fh = device_fh;
	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;
	uint16_t vhost_hlen;
	uint16_t i;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	dev->features = *pu;
	if (dev->features &
		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
		vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		vhost_hlen = sizeof(struct virtio_net_hdr);
	}

	LOG_DEBUG(VHOST_CONFIG,
		"(%"PRIu64") Mergeable RX buffers %s, virtio 1 %s\n",
		dev->device_fh,
		(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

	for (i = 0; i < dev->virt_qp_nb; i++) {
		uint16_t base_idx = i * VIRTIO_QNUM;

		dev->virtqueue[base_idx + VIRTIO_RXQ]->vhost_hlen = vhost_hlen;
		dev->virtqueue[base_idx + VIRTIO_TXQ]->vhost_hlen = vhost_hlen;
	}

	return 0;
}
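
/*
 * Header size sketch: with mergeable RX buffers or virtio 1.0 negotiated,
 * every packet is prefixed by struct virtio_net_hdr_mrg_rxbuf (12 bytes);
 * otherwise by the legacy struct virtio_net_hdr (10 bytes).
 */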
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->size = state->num;

	return 0;
}

/*
 * Reallocate the virtio_net device and vhost_virtqueue data structures so
 * that they land on the same NUMA node as the memory backing the vring
 * descriptors.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net_config_ll *old_ll_dev, *new_ll_dev = NULL;
	struct vhost_virtqueue *old_vq, *new_vq = NULL;
	int ret;
	int realloc_dev = 0, realloc_vq = 0;

	old_ll_dev = (struct virtio_net_config_ll *)dev;
	old_vq = dev->virtqueue[index];

	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			MPOL_F_NODE | MPOL_F_ADDR);
	ret = ret | get_mempolicy(&oldnode, NULL, 0, old_ll_dev,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vring desc or dev numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_dev = 1;

	ret = get_mempolicy(&oldnode, NULL, 0, old_vq,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_vq = 1;

	if (realloc_dev == 0 && realloc_vq == 0)
		return dev;

	if (realloc_dev)
		new_ll_dev = rte_malloc_socket(NULL,
			sizeof(struct virtio_net_config_ll), 0, newnode);
	if (realloc_vq)
		new_vq = rte_malloc_socket(NULL,
			sizeof(struct vhost_virtqueue), 0, newnode);
	if (!new_ll_dev && !new_vq)
		return dev;

	if (realloc_vq)
		memcpy(new_vq, old_vq, sizeof(*new_vq));
	if (realloc_dev)
		memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev));

	(new_ll_dev ? new_ll_dev : old_ll_dev)->dev.virtqueue[index] =
		new_vq ? new_vq : old_vq;

	if (realloc_vq)
		rte_free(old_vq);

	if (realloc_dev) {
		if (ll_root == old_ll_dev)
			ll_root = new_ll_dev;
		else {
			struct virtio_net_config_ll *prev = ll_root;

			while (prev->next != old_ll_dev)
				prev = prev->next;
			prev->next = new_ll_dev;
			new_ll_dev->next = old_ll_dev->next;
		}

		rte_free(old_ll_dev);
	}

	return realloc_dev ? &new_ll_dev->dev : dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
	return dev;
}
#endif
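
/*
 * Note: get_mempolicy(&node, NULL, 0, addr, MPOL_F_NODE | MPOL_F_ADDR) is
 * the libnuma idiom used above: it reports the NUMA node backing the page
 * at addr, which lets us compare where the guest's vring descriptors
 * landed against where our own bookkeeping structures live.
 */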
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
			addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find desc ring address.\n",
			dev->device_fh);
		return -1;
	}

	dev = numa_realloc(dev, addr->index);
	vq = dev->virtqueue[addr->index];

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
			addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find avail ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
			addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find used ring address.\n",
			dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
			dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
			dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
			dev->device_fh, vq->used);

	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;

	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;

	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets
 * copied into our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint32_t cur_qp_idx = file->index / VIRTIO_QNUM;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/*
	 * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
	 * we get, so we do vring queue pair allocation here.
	 */
	if (cur_qp_idx + 1 > dev->virt_qp_nb) {
		if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
			return -1;
	}

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->callfd >= 0)
		close(vq->callfd);

	vq->callfd = file->fd;

	return 0;
}
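
/*
 * Index arithmetic sketch: file->index counts individual vrings, so the
 * owning queue pair is file->index / VIRTIO_QNUM. E.g. vring 5 belongs to
 * pair 2 (its TX queue); if pair 2 has not been allocated yet, it is
 * created on the spot by the code above.
 */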
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us.
 * This fd gets copied into our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	if (vq->kickfd >= 0)
		close(vq->kickfd);

	vq->kickfd = file->fd;

	return 0;
}

/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded,
 * we are provided with a valid fd for a tap device (not used by us).
 * If this happens then we can add the device to a data core.
 * When the virtio driver is removed we get fd=-1.
 * At that point we remove the device from the data core.
 * The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/*
	 * If the device isn't already running and both backend fds are set,
	 * we add the device.
	 */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
			return notify_ops->new_device(dev);
		}
	/* Otherwise we remove it. */
	} else
		if (file->fd == VIRTIO_DEV_STOPPED)
			notify_ops->destroy_device(dev);

	return 0;
}
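
/*
 * Lifecycle sketch: the device joins a data core only once both the RX
 * and TX backends hold valid fds, and leaves it again when the virtio
 * driver is unloaded and a backend fd of VIRTIO_DEV_STOPPED (-1) arrives.
 * It stays in the configuration linked list throughout.
 */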
/*
 * Function pointers are set for the device operations to allow CUSE to call
 * functions when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,

	.set_ifname = set_ifname,

	.get_features = get_features,
	.set_features = set_features,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};

/*
 * Called by main to set up callbacks when registering the CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}

int rte_vhost_enable_guest_notification(struct virtio_net *dev,
	uint16_t queue_id, int enable)
{
	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags =
		enable ? 0 : VRING_USED_F_NO_NOTIFY;
	return 0;
}
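
/*
 * Example (application side, sketch): a pure polling datapath would
 * typically keep guest notifications suppressed for its queues:
 *
 *	rte_vhost_enable_guest_notification(dev, VIRTIO_RXQ, 0);
 *	rte_vhost_enable_guest_notification(dev, VIRTIO_TXQ, 0);
 */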
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}

int rte_vhost_feature_disable(uint64_t feature_mask)
{
	VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
	return 0;
}

int rte_vhost_feature_enable(uint64_t feature_mask)
{
	if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
		VHOST_FEATURES = VHOST_FEATURES | feature_mask;
		return 0;
	}
	return -1;
}
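
/*
 * Contract sketch: enabling only succeeds for bits that are part of
 * VHOST_SUPPORTED_FEATURES; anything else leaves VHOST_FEATURES untouched
 * and returns -1. E.g.:
 *
 *	rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_MRG_RXBUF);  // 0
 *	rte_vhost_feature_enable(1ULL << 62);  // -1 if bit 62 unsupported
 */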
/*
 * Register ops so that we can add/remove devices to/from a data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
	notify_ops = ops;

	return 0;
}