/*
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/vhost.h>
#include <linux/virtio_net.h>

#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif

#include <sys/socket.h>

#include <rte_ethdev.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "vhost-net.h"
#include "virtio-net.h"
/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
	struct virtio_net dev;			/* Virtio device. */
	struct virtio_net_config_ll *next;	/* Next dev on linked list. */
};

/* device ops to add/remove device to/from data core. */
struct virtio_net_device_ops const *notify_ops;
/* root address of the linked list of managed virtio devices */
static struct virtio_net_config_ll *ll_root;
#define VHOST_USER_F_PROTOCOL_FEATURES	30

/* Features supported by this lib. */
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(VHOST_SUPPORTS_MQ)            | \
				(1ULL << VIRTIO_F_VERSION_1)   | \
				(1ULL << VHOST_F_LOG_ALL)      | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_CSUM))

static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
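
/*
 * Illustrative sketch (not part of the original sources): an application that
 * cannot cope with host TSO could narrow the advertised feature set through
 * the rte_vhost_feature_* API defined near the end of this file, before any
 * device is created. The helper below is hypothetical and kept out of the
 * build with #if 0.
 */
#if 0
static void
example_trim_features(void)
{
	/* Drop the host TSO bits; every other supported bit stays set. */
	rte_vhost_feature_disable((1ULL << VIRTIO_NET_F_HOST_TSO4) |
			(1ULL << VIRTIO_NET_F_HOST_TSO6));

	/* Re-enabling only succeeds for bits in VHOST_SUPPORTED_FEATURES. */
	rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_HOST_TSO4);
}
#endif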
/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
	struct virtio_memory_regions *region;
	uint64_t vhost_va = 0;
	uint32_t regionidx = 0;

	/* Find the region where the address lives. */
	for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
		region = &dev->mem->regions[regionidx];
		if ((qemu_va >= region->userspace_address) &&
			(qemu_va <= region->userspace_address +
			region->memory_size)) {
			vhost_va = qemu_va + region->guest_phys_address +
				region->address_offset -
				region->userspace_address;
			break;
		}
	}

	return vhost_va;
}
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* Loop through linked list until the device_fh is found. */
	while (ll_dev != NULL) {
		if (ll_dev->dev.device_fh == ctx.fh)
			return ll_dev;
		ll_dev = ll_dev->next;
	}
	return NULL;
}
/*
 * Searches the configuration core linked list and
 * retrieves the device if it exists.
 */
struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev;

	ll_dev = get_config_ll_entry(ctx);
	if (ll_dev)
		return &ll_dev->dev;

	RTE_LOG(ERR, VHOST_CONFIG,
		"(%"PRIu64") Device not found in linked list.\n", ctx.fh);
	return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
	struct virtio_net_config_ll *ll_dev = ll_root;

	/* If ll_dev == NULL then this is the first device so go to else */
	if (ll_dev != NULL) {
		/* If the 1st device_fh != 0 then we insert our device here. */
		if (ll_dev->dev.device_fh != 0) {
			new_ll_dev->dev.device_fh = 0;
			new_ll_dev->next = ll_dev;
			ll_root = new_ll_dev;
		} else {
			/*
			 * Increment through the ll until we find an unused
			 * device_fh. Insert the device at that entry.
			 */
			while ((ll_dev->next != NULL) &&
				(ll_dev->dev.device_fh ==
				(ll_dev->next->dev.device_fh - 1)))
				ll_dev = ll_dev->next;

			new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
			new_ll_dev->next = ll_dev->next;
			ll_dev->next = new_ll_dev;
		}
	} else {
		ll_root = new_ll_dev;
		ll_root->dev.device_fh = 0;
	}
}
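
/*
 * Worked example (not in the original sources): device_fh values are handed
 * out as the lowest unused index. With devices 0, 1 and 2 in the list,
 * removing device 1 and then adding a new one reuses fh 1, because the walk
 * above stops at the first gap in the otherwise consecutive sequence.
 */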
static void
cleanup_vq(struct vhost_virtqueue *vq, int destroy)
{
	if ((vq->callfd >= 0) && (destroy != 0))
		close(vq->callfd);
}

/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev, int destroy)
{
	uint32_t i;

	/* Unmap QEMU memory file if mapped. */
	if (dev->mem)
		munmap((void *)(uintptr_t)dev->mem->mapped_address,
			(size_t)dev->mem->mapped_size);

	for (i = 0; i < dev->virt_qp_nb; i++) {
		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_RXQ], destroy);
		cleanup_vq(dev->virtqueue[i * VIRTIO_QNUM + VIRTIO_TXQ], destroy);
	}
}

/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
	uint32_t i;

	for (i = 0; i < ll_dev->dev.virt_qp_nb; i++)
		rte_free(ll_dev->dev.virtqueue[i * VIRTIO_QNUM]);

	rte_free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
	struct virtio_net_config_ll *ll_dev_last)
{
	/* First remove the device and then clean it up. */
	if (ll_dev == ll_root) {
		ll_root = ll_dev->next;
		cleanup_device(&ll_dev->dev, 1);
		free_device(ll_dev);
		return ll_root;
	}

	if (likely(ll_dev_last != NULL)) {
		ll_dev_last->next = ll_dev->next;
		cleanup_device(&ll_dev->dev, 1);
		free_device(ll_dev);
		return ll_dev_last->next;
	}

	cleanup_device(&ll_dev->dev, 1);
	free_device(ll_dev);
	RTE_LOG(ERR, VHOST_CONFIG,
		"Remove entry from config_ll failed\n");
	return NULL;
}
static void
init_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
{
	memset(vq, 0, sizeof(struct vhost_virtqueue));

	/* Backends are set to -1 indicating an inactive device. */
	vq->backend = -1;

	/* always set the default vq pair to enabled */
	if (qp_idx == 0)
		vq->enabled = 1;
}

static void
init_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
	uint32_t base_idx = qp_idx * VIRTIO_QNUM;

	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
	init_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
}

static void
reset_vring_queue(struct vhost_virtqueue *vq, int qp_idx)
{
	init_vring_queue(vq, qp_idx);
}

static void
reset_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
	uint32_t base_idx = qp_idx * VIRTIO_QNUM;

	reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_RXQ], qp_idx);
	reset_vring_queue(dev->virtqueue[base_idx + VIRTIO_TXQ], qp_idx);
}
static int
alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx)
{
	struct vhost_virtqueue *virtqueue = NULL;
	uint32_t virt_rx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_RXQ;
	uint32_t virt_tx_q_idx = qp_idx * VIRTIO_QNUM + VIRTIO_TXQ;

	virtqueue = rte_malloc(NULL,
			sizeof(struct vhost_virtqueue) * VIRTIO_QNUM, 0);
	if (virtqueue == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Failed to allocate memory for virt qp:%d.\n", qp_idx);
		return -1;
	}

	dev->virtqueue[virt_rx_q_idx] = virtqueue;
	dev->virtqueue[virt_tx_q_idx] = virtqueue + VIRTIO_TXQ;

	init_vring_queue_pair(dev, qp_idx);

	dev->virt_qp_nb += 1;

	return 0;
}
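
/*
 * Illustrative sketch (not part of the original sources): virtqueues are laid
 * out per pair, so queue id 2*n is the RX ring and 2*n + 1 the TX ring of
 * pair n. The hypothetical helpers below restate that mapping and are kept
 * out of the build with #if 0.
 */
#if 0
static inline uint32_t
example_qp_of_queue(uint32_t queue_id)
{
	return queue_id / VIRTIO_QNUM;	/* queue pair index */
}

static inline int
example_is_tx_queue(uint32_t queue_id)
{
	return (queue_id % VIRTIO_QNUM) == VIRTIO_TXQ;
}
#endif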
/*
 * Reset some variables in device structure, while keeping a few
 * others untouched, such as device_fh, ifname, virt_qp_nb: they
 * should remain the same unless the device is removed.
 */
static void
reset_device(struct virtio_net *dev)
{
	uint32_t i;

	dev->protocol_features = 0;

	for (i = 0; i < dev->virt_qp_nb; i++)
		reset_vring_queue_pair(dev, i);
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *new_ll_dev;

	/* Setup device and virtqueues. */
	new_ll_dev = rte_zmalloc(NULL, sizeof(struct virtio_net_config_ll), 0);
	if (new_ll_dev == NULL) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to allocate memory for dev.\n",
			ctx.fh);
		return -1;
	}

	new_ll_dev->next = NULL;

	/* Add entry to device configuration linked list. */
	add_config_ll_entry(new_ll_dev);

	return new_ll_dev->dev.device_fh;
}
/*
 * Function is called from the CUSE release function. This function will
 * cleanup the device and remove it from device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
	struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
	struct virtio_net_config_ll *ll_dev_cur = ll_root;

	/* Find the linked list entry for the device to be removed. */
	ll_dev_cur_ctx = get_config_ll_entry(ctx);
	while (ll_dev_cur != NULL) {
		/* Remove the device when its entry is found in the list. */
		if (ll_dev_cur == ll_dev_cur_ctx) {
			/*
			 * If the device is running on a data core then call
			 * the function to remove it from the data core.
			 */
			if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
				notify_ops->destroy_device(&(ll_dev_cur->dev));
			ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
					ll_dev_last);
		} else {
			ll_dev_last = ll_dev_cur;
			ll_dev_cur = ll_dev_cur->next;
		}
	}
}
static int
set_ifname(struct vhost_device_ctx ctx,
	const char *if_name, unsigned int if_len)
{
	struct virtio_net *dev;
	unsigned int len;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	len = if_len > sizeof(dev->ifname) ?
		sizeof(dev->ifname) : if_len;

	strncpy(dev->ifname, if_name, len);

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success; it only fails if the
 * device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	if (dev->flags & VIRTIO_DEV_RUNNING)
		notify_ops->destroy_device(dev);

	cleanup_device(dev, 0);
	reset_device(dev);
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* Send our supported features. */
	*pu = VHOST_FEATURES;
	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
	struct virtio_net *dev;
	uint16_t vhost_hlen;
	uint16_t i;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;
	if (*pu & ~VHOST_FEATURES)
		return -1;

	dev->features = *pu;
	if (dev->features &
		((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
		vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		vhost_hlen = sizeof(struct virtio_net_hdr);
	}

	LOG_DEBUG(VHOST_CONFIG,
		"(%"PRIu64") Mergeable RX buffers %s, virtio 1 %s\n",
		dev->device_fh,
		(dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
		(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");

	for (i = 0; i < dev->virt_qp_nb; i++) {
		uint16_t base_idx = i * VIRTIO_QNUM;

		dev->virtqueue[base_idx + VIRTIO_RXQ]->vhost_hlen = vhost_hlen;
		dev->virtqueue[base_idx + VIRTIO_TXQ]->vhost_hlen = vhost_hlen;
	}

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->size = state->num;

	return 0;
}
/*
 * Reallocate virtio_dev and vhost_virtqueue data structure to make them on the
 * same numa node as the memory of vring descriptor.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index)
{
	int oldnode, newnode;
	struct virtio_net_config_ll *old_ll_dev, *new_ll_dev = NULL;
	struct vhost_virtqueue *old_vq, *new_vq = NULL;
	int ret;
	int realloc_dev = 0, realloc_vq = 0;

	old_ll_dev = (struct virtio_net_config_ll *)dev;
	old_vq = dev->virtqueue[index];

	ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
			MPOL_F_NODE | MPOL_F_ADDR);
	ret = ret | get_mempolicy(&oldnode, NULL, 0, old_ll_dev,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vring desc or dev numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_dev = 1;

	ret = get_mempolicy(&oldnode, NULL, 0, old_vq,
			MPOL_F_NODE | MPOL_F_ADDR);
	if (ret) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"Unable to get vq numa information.\n");
		return dev;
	}
	if (oldnode != newnode)
		realloc_vq = 1;

	if (realloc_dev == 0 && realloc_vq == 0)
		return dev;

	if (realloc_dev)
		new_ll_dev = rte_malloc_socket(NULL,
			sizeof(struct virtio_net_config_ll), 0, newnode);
	if (realloc_vq)
		new_vq = rte_malloc_socket(NULL,
			sizeof(struct vhost_virtqueue), 0, newnode);
	if (!new_ll_dev && !new_vq)
		return dev;

	if (new_vq)
		memcpy(new_vq, old_vq, sizeof(*new_vq));
	if (new_ll_dev)
		memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev));
	(new_ll_dev ? new_ll_dev : old_ll_dev)->dev.virtqueue[index] =
		new_vq ? new_vq : old_vq;

	if (new_ll_dev) {
		if (ll_root == old_ll_dev) {
			ll_root = new_ll_dev;
		} else {
			struct virtio_net_config_ll *prev = ll_root;

			while (prev->next != old_ll_dev)
				prev = prev->next;
			prev->next = new_ll_dev;
			new_ll_dev->next = old_ll_dev->next;
		}
		rte_free(old_ll_dev);
	}

	return realloc_dev ? &new_ll_dev->dev : dev;
}
#else
static struct virtio_net*
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
	return dev;
}
#endif
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* addr->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[addr->index];

	/* The addresses are converted from QEMU virtual to Vhost virtual. */
	vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
			addr->desc_user_addr);
	if (vq->desc == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find desc ring address.\n",
			dev->device_fh);
		return -1;
	}

	dev = numa_realloc(dev, addr->index);
	vq = dev->virtqueue[addr->index];

	vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
			addr->avail_user_addr);
	if (vq->avail == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find avail ring address.\n",
			dev->device_fh);
		return -1;
	}

	vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
			addr->used_user_addr);
	if (vq->used == 0) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"(%"PRIu64") Failed to find used ring address.\n",
			dev->device_fh);
		return -1;
	}

	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
			dev->device_fh, vq->desc);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
			dev->device_fh, vq->avail);
	LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
			dev->device_fh, vq->used);

	return 0;
}
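
/*
 * Illustrative sketch (not part of the original sources): the transport layer
 * fills a struct vhost_vring_addr (from linux/vhost.h) with the guest
 * process' virtual addresses before set_vring_addr() is invoked; each ring is
 * then translated into this process' address space via qva_to_vva() above.
 * The helper below is hypothetical and kept out of the build with #if 0.
 */
#if 0
static void
example_fill_vring_addr(struct vhost_vring_addr *addr, unsigned int index,
		uint64_t desc, uint64_t avail, uint64_t used)
{
	addr->index = index;		/* 0 is the RX ring, 1 the TX ring */
	addr->desc_user_addr = desc;	/* QEMU virtual addresses */
	addr->avail_user_addr = avail;
	addr->used_user_addr = used;
}
#endif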
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[state->index]->last_used_idx = state->num;
	dev->virtqueue[state->index]->last_used_idx_res = state->num;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
	struct vhost_vring_state *state)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	state->index = index;
	/* State->index refers to the queue index. The txq is 1, rxq is 0. */
	state->num = dev->virtqueue[state->index]->last_used_idx;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets
 * copied into our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;
	uint32_t cur_qp_idx = file->index / VIRTIO_QNUM;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/*
	 * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
	 * we get, so we do vring queue pair allocation here.
	 */
	if (cur_qp_idx + 1 > dev->virt_qp_nb) {
		if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
			return -1;
	}

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	vq->callfd = file->fd;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us.
 * This fd gets copied into our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;
	struct vhost_virtqueue *vq;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	vq = dev->virtqueue[file->index];

	vq->kickfd = file->fd;

	return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded,
 * we are provided with a valid fd for a tap device (not used by us).
 * If this happens then we can add the device to a data core.
 * When the virtio driver is removed we get fd=-1.
 * At that point we remove the device from the data core.
 * The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
	struct virtio_net *dev;

	dev = get_device(ctx);
	if (dev == NULL)
		return -1;

	/* file->index refers to the queue index. The txq is 1, rxq is 0. */
	dev->virtqueue[file->index]->backend = file->fd;

	/*
	 * If the device isn't already running and both backend fds are set,
	 * we add the device.
	 */
	if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
		if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
			((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED))
			return notify_ops->new_device(dev);
	/* Otherwise we remove it. */
	} else if (file->fd == VIRTIO_DEV_STOPPED)
		notify_ops->destroy_device(dev);

	return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call
 * functions when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
	.new_device = new_device,
	.destroy_device = destroy_device,

	.set_ifname = set_ifname,

	.get_features = get_features,
	.set_features = set_features,

	.set_vring_num = set_vring_num,
	.set_vring_addr = set_vring_addr,
	.set_vring_base = set_vring_base,
	.get_vring_base = get_vring_base,

	.set_vring_kick = set_vring_kick,
	.set_vring_call = set_vring_call,

	.set_backend = set_backend,

	.set_owner = set_owner,
	.reset_owner = reset_owner,
};
/*
 * Called by main to setup callbacks when registering CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
	return &vhost_device_ops;
}
int rte_vhost_enable_guest_notification(struct virtio_net *dev,
	uint16_t queue_id, int enable)
{
	if (enable) {
		RTE_LOG(ERR, VHOST_CONFIG,
			"guest notification isn't supported.\n");
		return -1;
	}

	dev->virtqueue[queue_id]->used->flags = VRING_USED_F_NO_NOTIFY;
	return 0;
}
uint64_t rte_vhost_feature_get(void)
{
	return VHOST_FEATURES;
}

int rte_vhost_feature_disable(uint64_t feature_mask)
{
	VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
	return 0;
}

int rte_vhost_feature_enable(uint64_t feature_mask)
{
	if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
		VHOST_FEATURES = VHOST_FEATURES | feature_mask;
		return 0;
	}
	return -1;
}
/*
 * Register ops so that we can add/remove device to data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
	notify_ops = ops;

	return 0;
}
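
/*
 * Illustrative sketch (not part of the original sources): an application
 * registers its data-path hooks through the API above. The callback
 * signatures shown here are assumed; the authoritative definitions live in
 * rte_virtio_net.h. Kept out of the build with #if 0.
 */
#if 0
static int
example_new_device(struct virtio_net *dev)
{
	/* Start polling the device's virtqueues from a data core. */
	return 0;
}

static void
example_destroy_device(volatile struct virtio_net *dev)
{
	/* Stop polling before the library releases the device. */
}

static const struct virtio_net_device_ops example_ops = {
	.new_device = example_new_device,
	.destroy_device = example_destroy_device,
};

/*
 * Typically called once at startup, before entering the vhost session loop:
 *	rte_vhost_driver_callback_register(&example_ops);
 */
#endif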