/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
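
/*
 * This file implements the vhost-net control path: the device configuration
 * linked list, the handlers invoked for the CUSE vhost IOCTLs (feature
 * negotiation, vring setup, eventfds, backend attach/detach) and the public
 * rte_vhost_* feature helpers.
 */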
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#ifdef RTE_LIBRTE_VHOST_NUMA
#include <numaif.h>
#endif
#include <sys/socket.h>

#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>

#include "vhost-net.h"
#include "virtio-net.h"
/*
 * Device linked list structure for configuration.
 */
struct virtio_net_config_ll {
    struct virtio_net dev;                  /* Virtio device. */
    struct virtio_net_config_ll *next;      /* Next dev on linked list. */
};
/* device ops to add/remove device to/from data core. */
struct virtio_net_device_ops const *notify_ops;
/* root address of the linked list of managed virtio devices */
static struct virtio_net_config_ll *ll_root;

#define VHOST_USER_F_PROTOCOL_FEATURES 30
/* Features supported by this lib. */
#define VHOST_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
                                  (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
                                  (1ULL << VIRTIO_NET_F_CTRL_RX) | \
                                  (1ULL << VHOST_F_LOG_ALL) | \
                                  (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))

static uint64_t VHOST_FEATURES = VHOST_SUPPORTED_FEATURES;
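
/*
 * VHOST_SUPPORTED_FEATURES is the fixed upper bound this library can offer;
 * VHOST_FEATURES is the runtime mask actually advertised to guests, which
 * applications may shrink with rte_vhost_feature_disable() (and restore with
 * rte_vhost_feature_enable()), normally before any device negotiates its
 * features.
 */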
/*
 * Converts QEMU virtual address to Vhost virtual address. This function is
 * used to convert the ring addresses to our address space.
 */
static uint64_t
qva_to_vva(struct virtio_net *dev, uint64_t qemu_va)
{
    struct virtio_memory_regions *region;
    uint64_t vhost_va = 0;
    uint32_t regionidx = 0;

    /* Find the region where the address lives. */
    for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
        region = &dev->mem->regions[regionidx];
        if ((qemu_va >= region->userspace_address) &&
            (qemu_va <= region->userspace_address +
            region->memory_size)) {
            vhost_va = qemu_va + region->guest_phys_address +
                region->address_offset -
                region->userspace_address;
            break;
        }
    }

    return vhost_va;
}
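
/*
 * The translation above, in one line:
 *   vhost_va = qemu_va - region->userspace_address
 *            + region->guest_phys_address + region->address_offset
 * i.e. an address valid in the QEMU process is rebased onto this process's
 * own mapping of the same guest memory region.
 */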
/*
 * Retrieves an entry from the devices configuration linked list.
 */
static struct virtio_net_config_ll *
get_config_ll_entry(struct vhost_device_ctx ctx)
{
    struct virtio_net_config_ll *ll_dev = ll_root;

    /* Loop through linked list until the device_fh is found. */
    while (ll_dev != NULL) {
        if (ll_dev->dev.device_fh == ctx.fh)
            return ll_dev;
        ll_dev = ll_dev->next;
    }

    return NULL;
}
/*
 * Searches the configuration core linked list and
 * retrieves the device if it exists.
 */
struct virtio_net *
get_device(struct vhost_device_ctx ctx)
{
    struct virtio_net_config_ll *ll_dev;

    ll_dev = get_config_ll_entry(ctx);
    if (ll_dev)
        return &ll_dev->dev;

    RTE_LOG(ERR, VHOST_CONFIG,
        "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
    return NULL;
}
/*
 * Add entry containing a device to the device configuration linked list.
 */
static void
add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
{
    struct virtio_net_config_ll *ll_dev = ll_root;

    /* If ll_dev == NULL then this is the first device so go to else. */
    if (ll_dev) {
        /* If the 1st device_fh != 0 then we insert our device here. */
        if (ll_dev->dev.device_fh != 0) {
            new_ll_dev->dev.device_fh = 0;
            new_ll_dev->next = ll_dev;
            ll_root = new_ll_dev;
        } else {
            /*
             * Increment through the ll until we find an unused
             * device_fh. Insert the device at that entry.
             */
            while ((ll_dev->next != NULL) &&
                (ll_dev->dev.device_fh ==
                    (ll_dev->next->dev.device_fh - 1)))
                ll_dev = ll_dev->next;

            new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
            new_ll_dev->next = ll_dev->next;
            ll_dev->next = new_ll_dev;
        }
    } else {
        ll_root = new_ll_dev;
        ll_root->dev.device_fh = 0;
    }
}
/*
 * Unmap any memory, close any file descriptors and
 * free any memory owned by a device.
 */
static void
cleanup_device(struct virtio_net *dev)
{
    /* Unmap QEMU memory file if mapped. */
    if (dev->mem) {
        munmap((void *)(uintptr_t)dev->mem->mapped_address,
            (size_t)dev->mem->mapped_size);
        free(dev->mem);
    }

    /* Close any event notifiers opened by device. */
    if (dev->virtqueue[VIRTIO_RXQ]->callfd >= 0)
        close(dev->virtqueue[VIRTIO_RXQ]->callfd);
    if (dev->virtqueue[VIRTIO_RXQ]->kickfd >= 0)
        close(dev->virtqueue[VIRTIO_RXQ]->kickfd);
    if (dev->virtqueue[VIRTIO_TXQ]->callfd >= 0)
        close(dev->virtqueue[VIRTIO_TXQ]->callfd);
    if (dev->virtqueue[VIRTIO_TXQ]->kickfd >= 0)
        close(dev->virtqueue[VIRTIO_TXQ]->kickfd);
}
/*
 * Release virtqueues and device memory.
 */
static void
free_device(struct virtio_net_config_ll *ll_dev)
{
    /* Free any malloc'd memory. */
    rte_free(ll_dev->dev.virtqueue[VIRTIO_RXQ]);
    rte_free(ll_dev->dev.virtqueue[VIRTIO_TXQ]);
    rte_free(ll_dev);
}
/*
 * Remove an entry from the device configuration linked list.
 */
static struct virtio_net_config_ll *
rm_config_ll_entry(struct virtio_net_config_ll *ll_dev,
    struct virtio_net_config_ll *ll_dev_last)
{
    /* First remove the device and then clean it up. */
    if (ll_dev == ll_root) {
        ll_root = ll_dev->next;
        cleanup_device(&ll_dev->dev);
        free_device(ll_dev);
        return ll_root;
    } else {
        if (likely(ll_dev_last != NULL)) {
            ll_dev_last->next = ll_dev->next;
            cleanup_device(&ll_dev->dev);
            free_device(ll_dev);
            return ll_dev_last->next;
        } else {
            cleanup_device(&ll_dev->dev);
            free_device(ll_dev);
            RTE_LOG(ERR, VHOST_CONFIG,
                "Remove entry from config_ll failed\n");
            return NULL;
        }
    }
}
/*
 * Initialise all variables in device structure.
 */
static void
init_device(struct virtio_net *dev)
{
    uint64_t vq_offset;

    /*
     * Virtqueues have already been malloc'd so
     * we don't want to set them to NULL.
     */
    vq_offset = offsetof(struct virtio_net, mem);

    /* Set everything to 0. */
    memset((void *)(uintptr_t)((uint64_t)(uintptr_t)dev + vq_offset), 0,
        (sizeof(struct virtio_net) - (size_t)vq_offset));
    memset(dev->virtqueue[VIRTIO_RXQ], 0, sizeof(struct vhost_virtqueue));
    memset(dev->virtqueue[VIRTIO_TXQ], 0, sizeof(struct vhost_virtqueue));

    dev->virtqueue[VIRTIO_RXQ]->kickfd = -1;
    dev->virtqueue[VIRTIO_RXQ]->callfd = -1;
    dev->virtqueue[VIRTIO_TXQ]->kickfd = -1;
    dev->virtqueue[VIRTIO_TXQ]->callfd = -1;

    /* Backends are set to -1 indicating an inactive device. */
    dev->virtqueue[VIRTIO_RXQ]->backend = VIRTIO_DEV_STOPPED;
    dev->virtqueue[VIRTIO_TXQ]->backend = VIRTIO_DEV_STOPPED;
}
/*
 * Function is called from the CUSE open function. The device structure is
 * initialised and a new entry is added to the device configuration linked
 * list.
 */
static int
new_device(struct vhost_device_ctx ctx)
{
    struct virtio_net_config_ll *new_ll_dev;
    struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;

    /* Setup device and virtqueues. */
    new_ll_dev = rte_malloc(NULL, sizeof(struct virtio_net_config_ll), 0);
    if (new_ll_dev == NULL) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to allocate memory for dev.\n",
            ctx.fh);
        return -1;
    }

    virtqueue_rx = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
    if (virtqueue_rx == NULL) {
        rte_free(new_ll_dev);
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to allocate memory for rxq.\n",
            ctx.fh);
        return -1;
    }

    virtqueue_tx = rte_malloc(NULL, sizeof(struct vhost_virtqueue), 0);
    if (virtqueue_tx == NULL) {
        rte_free(virtqueue_rx);
        rte_free(new_ll_dev);
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to allocate memory for txq.\n",
            ctx.fh);
        return -1;
    }

    new_ll_dev->dev.virtqueue[VIRTIO_RXQ] = virtqueue_rx;
    new_ll_dev->dev.virtqueue[VIRTIO_TXQ] = virtqueue_tx;

    /* Initialise device and virtqueues. */
    init_device(&new_ll_dev->dev);

    new_ll_dev->next = NULL;

    /* Add entry to device configuration linked list. */
    add_config_ll_entry(new_ll_dev);

    return new_ll_dev->dev.device_fh;
}
/*
 * Function is called from the CUSE release function. This function will
 * cleanup the device and remove it from device configuration linked list.
 */
static void
destroy_device(struct vhost_device_ctx ctx)
{
    struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
    struct virtio_net_config_ll *ll_dev_cur = ll_root;

    /* Find the linked list entry for the device to be removed. */
    ll_dev_cur_ctx = get_config_ll_entry(ctx);
    while (ll_dev_cur != NULL) {
        /*
         * If the device is found or
         * a device that doesn't exist is found then it is removed.
         */
        if (ll_dev_cur == ll_dev_cur_ctx) {
            /*
             * If the device is running on a data core then call
             * the function to remove it from the data core.
             */
            if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
                notify_ops->destroy_device(&(ll_dev_cur->dev));
            ll_dev_cur = rm_config_ll_entry(ll_dev_cur,
                    ll_dev_last);
        } else {
            ll_dev_last = ll_dev_cur;
            ll_dev_cur = ll_dev_cur->next;
        }
    }
}
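
/*
 * Store the interface name reported for this device in dev->ifname,
 * truncating it if it is longer than the ifname field.
 */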
static void
set_ifname(struct vhost_device_ctx ctx,
    const char *if_name, unsigned int if_len)
{
    struct virtio_net *dev;
    unsigned int len;

    dev = get_device(ctx);
    if (dev == NULL)
        return;

    len = if_len > sizeof(dev->ifname) ?
        sizeof(dev->ifname) : if_len;

    strncpy(dev->ifname, if_name, len);
}
/*
 * Called from CUSE IOCTL: VHOST_SET_OWNER
 * This function just returns success at the moment unless
 * the device hasn't been initialised.
 */
static int
set_owner(struct vhost_device_ctx ctx)
{
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_RESET_OWNER
 */
static int
reset_owner(struct vhost_device_ctx ctx)
{
    uint64_t device_fh;
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;

    /* Re-initialise the device state but keep its handle. */
    device_fh = dev->device_fh;
    cleanup_device(dev);
    init_device(dev);
    dev->device_fh = device_fh;
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_FEATURES
 * The features that we support are requested.
 */
static int
get_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;

    /* Send our supported features. */
    *pu = VHOST_FEATURES;
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_FEATURES
 * We receive the negotiated features supported by us and the virtio device.
 */
static int
set_features(struct vhost_device_ctx ctx, uint64_t *pu)
{
    struct virtio_net *dev;

    dev = get_device(ctx);
    if (dev == NULL)
        return -1;
    if (*pu & ~VHOST_FEATURES)
        return -1;

    /* Store the negotiated feature list for the device. */
    dev->features = *pu;

    /* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
    if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
        LOG_DEBUG(VHOST_CONFIG,
            "(%"PRIu64") Mergeable RX buffers enabled\n",
            dev->device_fh);
        dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
            sizeof(struct virtio_net_hdr_mrg_rxbuf);
        dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
            sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        LOG_DEBUG(VHOST_CONFIG,
            "(%"PRIu64") Mergeable RX buffers disabled\n",
            dev->device_fh);
        dev->virtqueue[VIRTIO_RXQ]->vhost_hlen =
            sizeof(struct virtio_net_hdr);
        dev->virtqueue[VIRTIO_TXQ]->vhost_hlen =
            sizeof(struct virtio_net_hdr);
    }
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_NUM
 * The virtio device sends us the size of the descriptor ring.
 */
static int
set_vring_num(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;

    /* state->index refers to the queue index. The txq is 1, rxq is 0. */
    dev->virtqueue[state->index]->size = state->num;
    return 0;
}
/*
 * Reallocate virtio_dev and vhost_virtqueue data structure to make them on the
 * same numa node as the memory of vring descriptor.
 */
#ifdef RTE_LIBRTE_VHOST_NUMA
static struct virtio_net *
numa_realloc(struct virtio_net *dev, int index)
{
    int oldnode, newnode;
    struct virtio_net_config_ll *old_ll_dev, *new_ll_dev = NULL;
    struct vhost_virtqueue *old_vq, *new_vq = NULL;
    int ret;
    int realloc_dev = 0, realloc_vq = 0;

    old_ll_dev = (struct virtio_net_config_ll *)dev;
    old_vq = dev->virtqueue[index];

    ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
            MPOL_F_NODE | MPOL_F_ADDR);
    ret = ret | get_mempolicy(&oldnode, NULL, 0, old_ll_dev,
            MPOL_F_NODE | MPOL_F_ADDR);
    if (ret) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Unable to get vring desc or dev numa information.\n");
        return dev;
    }
    if (oldnode != newnode)
        realloc_dev = 1;

    ret = get_mempolicy(&oldnode, NULL, 0, old_vq,
            MPOL_F_NODE | MPOL_F_ADDR);
    if (ret) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "Unable to get vq numa information.\n");
        return dev;
    }
    if (oldnode != newnode)
        realloc_vq = 1;

    if (realloc_dev == 0 && realloc_vq == 0)
        return dev;

    if (realloc_dev)
        new_ll_dev = rte_malloc_socket(NULL,
            sizeof(struct virtio_net_config_ll), 0, newnode);
    if (realloc_vq)
        new_vq = rte_malloc_socket(NULL,
            sizeof(struct vhost_virtqueue), 0, newnode);
    if (!new_ll_dev && !new_vq)
        return dev;

    if (realloc_vq)
        memcpy(new_vq, old_vq, sizeof(*new_vq));
    if (realloc_dev)
        memcpy(new_ll_dev, old_ll_dev, sizeof(*new_ll_dev));
    (new_ll_dev ? new_ll_dev : old_ll_dev)->dev.virtqueue[index] =
        new_vq ? new_vq : old_vq;

    if (realloc_vq)
        rte_free(old_vq);
    if (realloc_dev) {
        if (ll_root == old_ll_dev)
            ll_root = new_ll_dev;
        else {
            struct virtio_net_config_ll *prev = ll_root;

            while (prev->next != old_ll_dev)
                prev = prev->next;
            prev->next = new_ll_dev;
            new_ll_dev->next = old_ll_dev->next;
        }
        rte_free(old_ll_dev);
    }

    return realloc_dev ? &new_ll_dev->dev : dev;
}
#else
static struct virtio_net *
numa_realloc(struct virtio_net *dev, int index __rte_unused)
{
    return dev;
}
#endif
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_ADDR
 * The virtio device sends us the desc, used and avail ring addresses.
 * This function then converts these to our address space.
 */
static int
set_vring_addr(struct vhost_device_ctx ctx, struct vhost_vring_addr *addr)
{
    struct virtio_net *dev;
    struct vhost_virtqueue *vq;

    dev = get_device(ctx);
    if (dev == NULL)
        return -1;

    /* addr->index refers to the queue index. The txq is 1, rxq is 0. */
    vq = dev->virtqueue[addr->index];

    /* The addresses are converted from QEMU virtual to Vhost virtual. */
    vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
            addr->desc_user_addr);
    if (vq->desc == 0) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to find desc ring address.\n",
            dev->device_fh);
        return -1;
    }

    dev = numa_realloc(dev, addr->index);
    vq = dev->virtqueue[addr->index];

    vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
            addr->avail_user_addr);
    if (vq->avail == 0) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to find avail ring address.\n",
            dev->device_fh);
        return -1;
    }

    vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
            addr->used_user_addr);
    if (vq->used == 0) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "(%"PRIu64") Failed to find used ring address.\n",
            dev->device_fh);
        return -1;
    }

    LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n",
            dev->device_fh, vq->desc);
    LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n",
            dev->device_fh, vq->avail);
    LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n",
            dev->device_fh, vq->used);

    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_BASE
 * The virtio device sends us the available ring last used index.
 */
static int
set_vring_base(struct vhost_device_ctx ctx, struct vhost_vring_state *state)
{
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;

    /* state->index refers to the queue index. The txq is 1, rxq is 0. */
    dev->virtqueue[state->index]->last_used_idx = state->num;
    dev->virtqueue[state->index]->last_used_idx_res = state->num;
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_GET_VRING_BASE
 * We send the virtio device our available ring last used index.
 */
static int
get_vring_base(struct vhost_device_ctx ctx, uint32_t index,
    struct vhost_vring_state *state)
{
    struct virtio_net *dev = get_device(ctx);

    if (dev == NULL)
        return -1;

    state->index = index;
    /* state->index refers to the queue index. The txq is 1, rxq is 0. */
    state->num = dev->virtqueue[state->index]->last_used_idx;
    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_CALL
 * The virtio device sends an eventfd to interrupt the guest. This fd gets
 * copied into our process space.
 */
static int
set_vring_call(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
    struct virtio_net *dev;
    struct vhost_virtqueue *vq;

    dev = get_device(ctx);
    if (dev == NULL)
        return -1;

    /* file->index refers to the queue index. The txq is 1, rxq is 0. */
    vq = dev->virtqueue[file->index];
    if (vq->callfd >= 0)
        close(vq->callfd);
    vq->callfd = file->fd;

    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_SET_VRING_KICK
 * The virtio device sends an eventfd that it can use to notify us.
 * This fd gets copied into our process space.
 */
static int
set_vring_kick(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
    struct virtio_net *dev;
    struct vhost_virtqueue *vq;

    dev = get_device(ctx);
    if (dev == NULL)
        return -1;

    /* file->index refers to the queue index. The txq is 1, rxq is 0. */
    vq = dev->virtqueue[file->index];
    if (vq->kickfd >= 0)
        close(vq->kickfd);
    vq->kickfd = file->fd;

    return 0;
}
/*
 * Called from CUSE IOCTL: VHOST_NET_SET_BACKEND
 * To complete device initialisation when the virtio driver is loaded,
 * we are provided with a valid fd for a tap device (not used by us).
 * If this happens then we can add the device to a data core.
 * When the virtio driver is removed we get fd=-1.
 * At that point we remove the device from the data core.
 * The device will still exist in the device configuration linked list.
 */
static int
set_backend(struct vhost_device_ctx ctx, struct vhost_vring_file *file)
{
    struct virtio_net *dev;

    dev = get_device(ctx);
    if (dev == NULL)
        return -1;

    /* file->index refers to the queue index. The txq is 1, rxq is 0. */
    dev->virtqueue[file->index]->backend = file->fd;

    /*
     * If the device isn't already running and both backend fds are set,
     * we add the device.
     */
    if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
        if (((int)dev->virtqueue[VIRTIO_TXQ]->backend != VIRTIO_DEV_STOPPED) &&
            ((int)dev->virtqueue[VIRTIO_RXQ]->backend != VIRTIO_DEV_STOPPED)) {
            return notify_ops->new_device(dev);
        }
    /* Otherwise we remove it. */
    } else
        if (file->fd == VIRTIO_DEV_STOPPED)
            notify_ops->destroy_device(dev);
    return 0;
}
/*
 * Function pointers are set for the device operations to allow CUSE to call
 * functions when an IOCTL, device_add or device_release is received.
 */
static const struct vhost_net_device_ops vhost_device_ops = {
    .new_device = new_device,
    .destroy_device = destroy_device,

    .set_ifname = set_ifname,

    .get_features = get_features,
    .set_features = set_features,

    .set_vring_num = set_vring_num,
    .set_vring_addr = set_vring_addr,
    .set_vring_base = set_vring_base,
    .get_vring_base = get_vring_base,

    .set_vring_kick = set_vring_kick,
    .set_vring_call = set_vring_call,

    .set_backend = set_backend,

    .set_owner = set_owner,
    .reset_owner = reset_owner,
};
/*
 * Called by main to setup callbacks when registering CUSE device.
 */
struct vhost_net_device_ops const *
get_virtio_net_callbacks(void)
{
    return &vhost_device_ops;
}
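
/*
 * Control whether the guest should notify (kick) us when it adds buffers to
 * the ring: setting VRING_USED_F_NO_NOTIFY in the used ring flags suppresses
 * those kicks. Only disabling notifications (enable == 0) is supported here.
 */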
int rte_vhost_enable_guest_notification(struct virtio_net *dev,
    uint16_t queue_id, int enable)
{
    if (enable) {
        RTE_LOG(ERR, VHOST_CONFIG,
            "guest notification isn't supported.\n");
        return -1;
    }

    dev->virtqueue[queue_id]->used->flags =
        enable ? 0 : VRING_USED_F_NO_NOTIFY;
    return 0;
}
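
/* Return the feature bits this library currently offers during negotiation. */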
uint64_t rte_vhost_feature_get(void)
{
    return VHOST_FEATURES;
}
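
/* Remove the given bits from the set of features offered to guests. */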
int rte_vhost_feature_disable(uint64_t feature_mask)
{
    VHOST_FEATURES = VHOST_FEATURES & ~feature_mask;
    return 0;
}
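
/*
 * Add the given bits back to the offered feature set; only bits that are
 * part of VHOST_SUPPORTED_FEATURES can be enabled.
 */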
int rte_vhost_feature_enable(uint64_t feature_mask)
{
    if ((feature_mask & VHOST_SUPPORTED_FEATURES) == feature_mask) {
        VHOST_FEATURES = VHOST_FEATURES | feature_mask;
        return 0;
    }
    return -1;
}
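
/*
 * Typical usage (an illustrative sketch, not part of this file): an
 * application that does not want mergeable RX buffers can trim the offered
 * features before any device negotiates them, e.g.
 *
 *     rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_MRG_RXBUF);
 *
 * and can later restore the bit with
 * rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_MRG_RXBUF).
 */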
/*
 * Register ops so that we can add/remove device to data core.
 */
int
rte_vhost_driver_callback_register(struct virtio_net_device_ops const * const ops)
{
    notify_ops = ops;

    return 0;
}