4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #ifdef RTE_EXEC_ENV_LINUXAPP
40 #include "virtio_pci.h"
41 #include "virtio_logs.h"
42 #include "virtqueue.h"
45 * Following macros are derived from linux/pci_regs.h, however,
46 * we can't simply include that header here, as there is no such
47 * file for non-Linux platform.
49 #define PCI_CAPABILITY_LIST 0x34
50 #define PCI_CAP_ID_VNDR 0x09
53 legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
54 void *dst, int length)
60 off = VIRTIO_PCI_CONFIG(hw) + offset;
61 for (d = dst; length > 0; d += size, off += size, length -= size) {
64 *(uint32_t *)d = VIRTIO_READ_REG_4(hw, off);
65 } else if (length >= 2) {
67 *(uint16_t *)d = VIRTIO_READ_REG_2(hw, off);
70 *d = VIRTIO_READ_REG_1(hw, off);
76 legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
77 const void *src, int length)
83 off = VIRTIO_PCI_CONFIG(hw) + offset;
84 for (s = src; length > 0; s += size, off += size, length -= size) {
87 VIRTIO_WRITE_REG_4(hw, off, *(const uint32_t *)s);
88 } else if (length >= 2) {
90 VIRTIO_WRITE_REG_2(hw, off, *(const uint16_t *)s);
93 VIRTIO_WRITE_REG_1(hw, off, *s);
99 legacy_get_features(struct virtio_hw *hw)
101 return VIRTIO_READ_REG_4(hw, VIRTIO_PCI_HOST_FEATURES);
105 legacy_set_features(struct virtio_hw *hw, uint64_t features)
107 if ((features >> 32) != 0) {
109 "only 32 bit features are allowed for legacy virtio!");
112 VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_GUEST_FEATURES, features);
116 legacy_get_status(struct virtio_hw *hw)
118 return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_STATUS);
122 legacy_set_status(struct virtio_hw *hw, uint8_t status)
124 VIRTIO_WRITE_REG_1(hw, VIRTIO_PCI_STATUS, status);
128 legacy_reset(struct virtio_hw *hw)
130 legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
134 legacy_get_isr(struct virtio_hw *hw)
136 return VIRTIO_READ_REG_1(hw, VIRTIO_PCI_ISR);
139 /* Enable one vector (0) for Link State Intrerrupt */
141 legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
143 VIRTIO_WRITE_REG_2(hw, VIRTIO_MSI_CONFIG_VECTOR, vec);
144 return VIRTIO_READ_REG_2(hw, VIRTIO_MSI_CONFIG_VECTOR);
148 legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
150 VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, queue_id);
151 return VIRTIO_READ_REG_2(hw, VIRTIO_PCI_QUEUE_NUM);
155 legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
157 VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vq->vq_queue_index);
159 VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN,
160 vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT);
164 legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
166 VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_SEL, vq->vq_queue_index);
168 VIRTIO_WRITE_REG_4(hw, VIRTIO_PCI_QUEUE_PFN, 0);
172 legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
174 VIRTIO_WRITE_REG_2(hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
177 #ifdef RTE_EXEC_ENV_LINUXAPP
179 parse_sysfs_value(const char *filename, unsigned long *val)
185 f = fopen(filename, "r");
187 PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s",
192 if (fgets(buf, sizeof(buf), f) == NULL) {
193 PMD_INIT_LOG(ERR, "%s(): cannot read sysfs value %s",
198 *val = strtoul(buf, &end, 0);
199 if ((buf[0] == '\0') || (end == NULL) || (*end != '\n')) {
200 PMD_INIT_LOG(ERR, "%s(): cannot parse sysfs value %s",
210 get_uio_dev(struct rte_pci_addr *loc, char *buf, unsigned int buflen,
211 unsigned int *uio_num)
215 char dirname[PATH_MAX];
218 * depending on kernel version, uio can be located in uio/uioX
221 snprintf(dirname, sizeof(dirname),
222 SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
223 loc->domain, loc->bus, loc->devid, loc->function);
224 dir = opendir(dirname);
226 /* retry with the parent directory */
227 snprintf(dirname, sizeof(dirname),
228 SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
229 loc->domain, loc->bus, loc->devid, loc->function);
230 dir = opendir(dirname);
233 PMD_INIT_LOG(ERR, "Cannot opendir %s", dirname);
238 /* take the first file starting with "uio" */
239 while ((e = readdir(dir)) != NULL) {
240 /* format could be uio%d ...*/
241 int shortprefix_len = sizeof("uio") - 1;
242 /* ... or uio:uio%d */
243 int longprefix_len = sizeof("uio:uio") - 1;
246 if (strncmp(e->d_name, "uio", 3) != 0)
249 /* first try uio%d */
251 *uio_num = strtoull(e->d_name + shortprefix_len, &endptr, 10);
252 if (errno == 0 && endptr != (e->d_name + shortprefix_len)) {
253 snprintf(buf, buflen, "%s/uio%u", dirname, *uio_num);
257 /* then try uio:uio%d */
259 *uio_num = strtoull(e->d_name + longprefix_len, &endptr, 10);
260 if (errno == 0 && endptr != (e->d_name + longprefix_len)) {
261 snprintf(buf, buflen, "%s/uio:uio%u", dirname,
268 /* No uio resource found */
270 PMD_INIT_LOG(ERR, "Could not find uio resource");
278 legacy_virtio_has_msix(const struct rte_pci_addr *loc)
281 char dirname[PATH_MAX];
283 snprintf(dirname, sizeof(dirname),
284 SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs",
285 loc->domain, loc->bus, loc->devid, loc->function);
287 d = opendir(dirname);
294 /* Extract I/O port numbers from sysfs */
296 virtio_resource_init_by_uio(struct rte_pci_device *pci_dev)
298 char dirname[PATH_MAX];
299 char filename[PATH_MAX];
300 unsigned long start, size;
301 unsigned int uio_num;
303 if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname), &uio_num) < 0)
306 /* get portio size */
307 snprintf(filename, sizeof(filename),
308 "%s/portio/port0/size", dirname);
309 if (parse_sysfs_value(filename, &size) < 0) {
310 PMD_INIT_LOG(ERR, "%s(): cannot parse size",
315 /* get portio start */
316 snprintf(filename, sizeof(filename),
317 "%s/portio/port0/start", dirname);
318 if (parse_sysfs_value(filename, &start) < 0) {
319 PMD_INIT_LOG(ERR, "%s(): cannot parse portio start",
323 pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
324 pci_dev->mem_resource[0].len = (uint64_t)size;
326 "PCI Port IO found start=0x%lx with size=0x%lx",
330 memset(dirname, 0, sizeof(dirname));
331 snprintf(dirname, sizeof(dirname), "/dev/uio%u", uio_num);
332 pci_dev->intr_handle.fd = open(dirname, O_RDWR);
333 if (pci_dev->intr_handle.fd < 0) {
334 PMD_INIT_LOG(ERR, "Cannot open %s: %s\n",
335 dirname, strerror(errno));
339 pci_dev->intr_handle.type = RTE_INTR_HANDLE_UIO;
340 pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
345 /* Extract port I/O numbers from proc/ioports */
347 virtio_resource_init_by_ioports(struct rte_pci_device *pci_dev)
357 snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
358 pci_dev->addr.domain,
361 pci_dev->addr.function);
363 fp = fopen("/proc/ioports", "r");
365 PMD_INIT_LOG(ERR, "%s(): can't open ioports", __func__);
369 while (getdelim(&line, &linesz, '\n', fp) > 0) {
374 n = strcspn(ptr, ":");
378 while (*left && isspace(*left))
381 if (!strncmp(left, pci_id, strlen(pci_id))) {
384 while (*ptr && isspace(*ptr))
387 sscanf(ptr, "%04hx-%04hx", &start, &end);
388 size = end - start + 1;
400 pci_dev->mem_resource[0].addr = (void *)(uintptr_t)(uint32_t)start;
401 pci_dev->mem_resource[0].len = (uint64_t)size;
403 "PCI Port IO found start=0x%x with size=0x%x",
406 /* can't support lsc interrupt without uio */
407 pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC;
412 /* Extract I/O port numbers from sysfs */
414 legacy_virtio_resource_init(struct rte_pci_device *pci_dev)
416 if (virtio_resource_init_by_uio(pci_dev) == 0)
419 return virtio_resource_init_by_ioports(pci_dev);
424 legayc_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
426 /* nic_uio does not enable interrupts, return 0 (false). */
431 legacy_virtio_resource_init(struct rte_pci_device *pci_dev __rte_unused)
433 /* no setup required */
438 static const struct virtio_pci_ops legacy_ops = {
439 .read_dev_cfg = legacy_read_dev_config,
440 .write_dev_cfg = legacy_write_dev_config,
441 .reset = legacy_reset,
442 .get_status = legacy_get_status,
443 .set_status = legacy_set_status,
444 .get_features = legacy_get_features,
445 .set_features = legacy_set_features,
446 .get_isr = legacy_get_isr,
447 .set_config_irq = legacy_set_config_irq,
448 .get_queue_num = legacy_get_queue_num,
449 .setup_queue = legacy_setup_queue,
450 .del_queue = legacy_del_queue,
451 .notify_queue = legacy_notify_queue,
/*
 * Width-specific accessors for memory-mapped device registers.
 * The volatile casts force a real load/store of the given width on
 * every call; the compiler may not cache or elide these accesses.
 */
static inline uint8_t
io_read8(uint8_t *addr)
{
	return *(volatile uint8_t *)addr;
}

static inline void
io_write8(uint8_t val, uint8_t *addr)
{
	*(volatile uint8_t *)addr = val;
}

static inline uint16_t
io_read16(uint16_t *addr)
{
	return *(volatile uint16_t *)addr;
}

static inline void
io_write16(uint16_t val, uint16_t *addr)
{
	*(volatile uint16_t *)addr = val;
}

static inline uint32_t
io_read32(uint32_t *addr)
{
	return *(volatile uint32_t *)addr;
}

static inline void
io_write32(uint32_t val, uint32_t *addr)
{
	*(volatile uint32_t *)addr = val;
}

/* Write a 64-bit value as two 32-bit halves (low first, then high). */
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	io_write32(val & ((1ULL << 32) - 1), lo);
	io_write32(val >> 32, hi);
}
499 modern_read_dev_config(struct virtio_hw *hw, size_t offset,
500 void *dst, int length)
504 uint8_t old_gen, new_gen;
507 old_gen = io_read8(&hw->common_cfg->config_generation);
510 for (i = 0; i < length; i++)
511 *p++ = io_read8((uint8_t *)hw->dev_cfg + offset + i);
513 new_gen = io_read8(&hw->common_cfg->config_generation);
514 } while (old_gen != new_gen);
518 modern_write_dev_config(struct virtio_hw *hw, size_t offset,
519 const void *src, int length)
522 const uint8_t *p = src;
524 for (i = 0; i < length; i++)
525 io_write8(*p++, (uint8_t *)hw->dev_cfg + offset + i);
529 modern_get_features(struct virtio_hw *hw)
531 uint32_t features_lo, features_hi;
533 io_write32(0, &hw->common_cfg->device_feature_select);
534 features_lo = io_read32(&hw->common_cfg->device_feature);
536 io_write32(1, &hw->common_cfg->device_feature_select);
537 features_hi = io_read32(&hw->common_cfg->device_feature);
539 return ((uint64_t)features_hi << 32) | features_lo;
543 modern_set_features(struct virtio_hw *hw, uint64_t features)
545 io_write32(0, &hw->common_cfg->guest_feature_select);
546 io_write32(features & ((1ULL << 32) - 1),
547 &hw->common_cfg->guest_feature);
549 io_write32(1, &hw->common_cfg->guest_feature_select);
550 io_write32(features >> 32,
551 &hw->common_cfg->guest_feature);
555 modern_get_status(struct virtio_hw *hw)
557 return io_read8(&hw->common_cfg->device_status);
561 modern_set_status(struct virtio_hw *hw, uint8_t status)
563 io_write8(status, &hw->common_cfg->device_status);
567 modern_reset(struct virtio_hw *hw)
569 modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
570 modern_get_status(hw);
574 modern_get_isr(struct virtio_hw *hw)
576 return io_read8(hw->isr);
580 modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
582 io_write16(vec, &hw->common_cfg->msix_config);
583 return io_read16(&hw->common_cfg->msix_config);
587 modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
589 io_write16(queue_id, &hw->common_cfg->queue_select);
590 return io_read16(&hw->common_cfg->queue_size);
594 modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
596 uint64_t desc_addr, avail_addr, used_addr;
599 desc_addr = vq->mz->phys_addr;
600 avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
601 used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
602 ring[vq->vq_nentries]),
603 VIRTIO_PCI_VRING_ALIGN);
605 io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
607 io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
608 &hw->common_cfg->queue_desc_hi);
609 io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
610 &hw->common_cfg->queue_avail_hi);
611 io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
612 &hw->common_cfg->queue_used_hi);
614 notify_off = io_read16(&hw->common_cfg->queue_notify_off);
615 vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
616 notify_off * hw->notify_off_multiplier);
618 io_write16(1, &hw->common_cfg->queue_enable);
620 PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
621 PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
622 PMD_INIT_LOG(DEBUG, "\t aval_addr: %" PRIx64, avail_addr);
623 PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
624 PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
625 vq->notify_addr, notify_off);
629 modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
631 io_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
633 io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
634 &hw->common_cfg->queue_desc_hi);
635 io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
636 &hw->common_cfg->queue_avail_hi);
637 io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
638 &hw->common_cfg->queue_used_hi);
640 io_write16(0, &hw->common_cfg->queue_enable);
644 modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
646 io_write16(1, vq->notify_addr);
649 static const struct virtio_pci_ops modern_ops = {
650 .read_dev_cfg = modern_read_dev_config,
651 .write_dev_cfg = modern_write_dev_config,
652 .reset = modern_reset,
653 .get_status = modern_get_status,
654 .set_status = modern_set_status,
655 .get_features = modern_get_features,
656 .set_features = modern_set_features,
657 .get_isr = modern_get_isr,
658 .set_config_irq = modern_set_config_irq,
659 .get_queue_num = modern_get_queue_num,
660 .setup_queue = modern_setup_queue,
661 .del_queue = modern_del_queue,
662 .notify_queue = modern_notify_queue,
667 vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
668 void *dst, int length)
670 hw->vtpci_ops->read_dev_cfg(hw, offset, dst, length);
674 vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
675 const void *src, int length)
677 hw->vtpci_ops->write_dev_cfg(hw, offset, src, length);
681 vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
686 * Limit negotiated features to what the driver, virtqueue, and
689 features = host_features & hw->guest_features;
690 hw->vtpci_ops->set_features(hw, features);
696 vtpci_reset(struct virtio_hw *hw)
698 hw->vtpci_ops->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
699 /* flush status write */
700 hw->vtpci_ops->get_status(hw);
704 vtpci_reinit_complete(struct virtio_hw *hw)
706 vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
710 vtpci_set_status(struct virtio_hw *hw, uint8_t status)
712 if (status != VIRTIO_CONFIG_STATUS_RESET)
713 status |= hw->vtpci_ops->get_status(hw);
715 hw->vtpci_ops->set_status(hw, status);
719 vtpci_get_status(struct virtio_hw *hw)
721 return hw->vtpci_ops->get_status(hw);
725 vtpci_isr(struct virtio_hw *hw)
727 return hw->vtpci_ops->get_isr(hw);
731 /* Enable one vector (0) for Link State Intrerrupt */
733 vtpci_irq_config(struct virtio_hw *hw, uint16_t vec)
735 return hw->vtpci_ops->set_config_irq(hw, vec);
739 get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
741 uint8_t bar = cap->bar;
742 uint32_t length = cap->length;
743 uint32_t offset = cap->offset;
747 PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
751 if (offset + length < offset) {
752 PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
757 if (offset + length > dev->mem_resource[bar].len) {
759 "invalid cap: overflows bar space: %u > %" PRIu64,
760 offset + length, dev->mem_resource[bar].len);
764 base = dev->mem_resource[bar].addr;
766 PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
770 return base + offset;
774 virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
777 struct virtio_pci_cap cap;
780 if (rte_eal_pci_map_device(dev) < 0) {
781 PMD_INIT_LOG(DEBUG, "failed to map pci device!");
785 ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
787 PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
792 ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
795 "failed to read pci cap at pos: %x", pos);
799 if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
801 "[%2x] skipping non VNDR cap id: %02x",
807 "[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
808 pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
810 switch (cap.cfg_type) {
811 case VIRTIO_PCI_CAP_COMMON_CFG:
812 hw->common_cfg = get_cfg_addr(dev, &cap);
814 case VIRTIO_PCI_CAP_NOTIFY_CFG:
815 rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
816 4, pos + sizeof(cap));
817 hw->notify_base = get_cfg_addr(dev, &cap);
819 case VIRTIO_PCI_CAP_DEVICE_CFG:
820 hw->dev_cfg = get_cfg_addr(dev, &cap);
822 case VIRTIO_PCI_CAP_ISR_CFG:
823 hw->isr = get_cfg_addr(dev, &cap);
831 if (hw->common_cfg == NULL || hw->notify_base == NULL ||
832 hw->dev_cfg == NULL || hw->isr == NULL) {
833 PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
837 PMD_INIT_LOG(INFO, "found modern virtio pci device.");
839 PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
840 PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
841 PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
842 PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
843 hw->notify_base, hw->notify_off_multiplier);
849 vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
854 * Try if we can succeed reading virtio pci caps, which exists
855 * only on modern pci device. If failed, we fallback to legacy
858 if (virtio_read_caps(dev, hw) == 0) {
859 PMD_INIT_LOG(INFO, "modern virtio pci detected.");
860 hw->vtpci_ops = &modern_ops;
862 dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC;
866 PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
867 if (legacy_virtio_resource_init(dev) < 0)
870 hw->vtpci_ops = &legacy_ops;
871 hw->use_msix = legacy_virtio_has_msix(&dev->addr);
872 hw->io_base = (uint32_t)(uintptr_t)dev->mem_resource[0].addr;