/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file on non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(dev) \
		(((dev)->msix_status == VIRTIO_MSIX_ENABLED) ? 24 : 20)

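/*
 * The constants above come from the legacy interface layout: the common
 * header occupies 20 bytes (host/guest features at 0 and 4, queue PFN at
 * 8, queue size/select/notify at 12/14/16, status at 18, ISR at 19).
 * When MSI-X is enabled, the config and queue vector registers are
 * appended at offsets 20 and 22, so VIRTIO_PCI_CONFIG() above returns 24.
 */
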
struct virtio_pci_internal {
	struct rte_pci_ioport io;
};

#define VTPCI_IO(hw) (&virtio_pci_internal[(hw)->port_id].io)

struct virtio_pci_internal virtio_pci_internal[RTE_MAX_ETHPORTS];

static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts 32 bit page frame numbers.
	 * Check if the allocated physical memory exceeds 16TB,
	 * i.e. 1ULL << (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32).
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

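/*
 * Bit 15 of the MSI-X Message Control word, which sits two bytes into
 * the MSI-X capability (after the cap ID and next-capability pointer);
 * hence the reads at pos + 2 below.
 */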
#define PCI_MSIX_ENABLE 0x8000

static enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return VIRTIO_MSIX_NONE;
	}

	while (pos) {
		uint8_t cap[2];

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap[0] == PCI_CAP_ID_MSIX) {
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + sizeof(cap));
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos: %x ret %d",
					     pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				return VIRTIO_MSIX_ENABLED;
			else
				return VIRTIO_MSIX_DISABLED;
		}

		pos = cap[1];
	}

	return VIRTIO_MSIX_NONE;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, QEMU assumes the CPU is
 * big endian and enforces this for the virtio-net devices.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
		VIRTIO_PCI_CONFIG(dev) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(dev) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
		VIRTIO_PCI_CONFIG(dev) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
			     VIRTIO_PCI_GUEST_FEATURES);
}

static int
legacy_features_ok(struct virtio_hw *hw __rte_unused)
{
	/* Legacy devices have no FEATURES_OK status bit to re-check. */
	return 0;
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
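	/* Legacy devices take a page frame number: the 4KB-aligned ring
	 * address shifted right by VIRTIO_PCI_QUEUE_ADDR_SHIFT (12).
	 */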
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
			     VIRTIO_PCI_QUEUE_NOTIFY);
}

static void
legacy_intr_detect(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	dev->msix_status = vtpci_msix_detect(dev->pci_dev);
	hw->intr_lsc = !!dev->msix_status;
}

static int
legacy_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);
	rte_pci_ioport_unmap(VTPCI_IO(hw));

	return 0;
}

const struct virtio_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.features_ok	= legacy_features_ok,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.set_queue_irq	= legacy_set_queue_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
	.intr_detect	= legacy_intr_detect,
	.dev_close	= legacy_dev_close,
};

static void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
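	/* The 64-bit queue addresses in the common config are exposed as
	 * adjacent 32-bit lo/hi registers, hence two 32-bit MMIO writes.
	 */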
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;
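
	/* The device bumps config_generation whenever the device config
	 * changes under the reader; retry the byte-wise copy until the
	 * generation is unchanged, i.e. the snapshot is consistent.
	 */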
	do {
		old_gen = rte_read8(&dev->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)dev->dev_cfg + offset + i);

		new_gen = rte_read8(&dev->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)dev->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint32_t features_lo, features_hi;
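
	/* device_feature is a 32-bit window: device_feature_select picks
	 * which half is visible (0 = bits 0-31, 1 = bits 32-63).
	 */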
	rte_write32(0, &dev->common_cfg->device_feature_select);
	features_lo = rte_read32(&dev->common_cfg->device_feature);

	rte_write32(1, &dev->common_cfg->device_feature_select);
	features_hi = rte_read32(&dev->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write32(0, &dev->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &dev->common_cfg->guest_feature);

	rte_write32(1, &dev->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &dev->common_cfg->guest_feature);
}

static int
modern_features_ok(struct virtio_hw *hw)
{
	if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
		PMD_INIT_LOG(ERR, "Version 1+ required with modern devices");
		return -1;
	}

	return 0;
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_read8(&dev->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write8(status, &dev->common_cfg->device_status);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_read8(dev->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vec, &dev->common_cfg->msix_config);
	return rte_read16(&dev->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);
	rte_write16(vec, &dev->common_cfg->queue_msix_vector);
	return rte_read16(&dev->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(queue_id, &dev->common_cfg->queue_select);
	return rte_read16(&dev->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;
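
	/* The split ring is laid out contiguously: the descriptor table
	 * first, the avail ring right after it, and the used ring on the
	 * next ring-alignment boundary.
	 */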
	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &dev->common_cfg->queue_desc_lo,
			   &dev->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &dev->common_cfg->queue_avail_lo,
			   &dev->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &dev->common_cfg->queue_used_lo,
			   &dev->common_cfg->queue_used_hi);

	notify_off = rte_read16(&dev->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)dev->notify_base +
				   notify_off * dev->notify_off_multiplier);

	rte_write16(1, &dev->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);

	io_write64_twopart(0, &dev->common_cfg->queue_desc_lo,
			   &dev->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &dev->common_cfg->queue_avail_lo,
			   &dev->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &dev->common_cfg->queue_used_lo,
			   &dev->common_cfg->queue_used_hi);

	rte_write16(0, &dev->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t notify_data;

	if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (virtio_with_packed_queue(hw)) {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
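		/* e.g. queue index 3, avail index 0x1234 and the wrap
		 * counter set yields notify_data = 0x92340003.
		 */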
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}

static void
modern_intr_detect(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	dev->msix_status = vtpci_msix_detect(dev->pci_dev);
	hw->intr_lsc = !!dev->msix_status;
}

static int
modern_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);

	return 0;
}

const struct virtio_ops modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.features_ok	= modern_features_ok,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
	.intr_detect	= modern_intr_detect,
	.dev_close	= modern_dev_close,
};

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(pci_dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(pci_dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(pci_dev, &cap, 2, pos);
		if (ret != 2) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices would also have this capability,
			 * that's why we also check if msix is enabled.
			 * 1st byte is cap ID; 2nd byte is the position of next
			 * cap; next two bytes are the flags.
			 */
			uint16_t flags;

			ret = rte_pci_read_config(pci_dev, &flags, sizeof(flags),
					pos + 2);
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos: %x ret %d",
					     pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				dev->msix_status = VIRTIO_MSIX_ENABLED;
			else
				dev->msix_status = VIRTIO_MSIX_DISABLED;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		ret = rte_pci_read_config(pci_dev, &cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			dev->common_cfg = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(pci_dev,
					&dev->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				PMD_INIT_LOG(DEBUG,
					"failed to read notify_off_multiplier, ret %d",
					ret);
			else
				dev->notify_base = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			dev->dev_cfg = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			dev->isr = get_cfg_addr(pci_dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (dev->common_cfg == NULL || dev->notify_base == NULL ||
	    dev->dev_cfg == NULL || dev->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", dev->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", dev->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", dev->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		dev->notify_base, dev->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if the port map fails when the driver type is KDRV_NONE.
 *   if the device is marked as allowed but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
{
	struct virtio_hw *hw = &dev->hw;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_pci_dev, hw) != 0);

	dev->pci_dev = pci_dev;

	/*
	 * Check whether we can read the virtio PCI caps, which exist
	 * only on modern PCI devices. If that fails, fall back to
	 * legacy virtio handling.
	 */
	if (virtio_read_caps(pci_dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		VIRTIO_OPS(hw) = &modern_ops;
		dev->modern = true;
		goto msix_detect;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0) {
		rte_pci_unmap_device(pci_dev);
		if (pci_dev->kdrv == RTE_PCI_KDRV_UNKNOWN &&
		    (!pci_dev->device.devargs ||
		     pci_dev->device.devargs->bus !=
		     rte_bus_find_by_name("pci"))) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	VIRTIO_OPS(hw) = &legacy_ops;
	dev->modern = false;

msix_detect:
	VIRTIO_OPS(hw)->intr_detect(hw);

	return 0;
}

void vtpci_legacy_ioport_unmap(struct virtio_hw *hw)
{
	rte_pci_ioport_unmap(VTPCI_IO(hw));
}

int vtpci_legacy_ioport_map(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_pci_ioport_map(dev->pci_dev, 0, VTPCI_IO(hw));
}