/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>
#include <rte_bus.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"
/*
 * Following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST 0x34
#define PCI_CAP_ID_VNDR     0x09
#define PCI_CAP_ID_MSIX     0x11
/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) \
		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
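/*
 * Background on the two offsets above: the legacy virtio header occupies
 * the first 20 bytes of the device's I/O region. When MSI-X is enabled,
 * the two 16 bit vector registers (configuration vector and queue vector)
 * are inserted after it, pushing the device-specific configuration space
 * out to offset 24 (per the virtio 0.9.5 specification).
 */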
struct virtio_pci_internal {
	struct rte_pci_ioport io;
};

#define VTPCI_IO(hw) (&virtio_pci_internal[(hw)->port_id].io)

struct virtio_pci_internal virtio_pci_internal[RTE_MAX_ETHPORTS];
static int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32 bit,
	 * and only accepts 32 bit page frame numbers.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}
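/*
 * To see where the 16TB limit in the check above comes from: with
 * VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12 (4KB-aligned rings), a 32 bit page
 * frame number can address 2^(12 + 32) bytes = 16TB of physical memory.
 */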
#define PCI_MSIX_ENABLE 0x8000
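/*
 * Walk the standard PCI capability list looking for the MSI-X capability.
 * Each capability starts with a 2 byte header: byte 0 is the capability ID
 * and byte 1 is the offset of the next capability; an offset of 0
 * terminates the list. For MSI-X, the 16 bit message control word that
 * follows the header carries the enable bit (PCI_MSIX_ENABLE).
 */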
static enum virtio_msix_status
vtpci_msix_detect(struct rte_pci_device *dev)
{
	uint8_t pos;
	int ret;

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return VIRTIO_MSIX_NONE;
	}

	while (pos) {
		uint8_t cap[2];

		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap[0] == PCI_CAP_ID_MSIX) {
			uint16_t flags;

			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
					pos + sizeof(cap));
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				return VIRTIO_MSIX_ENABLED;
			else
				return VIRTIO_MSIX_DISABLED;
		}

		pos = cap[1];
	}

	return VIRTIO_MSIX_NONE;
}
/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, QEMU assumes the CPU is
 * big endian and enforces this for the virtio-net device config.
 */
static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}
static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}
static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_DRV_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
		VIRTIO_PCI_GUEST_FEATURES);
}
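/*
 * Note that legacy (pre-1.0) virtio only defines feature bits 0-31, which
 * is why anything in the upper half is rejected above; in particular,
 * VIRTIO_F_VERSION_1 (bit 32) can never be negotiated on this path.
 */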
static int
legacy_features_ok(struct virtio_hw *hw __rte_unused)
{
	return 0;
}
static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}
static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}
static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}
/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}
static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}
static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}
static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}
static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	/* Writing a page frame number of 0 releases the queue. */
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}
static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_NOTIFY);
}
static void
legacy_intr_detect(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	hw->use_msix = vtpci_msix_detect(dev->pci_dev);
}
static int
legacy_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);
	rte_pci_ioport_unmap(VTPCI_IO(hw));

	return 0;
}
const struct virtio_ops legacy_ops = {
	.read_dev_cfg = legacy_read_dev_config,
	.write_dev_cfg = legacy_write_dev_config,
	.get_status = legacy_get_status,
	.set_status = legacy_set_status,
	.get_features = legacy_get_features,
	.set_features = legacy_set_features,
	.features_ok = legacy_features_ok,
	.get_isr = legacy_get_isr,
	.set_config_irq = legacy_set_config_irq,
	.set_queue_irq = legacy_set_queue_irq,
	.get_queue_num = legacy_get_queue_num,
	.setup_queue = legacy_setup_queue,
	.del_queue = legacy_del_queue,
	.notify_queue = legacy_notify_queue,
	.intr_detect = legacy_intr_detect,
	.dev_close = legacy_dev_close,
};
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}
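/*
 * Worked example for the helper above: val = 0x0000000123456789 is
 * written as lo = 0x23456789 and hi = 0x00000001. Two 32 bit writes are
 * needed because the common config registers are only 32 bits wide, even
 * for 64 bit quantities such as queue addresses.
 */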
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&dev->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)dev->dev_cfg + offset + i);

		new_gen = rte_read8(&dev->common_cfg->config_generation);
	} while (old_gen != new_gen);
}
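/*
 * The retry loop above implements the virtio 1.0 config generation
 * protocol: config_generation acts like a seqlock. If the device updates
 * the config space while the driver is copying it byte by byte, the
 * generation read afterwards differs from the one read before, and the
 * whole copy is retried until a consistent snapshot is obtained.
 */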
static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)dev->dev_cfg) + offset + i));
}
static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint32_t features_lo, features_hi;

	rte_write32(0, &dev->common_cfg->device_feature_select);
	features_lo = rte_read32(&dev->common_cfg->device_feature);

	rte_write32(1, &dev->common_cfg->device_feature_select);
	features_hi = rte_read32(&dev->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}
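/*
 * device_feature is a banked 32 bit window: writing 0 to
 * device_feature_select exposes feature bits 0-31, writing 1 exposes
 * bits 32-63. The same select/window pattern is used for guest_feature
 * in modern_set_features() below.
 */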
static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write32(0, &dev->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &dev->common_cfg->guest_feature);

	rte_write32(1, &dev->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &dev->common_cfg->guest_feature);
}
static int
modern_features_ok(struct virtio_hw *hw)
{
	if (!virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
		PMD_INIT_LOG(ERR, "Version 1+ required with modern devices");
		return -1;
	}

	return 0;
}
static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_read8(&dev->common_cfg->device_status);
}
static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write8(status, &dev->common_cfg->device_status);
}
static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_read8(dev->isr);
}
static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vec, &dev->common_cfg->msix_config);
	return rte_read16(&dev->common_cfg->msix_config);
}
static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);
	rte_write16(vec, &dev->common_cfg->queue_msix_vector);
	return rte_read16(&dev->common_cfg->queue_msix_vector);
}
static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(queue_id, &dev->common_cfg->queue_select);
	return rte_read16(&dev->common_cfg->queue_size);
}
static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
						ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &dev->common_cfg->queue_desc_lo,
			   &dev->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &dev->common_cfg->queue_avail_lo,
			   &dev->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &dev->common_cfg->queue_used_lo,
			   &dev->common_cfg->queue_used_hi);

	notify_off = rte_read16(&dev->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)dev->notify_base +
			notify_off * dev->notify_off_multiplier);

	rte_write16(1, &dev->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}
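/*
 * Example of the split ring layout computed above, for a 256-entry queue:
 * the descriptor table takes 256 * 16 = 4096 bytes, the avail ring
 * follows at desc_addr + 4096 and occupies 4 + 2 * 256 = 516 bytes, and
 * the used ring is then placed at the next VIRTIO_PCI_VRING_ALIGN
 * (4096 byte) boundary, i.e. desc_addr + 8192.
 */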
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_write16(vq->vq_queue_index, &dev->common_cfg->queue_select);

	io_write64_twopart(0, &dev->common_cfg->queue_desc_lo,
			   &dev->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &dev->common_cfg->queue_avail_lo,
			   &dev->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &dev->common_cfg->queue_used_lo,
			   &dev->common_cfg->queue_used_hi);

	rte_write16(0, &dev->common_cfg->queue_enable);
}
static void
modern_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t notify_data;

	if (!virtio_with_feature(hw, VIRTIO_F_NOTIFICATION_DATA)) {
		rte_write16(vq->vq_queue_index, vq->notify_addr);
		return;
	}

	if (virtio_with_packed_queue(hw)) {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:30]: avail index
		 * Bit[31]: avail wrap counter
		 */
		notify_data = ((uint32_t)(!!(vq->vq_packed.cached_flags &
				VRING_PACKED_DESC_F_AVAIL)) << 31) |
				((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	} else {
		/*
		 * Bit[0:15]: vq queue index
		 * Bit[16:31]: avail index
		 */
		notify_data = ((uint32_t)vq->vq_avail_idx << 16) |
				vq->vq_queue_index;
	}
	rte_write32(notify_data, vq->notify_addr);
}
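/*
 * Example of the VIRTIO_F_NOTIFICATION_DATA encoding above for a split
 * ring: queue index 1 with avail index 0x1234 is written as
 * notify_data = (0x1234 << 16) | 1 = 0x12340001. For a packed ring the
 * avail index is truncated to 15 bits and bit 31 carries the avail wrap
 * counter instead.
 */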
static void
modern_intr_detect(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	hw->use_msix = vtpci_msix_detect(dev->pci_dev);
}
static int
modern_dev_close(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	rte_pci_unmap_device(dev->pci_dev);

	return 0;
}
const struct virtio_ops modern_ops = {
	.read_dev_cfg = modern_read_dev_config,
	.write_dev_cfg = modern_write_dev_config,
	.get_status = modern_get_status,
	.set_status = modern_set_status,
	.get_features = modern_get_features,
	.set_features = modern_set_features,
	.features_ok = modern_features_ok,
	.get_isr = modern_get_isr,
	.set_config_irq = modern_set_config_irq,
	.set_queue_irq = modern_set_queue_irq,
	.get_queue_num = modern_get_queue_num,
	.setup_queue = modern_setup_queue,
	.del_queue = modern_del_queue,
	.notify_queue = modern_notify_queue,
	.intr_detect = modern_intr_detect,
	.dev_close = modern_dev_close,
};
uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VIRTIO_OPS(hw)->get_isr(hw);
}
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t bar = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar >= PCI_MAX_RESOURCE) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}
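/*
 * A note on the checks above: the "offset + length < offset" test catches
 * unsigned 32 bit wrap-around, so a malicious or buggy device cannot use
 * a huge offset/length pair to slip past the subsequent bounds check
 * against the BAR length.
 */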
static int
virtio_read_caps(struct rte_pci_device *pci_dev, struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(pci_dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(pci_dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret != 1) {
		PMD_INIT_LOG(DEBUG,
			     "failed to read pci capability list, ret %d", ret);
		return -1;
	}
	while (pos) {
		ret = rte_pci_read_config(pci_dev, &cap, 2, pos);
		if (ret != 2) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
			/* Transitional devices would also have this capability,
			 * that's why we also check if msix is enabled.
			 * 1st byte is cap ID; 2nd byte is the position of next
			 * cap; next two bytes are the flags.
			 */
			uint16_t flags;

			ret = rte_pci_read_config(pci_dev, &flags, sizeof(flags),
					pos + 2);
			if (ret != sizeof(flags)) {
				PMD_INIT_LOG(DEBUG,
					     "failed to read pci cap at pos:"
					     " %x ret %d", pos + 2, ret);
				break;
			}

			if (flags & PCI_MSIX_ENABLE)
				hw->use_msix = VIRTIO_MSIX_ENABLED;
			else
				hw->use_msix = VIRTIO_MSIX_DISABLED;
		}
		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		ret = rte_pci_read_config(pci_dev, &cap, sizeof(cap), pos);
		if (ret != sizeof(cap)) {
			PMD_INIT_LOG(DEBUG,
				     "failed to read pci cap at pos: %x ret %d",
				     pos, ret);
			break;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			dev->common_cfg = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			ret = rte_pci_read_config(pci_dev,
					&dev->notify_off_multiplier,
					4, pos + sizeof(cap));
			if (ret != 4)
				PMD_INIT_LOG(DEBUG,
					"failed to read notify_off_multiplier, ret %d",
					ret);
			else
				dev->notify_base = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			dev->dev_cfg = get_cfg_addr(pci_dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			dev->isr = get_cfg_addr(pci_dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}
	if (dev->common_cfg == NULL || dev->notify_base == NULL ||
	    dev->dev_cfg == NULL || dev->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", dev->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", dev->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", dev->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		dev->notify_base, dev->notify_off_multiplier);

	return 0;
}
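/*
 * All four vendor capabilities (common config, notify, ISR and
 * device-specific config) are mandatory for a modern device, hence the
 * NULL check above. A transitional device bound in legacy mode will fail
 * this probe and be picked up by the legacy I/O port path in vtpci_init()
 * instead.
 */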
/*
 * Return -1:
 *   if there is an error mapping the device with VFIO/UIO.
 *   if the port map fails when the driver type is KDRV_NONE.
 *   if the device is marked as allowed but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *pci_dev, struct virtio_pci_dev *dev)
{
	struct virtio_hw *hw = &dev->hw;

	RTE_BUILD_BUG_ON(offsetof(struct virtio_pci_dev, hw) != 0);

	dev->pci_dev = pci_dev;

	/*
	 * Try to read the virtio PCI capabilities, which only exist on
	 * modern PCI devices. If that fails, fall back to legacy virtio
	 * handling.
	 */
	if (virtio_read_caps(pci_dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		VIRTIO_OPS(hw) = &modern_ops;
		dev->modern = true;
		goto msix_detect;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0) {
		rte_pci_unmap_device(pci_dev);
		if (pci_dev->kdrv == RTE_PCI_KDRV_UNKNOWN &&
		    (!pci_dev->device.devargs ||
		     pci_dev->device.devargs->bus !=
		     rte_bus_find_by_name("pci"))) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	VIRTIO_OPS(hw) = &legacy_ops;
	dev->modern = false;

msix_detect:
	VIRTIO_OPS(hw)->intr_detect(hw);

	return 0;
}
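/*
 * The RTE_BUILD_BUG_ON in vtpci_init() is what makes virtio_pci_get_dev()
 * safe: struct virtio_hw must remain the first member of struct
 * virtio_pci_dev so the two pointers are interchangeable via a simple
 * cast (effectively a container_of with offset 0).
 */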
void vtpci_legacy_ioport_unmap(struct virtio_hw *hw)
{
	rte_pci_ioport_unmap(VTPCI_IO(hw));
}
int vtpci_legacy_ioport_map(struct virtio_hw *hw)
{
	struct virtio_pci_dev *dev = virtio_pci_get_dev(hw);

	return rte_pci_ioport_map(dev->pci_dev, 0, VTPCI_IO(hw));
}