/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h; however,
 * we can't simply include that header here, as there is no such
 * file on non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

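/*
 * In the legacy (virtio 0.9.5) layout the common header occupies 20 bytes.
 * When MSI-X is enabled, two extra 16-bit vector registers (the config
 * vector and the queue vector) are inserted before the device-specific
 * region, which is why it then starts at offset 24.
 */
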
static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

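/*
 * Worked out: with VIRTIO_PCI_QUEUE_ADDR_SHIFT == 12 (4KB pages), a 32-bit
 * page frame number can address at most 1ULL << (32 + 12) bytes = 16TB,
 * which is where the limit checked above comes from.
 */
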
/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, qemu assumes the cpu is
 * big endian and enforces this for the virtio-net stuff.
 */

static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}
		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
				VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}
		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
				 VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
				VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_INIT_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
				 VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
				 VIRTIO_MSI_CONFIG_VECTOR);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
				VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
				 VIRTIO_MSI_QUEUE_VECTOR);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_SEL);
	/* Writing a page frame number of 0 releases the queue. */
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
				 VIRTIO_PCI_QUEUE_NOTIFY);
}

#ifdef RTE_EXEC_ENV_LINUXAPP
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc)
{
	DIR *d;
	char dirname[PATH_MAX];

	snprintf(dirname, sizeof(dirname),
		 "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
		 loc->domain, loc->bus, loc->devid, loc->function);

	d = opendir(dirname);
	if (d)
		closedir(d);

	return d != NULL;
}
#else
static int
legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
{
	/* nic_uio does not enable interrupts, return 0 (false). */
	return 0;
}
#endif

static int
legacy_virtio_resource_init(struct rte_pci_device *pci_dev,
			    struct virtio_hw *hw, uint32_t *dev_flags)
{
	if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
		return -1;

	/* Link-state interrupts are only usable when an interrupt
	 * handle (UIO/VFIO) is actually available.
	 */
	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN)
		*dev_flags |= RTE_ETH_DEV_INTR_LSC;
	else
		*dev_flags &= ~RTE_ETH_DEV_INTR_LSC;

	return 0;
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.reset		= legacy_reset,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.set_queue_irq	= legacy_set_queue_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
};

static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

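/*
 * The modern common config exposes 64-bit queue addresses as pairs of
 * 32-bit registers (e.g. queue_desc_lo/queue_desc_hi), hence the
 * two-part write above.
 */
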
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

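/*
 * The generation loop above implements the virtio 1.0 config read protocol:
 * the device bumps config_generation whenever it changes the config space,
 * so a multi-byte read is accepted only if the generation is unchanged
 * across the copy; otherwise the possibly torn read is retried.
 */
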
static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

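/*
 * device_feature is a 32-bit window: writing 0 or 1 to
 * device_feature_select exposes feature bits 0-31 or 32-63 respectively,
 * so the 64-bit feature set is assembled from two reads, as done above.
 */
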
static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* Read back so the status write is flushed before we proceed. */
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		     vq->notify_addr, notify_off);

	return 0;
}

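/*
 * Layout example for a 256-entry queue (assuming desc_addr is 4KB aligned):
 * the descriptor table takes 256 * sizeof(struct vring_desc) = 4096 bytes,
 * the avail ring follows at desc_addr + 4096 and takes 4 + 256 * 2 = 516
 * bytes, and the used ring is rounded up to the next VIRTIO_PCI_VRING_ALIGN
 * (4096) boundary, i.e. used_addr = desc_addr + 8192.
 */
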
static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

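/*
 * Usage sketch (hypothetical caller, not part of this file): a device
 * driver reads a device-specific field through this indirection, e.g.
 * the MAC address of a virtio-net device:
 *
 *	uint8_t mac[6];
 *
 *	vtpci_read_dev_config(hw, offsetof(struct virtio_net_config, mac),
 *			      &mac, sizeof(mac));
 */
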
uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	/* PCI devices have at most six BARs (0-5). */
	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_eal_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	/* Walk the capability chain, recording each virtio vendor cap. */
	while (pos) {
		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX)
			hw->use_msix = 1;

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
						4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if there is a port map error when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if a kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw,
	   uint32_t *dev_flags)
{
	/*
	 * Try to read the virtio pci caps, which exist only on modern
	 * pci devices. If that fails, we fall back to legacy virtio
	 * handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		*dev_flags |= RTE_ETH_DEV_INTR_LSC;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->type !=
			RTE_DEVTYPE_WHITELISTED_PCI)) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->use_msix = legacy_virtio_has_msix(&dev->addr);
	hw->modern = 0;

	return 0;
}
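
/*
 * Usage sketch (hypothetical caller): probing is expected to pick the
 * transport first and only then run the status/feature handshake:
 *
 *	if (vtpci_init(pci_dev, hw, &dev_flags) != 0)
 *		return -1;
 *	vtpci_reset(hw);
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
 *	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
 *	vtpci_negotiate_features(hw, VTPCI_OPS(hw)->get_features(hw));
 */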