X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_pci.c;h=4468e89cbaf4c678aed1cebccc7a3e63c34d1079;hb=7edc7158d771571d432c76ec66b34aa4bfc88be8;hp=1e1775720fbc73bc7cc5e3b1e29e8d18517fca96;hpb=a60a0c15076224b664668c09a96922f9a2c6b142;p=dpdk.git

diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c
index 1e1775720f..4468e89cba 100644
--- a/drivers/net/virtio/virtio_pci.c
+++ b/drivers/net/virtio/virtio_pci.c
@@ -1,43 +1,15 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 #include <stdint.h>
 
-#ifdef RTE_EXEC_ENV_LINUXAPP
+#ifdef RTE_EXEC_ENV_LINUX
 #include <dirent.h>
 #include <fcntl.h>
 #endif
 
 #include <rte_io.h>
+#include <rte_bus.h>
 
 #include "virtio_pci.h"
 #include "virtio_logs.h"
@@ -56,7 +28,8 @@
  * The remaining space is defined by each driver as the per-driver
  * configuration space.
  */
-#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)
+#define VIRTIO_PCI_CONFIG(hw) \
+		(((hw)->use_msix == VIRTIO_MSIX_ENABLED) ? 24 : 20)
 
 static inline int
 check_vq_phys_addr_ok(struct virtqueue *vq)
@@ -95,17 +68,17 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
 	while (length > 0) {
 		if (length >= 4) {
 			size = 4;
-			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
 		} else if (length >= 2) {
 			size = 2;
-			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
 		} else {
 			size = 1;
-			rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, size,
+			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		}
 
@@ -114,8 +87,8 @@ legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
 		length -= size;
 	}
 #else
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), dst, length,
-			VIRTIO_PCI_CONFIG(hw) + offset);
+	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
+			    VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
 
@@ -134,16 +107,16 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
 		if (length >= 4) {
 			size = 4;
 			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
-			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
+			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		} else if (length >= 2) {
 			size = 2;
 			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
-			rte_eal_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
+			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		} else {
 			size = 1;
-			rte_eal_pci_ioport_write(VTPCI_IO(hw), src, size,
+			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
 				VIRTIO_PCI_CONFIG(hw) + offset);
 		}
 
@@ -152,8 +125,8 @@ legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
 		length -= size;
 	}
 #else
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), src, length,
-			VIRTIO_PCI_CONFIG(hw) + offset);
+	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
+			     VIRTIO_PCI_CONFIG(hw) + offset);
 #endif
 }
 
@@ -162,8 +135,7 @@ legacy_get_features(struct virtio_hw *hw)
 {
 	uint32_t dst;
 
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 4,
-				VIRTIO_PCI_HOST_FEATURES);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
 	return dst;
 }
 
@@ -175,8 +147,8 @@ legacy_set_features(struct virtio_hw *hw, uint64_t features)
 			"only 32 bit features are allowed for legacy virtio!");
 		return;
 	}
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &features, 4,
-		VIRTIO_PCI_GUEST_FEATURES);
+	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
+			     VIRTIO_PCI_GUEST_FEATURES);
 }
 
 static uint8_t
@@ -184,20 +156,14 @@ legacy_get_status(struct virtio_hw *hw)
 {
 	uint8_t dst;
 
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
 	return dst;
 }
 
 static void
 legacy_set_status(struct virtio_hw *hw, uint8_t status)
 {
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
-}
-
-static void
-legacy_reset(struct virtio_hw *hw)
-{
-	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
+	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
 }
 
 static uint8_t
@@ -205,7 +171,7 @@ legacy_get_isr(struct virtio_hw *hw)
 {
 	uint8_t dst;
 
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
 	return dst;
 }
 
@@ -215,10 +181,8 @@ legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
 {
 	uint16_t dst;
 
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
-		VIRTIO_MSI_CONFIG_VECTOR);
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2,
-		VIRTIO_MSI_CONFIG_VECTOR);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
 	return dst;
 }
 
@@ -227,11 +191,10 @@ legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
 {
 	uint16_t dst;
 
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
-		VIRTIO_PCI_QUEUE_SEL);
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vec, 2,
-		VIRTIO_MSI_QUEUE_VECTOR);
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+			     VIRTIO_PCI_QUEUE_SEL);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
 	return dst;
 }
 
@@ -240,9 +203,8 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
 {
 	uint16_t dst;
 
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2,
-		VIRTIO_PCI_QUEUE_SEL);
-	rte_eal_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
+	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
+	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
 	return dst;
 }
 
@@ -254,10 +216,10 @@ legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
 	if (!check_vq_phys_addr_ok(vq))
 		return -1;
 
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
-		VIRTIO_PCI_QUEUE_SEL);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+			     VIRTIO_PCI_QUEUE_SEL);
 	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
 
 	return 0;
 }
@@ -267,48 +229,21 @@ legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
 	uint32_t src = 0;
 
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
-		VIRTIO_PCI_QUEUE_SEL);
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+			     VIRTIO_PCI_QUEUE_SEL);
+	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
 }
 
 static void
 legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
 {
-	rte_eal_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
-		VIRTIO_PCI_QUEUE_NOTIFY);
+	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
+			     VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
-#ifdef RTE_EXEC_ENV_LINUXAPP
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc)
-{
-	DIR *d;
-	char dirname[PATH_MAX];
-
-	snprintf(dirname, sizeof(dirname),
-		 "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(),
-		 loc->domain, loc->bus, loc->devid, loc->function);
-
-	d = opendir(dirname);
-	if (d)
-		closedir(d);
-
-	return d != NULL;
-}
-#else
-static int
-legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused)
-{
-	/* nic_uio does not enable interrupts, return 0 (false). */
-	return 0;
-}
-#endif
-
 const struct virtio_pci_ops legacy_ops = {
 	.read_dev_cfg	= legacy_read_dev_config,
 	.write_dev_cfg	= legacy_write_dev_config,
-	.reset		= legacy_reset,
 	.get_status	= legacy_get_status,
 	.set_status	= legacy_set_status,
 	.get_features	= legacy_get_features,
@@ -397,13 +332,6 @@ modern_set_status(struct virtio_hw *hw, uint8_t status)
 	rte_write8(status, &hw->common_cfg->device_status);
 }
 
-static void
-modern_reset(struct virtio_hw *hw)
-{
-	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
-	modern_get_status(hw);
-}
-
 static uint8_t
 modern_get_isr(struct virtio_hw *hw)
 {
@@ -496,7 +424,6 @@ modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
 const struct virtio_pci_ops modern_ops = {
 	.read_dev_cfg	= modern_read_dev_config,
 	.write_dev_cfg	= modern_write_dev_config,
-	.reset		= modern_reset,
 	.get_status	= modern_get_status,
 	.set_status	= modern_set_status,
 	.get_features	= modern_get_features,
@@ -583,7 +510,7 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
 	uint32_t offset = cap->offset;
 	uint8_t  *base;
 
-	if (bar > 5) {
+	if (bar >= PCI_MAX_RESOURCE) {
 		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
 		return NULL;
 	}
@@ -610,6 +537,8 @@ get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
 	return base + offset;
 }
 
+#define PCI_MSIX_ENABLE 0x8000
+
 static int
 virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
 {
@@ -617,27 +546,49 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
 	struct virtio_pci_cap cap;
 	int ret;
 
-	if (rte_eal_pci_map_device(dev)) {
+	if (rte_pci_map_device(dev)) {
 		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
 		return -1;
 	}
 
-	ret = rte_eal_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
-	if (ret < 0) {
-		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
+	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+	if (ret != 1) {
+		PMD_INIT_LOG(DEBUG,
+			     "failed to read pci capability list, ret %d", ret);
 		return -1;
 	}
 
 	while (pos) {
-		ret = rte_eal_pci_read_config(dev, &cap, sizeof(cap), pos);
-		if (ret < 0) {
-			PMD_INIT_LOG(ERR,
-				"failed to read pci cap at pos: %x", pos);
+		ret = rte_pci_read_config(dev, &cap, 2, pos);
+		if (ret != 2) {
+			PMD_INIT_LOG(DEBUG,
+				     "failed to read pci cap at pos: %x ret %d",
+				     pos, ret);
 			break;
 		}
 
-		if (cap.cap_vndr == PCI_CAP_ID_MSIX)
-			hw->use_msix = 1;
+		if (cap.cap_vndr == PCI_CAP_ID_MSIX) {
+			/* Transitional devices would also have this capability,
+			 * that's why we also check if msix is enabled.
+			 * 1st byte is cap ID; 2nd byte is the position of next
+			 * cap; next two bytes are the flags.
+			 */
+			uint16_t flags;
+
+			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+					pos + 2);
+			if (ret != sizeof(flags)) {
+				PMD_INIT_LOG(DEBUG,
+					"failed to read pci cap at pos:"
+					" %x ret %d", pos + 2, ret);
+				break;
+			}
+
+			if (flags & PCI_MSIX_ENABLE)
+				hw->use_msix = VIRTIO_MSIX_ENABLED;
+			else
+				hw->use_msix = VIRTIO_MSIX_DISABLED;
+		}
 
 		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
 			PMD_INIT_LOG(DEBUG,
@@ -646,6 +597,14 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
 			goto next;
 		}
 
+		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
+		if (ret != sizeof(cap)) {
+			PMD_INIT_LOG(DEBUG,
+				     "failed to read pci cap at pos: %x ret %d",
+				     pos, ret);
+			break;
+		}
+
 		PMD_INIT_LOG(DEBUG,
 			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
 			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);
@@ -655,9 +614,15 @@ virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
 			hw->common_cfg = get_cfg_addr(dev, &cap);
 			break;
 		case VIRTIO_PCI_CAP_NOTIFY_CFG:
-			rte_eal_pci_read_config(dev, &hw->notify_off_multiplier,
-					4, pos + sizeof(cap));
-			hw->notify_base = get_cfg_addr(dev, &cap);
+			ret = rte_pci_read_config(dev,
+					&hw->notify_off_multiplier,
+					4, pos + sizeof(cap));
+			if (ret != 4)
+				PMD_INIT_LOG(DEBUG,
+					"failed to read notify_off_multiplier, ret %d",
+					ret);
+			else
+				hw->notify_base = get_cfg_addr(dev, &cap);
 			break;
 		case VIRTIO_PCI_CAP_DEVICE_CFG:
 			hw->dev_cfg = get_cfg_addr(dev, &cap);
@@ -712,11 +677,12 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
 	}
 
 	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
-	if (rte_eal_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
+	if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
+		rte_pci_unmap_device(dev);
 		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
 		    (!dev->device.devargs ||
-		     dev->device.devargs->type !=
-			RTE_DEVTYPE_WHITELISTED_PCI)) {
+		     dev->device.devargs->bus !=
+		     rte_bus_find_by_name("pci"))) {
 			PMD_INIT_LOG(INFO,
 				"skip kernel managed virtio device.");
 			return 1;
@@ -725,8 +691,55 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
 	}
 
 	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
-	hw->use_msix = legacy_virtio_has_msix(&dev->addr);
 	hw->modern   = 0;
 
 	return 0;
 }
+
+enum virtio_msix_status
+vtpci_msix_detect(struct rte_pci_device *dev)
+{
+	uint8_t pos;
+	int ret;
+
+	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
+	if (ret != 1) {
+		PMD_INIT_LOG(DEBUG,
+			     "failed to read pci capability list, ret %d", ret);
+		return VIRTIO_MSIX_NONE;
+	}
+
+	while (pos) {
+		uint8_t cap[2];
+
+		ret = rte_pci_read_config(dev, cap, sizeof(cap), pos);
+		if (ret != sizeof(cap)) {
+			PMD_INIT_LOG(DEBUG,
+				     "failed to read pci cap at pos: %x ret %d",
+				     pos, ret);
+			break;
+		}
+
+		if (cap[0] == PCI_CAP_ID_MSIX) {
+			uint16_t flags;
+
+			ret = rte_pci_read_config(dev, &flags, sizeof(flags),
+					pos + sizeof(cap));
+			if (ret != sizeof(flags)) {
+				PMD_INIT_LOG(DEBUG,
+					"failed to read pci cap at pos:"
+					" %x ret %d", pos + 2, ret);
+				break;
+			}
+
+			if (flags & PCI_MSIX_ENABLE)
+				return VIRTIO_MSIX_ENABLED;
+			else
+				return VIRTIO_MSIX_DISABLED;
+		}
+
+		pos = cap[1];
+	}
+
+	return VIRTIO_MSIX_NONE;
+}
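Illustrative usage (not part of the patch above): a minimal sketch of how a caller in the virtio ethdev code might use the vtpci_msix_detect() helper introduced by this diff to refresh the cached hw->use_msix value, for example when interrupts are reconfigured at runtime. The function name virtio_refresh_msix_state and the exact call site are assumptions for illustration only; vtpci_msix_detect(), struct virtio_hw and the VIRTIO_MSIX_* values come from the patch itself, and RTE_ETH_DEV_TO_PCI() is the standard DPDK helper from rte_ethdev_pci.h.

/* Hypothetical caller sketch (not from the patch): re-read the MSI-X
 * state from PCI config space and cache it in the device private data,
 * so VIRTIO_PCI_CONFIG() picks the right legacy config offset
 * (24 with MSI-X enabled, 20 otherwise).
 */
#include <rte_ethdev_pci.h>	/* RTE_ETH_DEV_TO_PCI() */

#include "virtio_pci.h"		/* vtpci_msix_detect(), struct virtio_hw */

static void
virtio_refresh_msix_state(struct rte_eth_dev *eth_dev)
{
	struct virtio_hw *hw = eth_dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/* Returns VIRTIO_MSIX_NONE, VIRTIO_MSIX_DISABLED or
	 * VIRTIO_MSIX_ENABLED, as defined alongside this patch.
	 */
	hw->use_msix = vtpci_msix_detect(pci_dev);
}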