diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 0a76cce21e..19235a9fc0 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -38,6 +38,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -545,6 +546,9 @@ virtio_free_queues(struct virtio_hw *hw)
 	int queue_type;
 	uint16_t i;
 
+	if (hw->vqs == NULL)
+		return;
+
 	for (i = 0; i < nr_vq; i++) {
 		vq = hw->vqs[i];
 		if (!vq)
@@ -563,9 +567,11 @@ virtio_free_queues(struct virtio_hw *hw)
 		}
 
 		rte_free(vq);
+		hw->vqs[i] = NULL;
 	}
 
 	rte_free(hw->vqs);
+	hw->vqs = NULL;
 }
 
 static int
@@ -593,16 +599,29 @@ virtio_alloc_queues(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
+
 static void
 virtio_dev_close(struct rte_eth_dev *dev)
 {
 	struct virtio_hw *hw = dev->data->dev_private;
+	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "virtio_dev_close");
 
 	/* reset the NIC */
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR);
+		VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
+	if (intr_conf->rxq)
+		virtio_queues_unbind_intr(dev);
+
+	if (intr_conf->lsc || intr_conf->rxq) {
+		rte_intr_disable(dev->intr_handle);
+		rte_intr_efd_disable(dev->intr_handle);
+		rte_free(dev->intr_handle->intr_vec);
+		dev->intr_handle->intr_vec = NULL;
+	}
+
 	vtpci_reset(hw);
 	virtio_dev_free_mbufs(dev);
 	virtio_free_queues(hw);
@@ -617,7 +636,7 @@ virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-		PMD_INIT_LOG(INFO, "host does not support rx control\n");
+		PMD_INIT_LOG(INFO, "host does not support rx control");
 		return;
 	}
 
@@ -640,7 +659,7 @@ virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-		PMD_INIT_LOG(INFO, "host does not support rx control\n");
+		PMD_INIT_LOG(INFO, "host does not support rx control");
 		return;
 	}
 
@@ -663,7 +682,7 @@ virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-		PMD_INIT_LOG(INFO, "host does not support rx control\n");
+		PMD_INIT_LOG(INFO, "host does not support rx control");
 		return;
 	}
 
@@ -686,7 +705,7 @@ virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
 	int ret;
 
 	if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
-		PMD_INIT_LOG(INFO, "host does not support rx control\n");
+		PMD_INIT_LOG(INFO, "host does not support rx control");
 		return;
 	}
 
@@ -708,15 +727,38 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
 				 hw->vtnet_hdr_size;
 	uint32_t frame_size = mtu + ether_hdr_len;
+	uint32_t max_frame_size = hw->max_mtu + ether_hdr_len;
+
+	max_frame_size = RTE_MIN(max_frame_size, VIRTIO_MAX_RX_PKTLEN);
 
-	if (mtu < ETHER_MIN_MTU || frame_size > VIRTIO_MAX_RX_PKTLEN) {
-		PMD_INIT_LOG(ERR, "MTU should be between %d and %d\n",
-			ETHER_MIN_MTU, VIRTIO_MAX_RX_PKTLEN - ether_hdr_len);
+	if (mtu < ETHER_MIN_MTU || frame_size > max_frame_size) {
+		PMD_INIT_LOG(ERR, "MTU should be between %d and %d",
+			ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
 		return -EINVAL;
 	}
 	return 0;
 }
 
+static int
+virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id]; + struct virtqueue *vq = rxvq->vq; + + virtqueue_enable_intr(vq); + return 0; +} + +static int +virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id) +{ + struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id]; + struct virtqueue *vq = rxvq->vq; + + virtqueue_disable_intr(vq); + return 0; +} + /* * dev_ops for virtio, bare necessities for basic operation */ @@ -738,7 +780,10 @@ static const struct eth_dev_ops virtio_eth_dev_ops = { .xstats_reset = virtio_dev_stats_reset, .link_update = virtio_dev_link_update, .rx_queue_setup = virtio_dev_rx_queue_setup, + .rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable, + .rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable, .rx_queue_release = virtio_dev_queue_release, + .rx_descriptor_done = virtio_dev_rx_queue_done, .tx_queue_setup = virtio_dev_tx_queue_setup, .tx_queue_release = virtio_dev_queue_release, /* collect stats per queue */ @@ -1122,6 +1167,18 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features) PMD_INIT_LOG(DEBUG, "host_features before negotiate = %" PRIx64, host_features); + /* If supported, ensure MTU value is valid before acknowledging it. */ + if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) { + struct virtio_net_config config; + + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, mtu), + &config.mtu, sizeof(config.mtu)); + + if (config.mtu < ETHER_MIN_MTU) + req_features &= ~(1ULL << VIRTIO_NET_F_MTU); + } + /* * Negotiate features: Subset of device feature bits are written back * guest feature bits. @@ -1154,9 +1211,8 @@ virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features) * Process Virtio Config changed interrupt and call the callback * if link state changed. */ -static void -virtio_interrupt_handler(struct rte_intr_handle *handle, - void *param) +void +virtio_interrupt_handler(void *param) { struct rte_eth_dev *dev = param; struct virtio_hw *hw = dev->data->dev_private; @@ -1166,7 +1222,7 @@ virtio_interrupt_handler(struct rte_intr_handle *handle, isr = vtpci_isr(hw); PMD_DRV_LOG(INFO, "interrupt status = %#x", isr); - if (rte_intr_enable(handle) < 0) + if (rte_intr_enable(dev->intr_handle) < 0) PMD_DRV_LOG(ERR, "interrupt enable failed"); if (isr & VIRTIO_PCI_ISR_CONFIG) { @@ -1187,6 +1243,95 @@ rx_func_get(struct rte_eth_dev *eth_dev) eth_dev->rx_pkt_burst = &virtio_recv_pkts; } +/* Only support 1:1 queue/interrupt mapping so far. 
+ * TODO: support n:1 queue/interrupt mapping when there is a limited number of
+ * interrupt vectors (<N+1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+	uint32_t i;
+	struct virtio_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_LOG(INFO, "queue/interrupt binding");
+	for (i = 0; i < dev->data->nb_rx_queues; ++i) {
+		dev->intr_handle->intr_vec[i] = i + 1;
+		if (VTPCI_OPS(hw)->set_queue_irq(hw, hw->vqs[i * 2], i + 1) ==
+				VIRTIO_MSI_NO_VECTOR) {
+			PMD_DRV_LOG(ERR, "failed to set queue vector");
+			return -EBUSY;
+		}
+	}
+
+	return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+	uint32_t i;
+	struct virtio_hw *hw = dev->data->dev_private;
+
+	PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+	for (i = 0; i < dev->data->nb_rx_queues; ++i)
+		VTPCI_OPS(hw)->set_queue_irq(hw,
+			hw->vqs[i * VTNET_CQ],
+			VIRTIO_MSI_NO_VECTOR);
+}
+
+static int
+virtio_configure_intr(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+
+	if (!rte_intr_cap_multiple(dev->intr_handle)) {
+		PMD_INIT_LOG(ERR, "Multiple intr vector not supported");
+		return -ENOTSUP;
+	}
+
+	if (rte_intr_efd_enable(dev->intr_handle, dev->data->nb_rx_queues)) {
+		PMD_INIT_LOG(ERR, "Failed to create eventfd");
+		return -1;
+	}
+
+	if (!dev->intr_handle->intr_vec) {
+		dev->intr_handle->intr_vec =
+			rte_zmalloc("intr_vec",
+				    hw->max_queue_pairs * sizeof(int), 0);
+		if (!dev->intr_handle->intr_vec) {
+			PMD_INIT_LOG(ERR, "Failed to allocate %u rxq vectors",
+				     hw->max_queue_pairs);
+			return -ENOMEM;
+		}
+	}
+
+	/* Re-register callback to update max_intr */
+	rte_intr_callback_unregister(dev->intr_handle,
+				     virtio_interrupt_handler,
+				     dev);
+	rte_intr_callback_register(dev->intr_handle,
+				   virtio_interrupt_handler,
+				   dev);
+
+	/* DO NOT try to remove this! This function will enable MSI-X, or QEMU
+	 * will encounter SIGSEGV when DRIVER_OK is sent.
+	 * And for legacy devices, this should be done before queue/vec binding
+	 * to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
+	 * (22) will be ignored.
+ */ + if (rte_intr_enable(dev->intr_handle) < 0) { + PMD_DRV_LOG(ERR, "interrupt enable failed"); + return -1; + } + + if (virtio_queues_bind_intr(dev) < 0) { + PMD_INIT_LOG(ERR, "Failed to bind queue/interrupt"); + return -1; + } + + return 0; +} + /* reset device and renegotiate features if needed */ static int virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) @@ -1208,18 +1353,18 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) if (virtio_negotiate_features(hw, req_features) < 0) return -1; - /* If host does not support status then disable LSC */ - if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) - eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; - else - eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; - - if (eth_dev->device) { + if (!hw->virtio_user_dev) { pci_dev = RTE_DEV_TO_PCI(eth_dev->device); rte_eth_copy_pci_info(eth_dev, pci_dev); - eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; } + eth_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + /* If host does not support both status and MSI-X then disable LSC */ + if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix) + eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC; + else + eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC; + rx_func_get(eth_dev); /* Setting up rx_header size for the device */ @@ -1268,6 +1413,32 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) hw->max_queue_pairs = config->max_virtqueue_pairs; + if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) { + vtpci_read_dev_config(hw, + offsetof(struct virtio_net_config, mtu), + &config->mtu, + sizeof(config->mtu)); + + /* + * MTU value has already been checked at negotiation + * time, but check again in case it has changed since + * then, which should not happen. + */ + if (config->mtu < ETHER_MIN_MTU) { + PMD_INIT_LOG(ERR, "invalid max MTU value (%u)", + config->mtu); + return -1; + } + + hw->max_mtu = config->mtu; + /* Set initial MTU to maximum one supported by vhost */ + eth_dev->data->mtu = config->mtu; + + } else { + hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN - + VLAN_TAG_LEN - hw->vtnet_hdr_size; + } + PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=%d", config->max_virtqueue_pairs); PMD_INIT_LOG(DEBUG, "config->status=%d", config->status); @@ -1284,6 +1455,14 @@ virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features) ret = virtio_alloc_queues(eth_dev); if (ret < 0) return ret; + + if (eth_dev->data->dev_conf.intr_conf.rxq) { + if (virtio_configure_intr(eth_dev) < 0) { + PMD_INIT_LOG(ERR, "failed to configure interrupt"); + return -1; + } + } + vtpci_reinit_complete(hw); if (pci_dev) @@ -1305,7 +1484,7 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw) if (hw->modern) { /* * We don't have to re-parse the PCI config space, since - * rte_eal_pci_map_device() makes sure the mapped address + * rte_pci_map_device() makes sure the mapped address * in secondary process would equal to the one mapped in * the primary process: error will be returned if that * requirement is not met. @@ -1314,12 +1493,12 @@ virtio_remap_pci(struct rte_pci_device *pci_dev, struct virtio_hw *hw) * (such as dev_cfg, common_cfg, etc.) parsed from the * primary process, which is stored in shared memory. 
 */
-		if (rte_eal_pci_map_device(pci_dev)) {
+		if (rte_pci_map_device(pci_dev)) {
 			PMD_INIT_LOG(DEBUG, "failed to map pci device!");
 			return -1;
 		}
 	} else {
-		if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+		if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
 			return -1;
 	}
 
@@ -1329,9 +1508,12 @@
 static void
 virtio_set_vtpci_ops(struct virtio_hw *hw)
 {
+#ifdef RTE_VIRTIO_USER
 	if (hw->virtio_user_dev)
 		VTPCI_OPS(hw) = &virtio_user_ops;
-	else if (hw->modern)
+	else
+#endif
+	if (hw->modern)
 		VTPCI_OPS(hw) = &modern_ops;
 	else
 		VTPCI_OPS(hw) = &legacy_ops;
@@ -1345,7 +1527,6 @@ int
 eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 {
 	struct virtio_hw *hw = eth_dev->data->dev_private;
-	uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
 	int ret;
 
 	RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
@@ -1380,19 +1561,16 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
 		return -ENOMEM;
 	}
 
+	hw->port_id = eth_dev->data->port_id;
 	/* For virtio_user case the hw->virtio_user_dev is populated by
 	 * virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
 	 */
 	if (!hw->virtio_user_dev) {
-		ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw,
-				 &dev_flags);
+		ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
 		if (ret)
 			return ret;
 	}
 
-	hw->port_id = eth_dev->data->port_id;
-	eth_dev->data->dev_flags = dev_flags;
-
 	/* reset device and negotiate default features */
 	ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
 	if (ret < 0)
@@ -1430,26 +1608,33 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
 				virtio_interrupt_handler,
 				eth_dev);
 	if (eth_dev->device)
-		rte_eal_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
+		rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
 
 	PMD_INIT_LOG(DEBUG, "dev_uninit completed");
 
 	return 0;
 }
 
-static struct eth_driver rte_virtio_pmd = {
-	.pci_drv = {
-		.driver = {
-			.name = "net_virtio",
-		},
-		.id_table = pci_id_virtio_map,
-		.drv_flags = 0,
-		.probe = rte_eth_dev_pci_probe,
-		.remove = rte_eth_dev_pci_remove,
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+		eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+	.driver = {
+		.name = "net_virtio",
 	},
-	.eth_dev_init = eth_virtio_dev_init,
-	.eth_dev_uninit = eth_virtio_dev_uninit,
-	.dev_private_size = sizeof(struct virtio_hw),
+	.id_table = pci_id_virtio_map,
+	.drv_flags = 0,
+	.probe = eth_virtio_pci_probe,
+	.remove = eth_virtio_pci_remove,
 };
 
 RTE_INIT(rte_virtio_pmd_init);
@@ -1461,7 +1646,7 @@ rte_virtio_pmd_init(void)
 		return;
 	}
 
-	rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
+	rte_pci_register(&rte_virtio_pmd);
 }
 
 /*
@@ -1521,7 +1706,9 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	}
 
 	if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-		if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) {
+		/* Enable vector (0) for Link State Interrupt */
+		if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
+				VIRTIO_MSI_NO_VECTOR) {
 			PMD_DRV_LOG(ERR, "failed to set config vector");
 			return -EBUSY;
 		}
@@ -1544,6 +1731,15 @@ virtio_dev_start(struct rte_eth_dev *dev)
 			PMD_DRV_LOG(ERR, "link status not supported by host");
 			return -ENOTSUP;
 		}
+	}
+
+	/* Enable uio/vfio intr/eventfd mapping: although we already did that
+	 * in device configure, it could be unmapped when the device is
+	 * stopped.
+	 */
+	if (dev->data->dev_conf.intr_conf.lsc ||
+	    dev->data->dev_conf.intr_conf.rxq) {
+		rte_intr_disable(dev->intr_handle);
 
 		if (rte_intr_enable(dev->intr_handle) < 0) {
 			PMD_DRV_LOG(ERR, "interrupt enable failed");
@@ -1551,9 +1747,6 @@ virtio_dev_start(struct rte_eth_dev *dev)
 		}
 	}
 
-	/* Initialize Link state */
-	virtio_dev_link_update(dev, 0);
-
 	/*Notify the backend
 	 *Otherwise the tap backend might already stop its queue due to fullness.
 	 *vhost backend will have no chance to be waked up
@@ -1583,6 +1776,11 @@ virtio_dev_start(struct rte_eth_dev *dev)
 		VIRTQUEUE_DUMP(txvq->vq);
 	}
 
+	hw->started = 1;
+
+	/* Initialize Link state */
+	virtio_dev_link_update(dev, 0);
+
 	return 0;
 }
 
@@ -1637,13 +1835,16 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
 static void
 virtio_dev_stop(struct rte_eth_dev *dev)
 {
+	struct virtio_hw *hw = dev->data->dev_private;
 	struct rte_eth_link link;
+	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 
-	if (dev->data->dev_conf.intr_conf.lsc)
+	if (intr_conf->lsc || intr_conf->rxq)
 		rte_intr_disable(dev->intr_handle);
 
+	hw->started = 0;
 	memset(&link, 0, sizeof(link));
 	virtio_dev_atomic_write_link_status(dev, &link);
 }
@@ -1660,7 +1861,9 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
 	link.link_speed  = SPEED_10G;
 
-	if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+	if (hw->started == 0) {
+		link.link_status = ETH_LINK_DOWN;
+	} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
 		PMD_INIT_LOG(DEBUG, "Get link status from hw");
 		vtpci_read_dev_config(hw,
 				offsetof(struct virtio_net_config, status),
@@ -1685,9 +1888,11 @@ virtio_dev_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complet
 static void
 virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
-	uint64_t tso_mask;
+	uint64_t tso_mask, host_features;
 	struct virtio_hw *hw = dev->data->dev_private;
 
+	dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+
 	dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
 	dev_info->max_rx_queues =
 		RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
@@ -1699,18 +1904,25 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
 	};
-	dev_info->rx_offload_capa =
-		DEV_RX_OFFLOAD_TCP_CKSUM |
-		DEV_RX_OFFLOAD_UDP_CKSUM |
-		DEV_RX_OFFLOAD_TCP_LRO;
-	dev_info->tx_offload_capa = 0;
 
+	host_features = VTPCI_OPS(hw)->get_features(hw);
+	dev_info->rx_offload_capa = 0;
+	if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
+		dev_info->rx_offload_capa |=
+			DEV_RX_OFFLOAD_TCP_CKSUM |
+			DEV_RX_OFFLOAD_UDP_CKSUM;
+	}
+	tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+		(1ULL << VIRTIO_NET_F_GUEST_TSO6);
+	if ((host_features & tso_mask) == tso_mask)
+		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+
+	dev_info->tx_offload_capa = 0;
 	if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {
 		dev_info->tx_offload_capa |=
 			DEV_TX_OFFLOAD_UDP_CKSUM |
 			DEV_TX_OFFLOAD_TCP_CKSUM;
 	}
-
 	tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
 		(1ULL << VIRTIO_NET_F_HOST_TSO6);
 	if ((hw->guest_features & tso_mask) == tso_mask)
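
---

Note: the patch above only wires up the PMD side of Rx queue interrupts (the `rx_queue_intr_enable`/`rx_queue_intr_disable` dev_ops, per-queue MSI-X vector binding in `virtio_queues_bind_intr()`, and eventfd setup in `virtio_configure_intr()`). For context, here is a minimal sketch of how an application could consume this, modeled on the l3fwd-power interrupt-mode pattern; it is not part of the commit, and the function name, port/queue ids, and burst size are illustrative assumptions. It assumes the port was configured with `dev_conf.intr_conf.rxq = 1` and started.

/* Hypothetical consumer sketch (not part of this patch). */
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

#define BURST_SZ 32

static void
rx_intr_loop(uint8_t port, uint16_t queue)
{
	struct rte_mbuf *pkts[BURST_SZ];
	struct rte_epoll_event ev;
	uint16_t i, nb;

	/* Map this queue's interrupt eventfd into the per-thread epoll fd;
	 * this relies on the efd/vector setup done in virtio_configure_intr().
	 */
	if (rte_eth_dev_rx_intr_ctl_q(port, queue, RTE_EPOLL_PER_THREAD,
				      RTE_INTR_EVENT_ADD, NULL) < 0)
		return;

	for (;;) {
		nb = rte_eth_rx_burst(port, queue, pkts, BURST_SZ);
		if (nb == 0) {
			/* Queue went idle: arm the interrupt (this reaches the
			 * new virtio_dev_rx_queue_intr_enable() hook), block
			 * until the host kicks the eventfd, then go back to
			 * polling with the interrupt disarmed.
			 */
			rte_eth_dev_rx_intr_enable(port, queue);
			rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1);
			rte_eth_dev_rx_intr_disable(port, queue);
			continue;
		}
		for (i = 0; i < nb; i++)
			rte_pktmbuf_free(pkts[i]); /* stand-in for real work */
	}
}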