-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
*/
#include <stdint.h>
#include <errno.h>
#include <unistd.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
+#include <rte_ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_ether.h>
+#include <rte_ip.h>
+#include <rte_arp.h>
#include <rte_common.h>
#include <rte_errno.h>
+#include <rte_cpuflags.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_dev.h>
+#include <rte_cycles.h>
#include "virtio_ethdev.h"
#include "virtio_pci.h"
static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
+static int virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
-static void virtio_dev_stats_get(struct rte_eth_dev *dev,
+static int virtio_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int virtio_dev_xstats_get(struct rte_eth_dev *dev,
struct rte_eth_xstat *xstats, unsigned n);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
-static void virtio_mac_addr_add(struct rte_eth_dev *dev,
+static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
- uint32_t index, uint32_t vmdq __rte_unused);
+ uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
+static int virtio_intr_enable(struct rte_eth_dev *dev);
+static int virtio_intr_disable(struct rte_eth_dev *dev);
+
static int virtio_dev_queue_stats_mapping_set(
- __rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_id,
- __rte_unused uint8_t stat_idx,
- __rte_unused uint8_t is_rx);
+ struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
+
+int virtio_logtype_init;
+int virtio_logtype_driver;
+
+static void virtio_notify_peers(struct rte_eth_dev *dev);
+static void virtio_ack_link_announce(struct rte_eth_dev *dev);
/*
* The set of PCI devices this driver supports
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl result;
+ struct virtio_pmd_ctrl *result;
struct virtqueue *vq;
ctrl->status = status;
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
+
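+	/*
+	 * Serialize access to the control queue: with this change
+	 * virtio_send_command() can be called from the interrupt handler
+	 * (link announce ack) as well as from the application, so the
+	 * whole request/response cycle is done under cvq->lock.
+	 */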
+ rte_spinlock_lock(&cvq->lock);
vq = cvq->vq;
head = vq->vq_desc_head_idx;
"vq->hw->cvq = %p vq = %p",
vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
- if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1))
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
return -1;
+ }
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- memcpy(&result, cvq->virtio_net_hdr_mz->addr,
- sizeof(struct virtio_pmd_ctrl));
+ result = cvq->virtio_net_hdr_mz->addr;
- return result.status;
+ rte_spinlock_unlock(&cvq->lock);
+ return result->status;
}
static int
/* do nothing */
}
-static int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
-{
- if (vtpci_queue_idx == hw->max_queue_pairs * 2)
- return VTNET_CQ;
- else if (vtpci_queue_idx % 2 == 0)
- return VTNET_RQ;
- else
- return VTNET_TQ;
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
}
}
- memset(mz->addr, 0, sizeof(mz->len));
+ memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->phys_addr;
+ vq->vq_ring_mem = mz->iova;
vq->vq_ring_virt_mem = mz->addr;
PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->phys_addr);
+ (uint64_t)mz->iova);
PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
(uint64_t)(uintptr_t)mz->addr);
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->phys_addr;
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
hw->cvq = cvq;
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
- vq->offset = offsetof(struct rte_mbuf, buf_physaddr);
+ vq->offset = offsetof(struct rte_mbuf, buf_iova);
else {
vq->vq_ring_mem = (uintptr_t)mz->addr;
vq->offset = offsetof(struct rte_mbuf, buf_addr);
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
virtio_queues_unbind_intr(dev);
if (intr_conf->lsc || intr_conf->rxq) {
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
rte_intr_efd_disable(dev->intr_handle);
rte_free(dev->intr_handle->intr_vec);
dev->intr_handle->intr_vec = NULL;
.stats_reset = virtio_dev_stats_reset,
.xstats_reset = virtio_dev_stats_reset,
.link_update = virtio_dev_link_update,
+ .vlan_offload_set = virtio_dev_vlan_offload_set,
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
/* Note: limit checked in rte_eth_xstats_names() */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- struct virtqueue *rxvq = dev->data->rx_queues[i];
+ struct virtnet_rx *rxvq = dev->data->rx_queues[i];
if (rxvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) {
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- struct virtqueue *txvq = dev->data->tx_queues[i];
+ struct virtnet_tx *txvq = dev->data->tx_queues[i];
if (txvq == NULL)
continue;
for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) {
return count;
}
-static void
+static int
virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
virtio_update_stats(dev, stats);
+
+ return 0;
}
static void
}
}
-static void
+static int
virtio_mac_table_set(struct virtio_hw *hw,
const struct virtio_net_ctrl_mac *uc,
const struct virtio_net_ctrl_mac *mc)
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
PMD_DRV_LOG(INFO, "host does not support mac table");
- return;
+ return -1;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_MAC;
err = virtio_send_command(hw->cvq, &ctrl, len, 2);
if (err != 0)
PMD_DRV_LOG(NOTICE, "mac table set failed: %d", err);
+ return err;
}
-static void
+static int
virtio_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
uint32_t index, uint32_t vmdq __rte_unused)
{
if (index >= VIRTIO_MAX_MAC_ADDRS) {
PMD_DRV_LOG(ERR, "mac address index %u out of range", index);
- return;
+ return -EINVAL;
}
uc = alloca(VIRTIO_MAX_MAC_ADDRS * ETHER_ADDR_LEN + sizeof(uc->entries));
memcpy(&tbl->macs[tbl->entries++], addr, ETHER_ADDR_LEN);
}
- virtio_mac_table_set(hw, uc, mc);
+ return virtio_mac_table_set(hw, uc, mc);
}
static void
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
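+/*
+ * Wrappers around rte_intr_enable/disable that also refresh the cached
+ * MSI-X state: enabling or disabling the interrupt may change whether
+ * MSI-X is in use, so hw->use_msix is re-detected on every transition.
+ */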
+static int
+virtio_intr_enable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_enable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
+static int
+virtio_intr_disable(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_disable(dev->intr_handle) < 0)
+ return -1;
+
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
static int
virtio_negotiate_features(struct virtio_hw *hw, uint64_t req_features)
{
return 0;
}
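+/*
+ * Pause the device: stop the datapath under hw->state_lock. Note that
+ * the lock is held until virtio_dev_resume() is called, so the two
+ * must always be used as a pair.
+ */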
+int
+virtio_dev_pause(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ rte_spinlock_lock(&hw->state_lock);
+
+ if (hw->started == 0) {
+ /* Device has already been stopped. */
+ rte_spinlock_unlock(&hw->state_lock);
+ return -1;
+ }
+ hw->started = 0;
+ /*
+ * Prevent the worker threads from touching queues to avoid contention;
+ * 1 ms should be enough for the ongoing Tx function to finish.
+ */
+ rte_delay_ms(1);
+ return 0;
+}
+
+/*
+ * Recover hw state to let the worker threads continue.
+ */
+void
+virtio_dev_resume(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ hw->started = 1;
+ rte_spinlock_unlock(&hw->state_lock);
+}
+
/*
- * Process Virtio Config changed interrupt and call the callback
- * if link state changed.
+ * Should be called only after the device has been paused.
*/
+int
+virtio_inject_pkts(struct rte_eth_dev *dev, struct rte_mbuf **tx_pkts,
+ int nb_pkts)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_tx *txvq = dev->data->tx_queues[0];
+ int ret;
+
+ hw->inject_pkts = tx_pkts;
+ ret = dev->tx_pkt_burst(txvq, tx_pkts, nb_pkts);
+ hw->inject_pkts = NULL;
+
+ return ret;
+}
+
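+/*
+ * Send a gratuitous RARP on Rx queue 0 so that peers learn the MAC of
+ * this port, e.g. after the guest has been migrated to another host.
+ */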
static void
-virtio_interrupt_handler(struct rte_intr_handle *handle,
- void *param)
+virtio_notify_peers(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq = dev->data->rx_queues[0];
+ struct rte_mbuf *rarp_mbuf;
+
+ rarp_mbuf = rte_net_make_rarp_packet(rxvq->mpool,
+ (struct ether_addr *)hw->mac_addr);
+ if (rarp_mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "failed to make RARP packet.");
+ return;
+ }
+
+ /* If the virtio port has just been stopped, there is no need to send RARP. */
+ if (virtio_dev_pause(dev) < 0) {
+ rte_pktmbuf_free(rarp_mbuf);
+ return;
+ }
+
+ virtio_inject_pkts(dev, &rarp_mbuf, 1);
+ virtio_dev_resume(dev);
+}
+
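+/*
+ * Acknowledge the link announce request so the host can clear the
+ * VIRTIO_NET_S_ANNOUNCE status bit.
+ */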
+static void
+virtio_ack_link_announce(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtio_pmd_ctrl ctrl;
+
+ ctrl.hdr.class = VIRTIO_NET_CTRL_ANNOUNCE;
+ ctrl.hdr.cmd = VIRTIO_NET_CTRL_ANNOUNCE_ACK;
+
+ virtio_send_command(hw->cvq, &ctrl, NULL, 0);
+}
+
+/*
+ * Process the virtio config changed interrupt. Invoke the callback
+ * if the link state changed, and send a gratuitous RARP packet if
+ * the ISR indicates an ANNOUNCE.
+ */
+void
+virtio_interrupt_handler(void *param)
{
struct rte_eth_dev *dev = param;
struct virtio_hw *hw = dev->data->dev_private;
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (rte_intr_enable(handle) < 0)
+ if (virtio_intr_enable(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
}
+ if (isr & VIRTIO_NET_S_ANNOUNCE) {
+ virtio_notify_peers(dev);
+ virtio_ack_link_announce(dev);
+ }
}
+/* Set the Rx and Tx handlers according to what the device supports. */
static void
-rx_func_get(struct rte_eth_dev *eth_dev)
+set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
- else
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+
+ if (hw->use_simple_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
}
/* Only support 1:1 queue/interrupt mapping so far.
* to change the config size from 20 to 24, or VIRTIO_MSI_QUEUE_VECTOR
* (22) will be ignored.
*/
- if (rte_intr_enable(dev->intr_handle) < 0) {
+ if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -1;
}
/* Reset the device although not necessary at startup */
vtpci_reset(hw);
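+	/* On re-initialization, release any queues allocated by a previous run. */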
+ if (hw->vqs) {
+ virtio_dev_free_mbufs(eth_dev);
+ virtio_free_queues(hw);
+ }
+
/* Tell the host we've noticed this device. */
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
- if (eth_dev->device) {
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ if (!hw->virtio_user_dev) {
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
/* If host does not support both status and MSI-X then disable LSC */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
+ hw->use_msix != VIRTIO_MSIX_NONE)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- rx_func_get(eth_dev);
-
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
} else {
PMD_INIT_LOG(DEBUG, "config->max_virtqueue_pairs=1");
hw->max_queue_pairs = 1;
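+		/* Derive the maximum MTU from the largest supported Rx packet. */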
+ hw->max_mtu = VIRTIO_MAX_RX_PKTLEN - ETHER_HDR_LEN -
+ VLAN_TAG_LEN - hw->vtnet_hdr_size;
}
ret = virtio_alloc_queues(eth_dev);
if (hw->modern) {
/*
* We don't have to re-parse the PCI config space, since
- * rte_eal_pci_map_device() makes sure the mapped address
+ * rte_pci_map_device() makes sure the mapped address
* in secondary process would equal to the one mapped in
* the primary process: error will be returned if that
* requirement is not met.
* (such as dev_cfg, common_cfg, etc.) parsed from the
* primary process, which is stored in shared memory.
*/
- if (rte_eal_pci_map_device(pci_dev)) {
+ if (rte_pci_map_device(pci_dev)) {
PMD_INIT_LOG(DEBUG, "failed to map pci device!");
return -1;
}
} else {
- if (rte_eal_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
+ if (rte_pci_ioport_map(pci_dev, 0, VTPCI_IO(hw)) < 0)
return -1;
}
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE;
int ret;
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
- hw);
+ ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
virtio_set_vtpci_ops(hw);
- if (hw->use_simple_rxtx) {
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else {
- rx_func_get(eth_dev);
- }
+ set_rxtx_funcs(eth_dev);
+
return 0;
}
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
*/
if (!hw->virtio_user_dev) {
- ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw,
- &dev_flags);
+ ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
- return ret;
+ goto out;
}
- eth_dev->data->dev_flags = dev_flags;
-
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
- return ret;
+ goto out;
/* Setup interrupt callback */
if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
virtio_interrupt_handler, eth_dev);
return 0;
+
+out:
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
}
static int
virtio_interrupt_handler,
eth_dev);
if (eth_dev->device)
- rte_eal_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
return 0;
}
-static struct eth_driver rte_virtio_pmd = {
- .pci_drv = {
- .driver = {
- .name = "net_virtio",
- },
- .id_table = pci_id_virtio_map,
- .drv_flags = 0,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
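+/*
+ * Probe/remove callbacks for the rte_pci_driver, built on the generic
+ * ethdev PCI helpers; they replace the removed struct eth_driver glue.
+ */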
+static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
+ eth_virtio_dev_init);
+}
+
+static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+}
+
+static struct rte_pci_driver rte_virtio_pmd = {
+ .driver = {
+ .name = "net_virtio",
},
- .eth_dev_init = eth_virtio_dev_init,
- .eth_dev_uninit = eth_virtio_dev_uninit,
- .dev_private_size = sizeof(struct virtio_hw),
+ .id_table = pci_id_virtio_map,
+ .drv_flags = 0,
+ .probe = eth_virtio_pci_probe,
+ .remove = eth_virtio_pci_remove,
};
RTE_INIT(rte_virtio_pmd_init);
return;
}
- rte_eal_pci_register(&rte_virtio_pmd.pci_drv);
+ rte_pci_register(&rte_virtio_pmd);
}
/*
PMD_INIT_LOG(DEBUG, "configure");
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
+
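+	/*
+	 * Re-initialize the device when Rx interrupts are requested: the
+	 * interrupt setup depends on the queue configuration known here.
+	 */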
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
+ if (ret < 0)
+ return ret;
+ }
+
+	/* The name hw_ip_checksum is a bit confusing, since the application
+	 * can set it to request L3 and/or L4 checksums. In the case of
+	 * virtio, only the L4 checksum is supported.
+	 */
if (rxmode->hw_ip_checksum)
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+
if (rxmode->enable_lro)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
if (rxmode->hw_ip_checksum &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
- PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ PMD_DRV_LOG(ERR,
+ "rx checksum not available on this host");
return -ENOTSUP;
}
if (rxmode->enable_lro &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
- PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ PMD_DRV_LOG(ERR,
+ "Large Receive Offload not available on this host");
return -ENOTSUP;
}
if (rxmode->hw_vlan_filter
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
}
return -EBUSY;
}
+ rte_spinlock_init(&hw->state_lock);
+
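+	/*
+	 * Default to the simple (vectorized) Rx/Tx paths and fall back to
+	 * the standard ones whenever a feature they cannot handle is enabled.
+	 */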
+ hw->use_simple_rx = 1;
+ hw->use_simple_tx = 1;
+
+#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+#endif
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+
+ if (rxmode->hw_ip_checksum)
+ hw->use_simple_rx = 0;
+
return 0;
}
struct virtnet_rx *rxvq;
struct virtnet_tx *txvq __rte_unused;
struct virtio_hw *hw = dev->data->dev_private;
+ int ret;
+
+ /* Finish the initialization of the queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = virtio_dev_rx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret = virtio_dev_tx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
*/
if (dev->data->dev_conf.intr_conf.lsc ||
dev->data->dev_conf.intr_conf.rxq) {
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
- if (rte_intr_enable(dev->intr_handle) < 0) {
+ if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
}
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
/*Notify the backend
*Otherwise the tap backend might already stop its queue due to fullness.
*vhost backend will have no chance to be waked up
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxvq = dev->data->rx_queues[i];
+ /* Flush the old packets */
+ virtqueue_rxvq_flush(rxvq->vq);
virtqueue_notify(rxvq->vq);
}
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_notify(txvq->vq);
+ }
+
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
VIRTQUEUE_DUMP(txvq->vq);
}
+ set_rxtx_funcs(dev);
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct virtnet_rx *rxvq = dev->data->rx_queues[i];
+ if (rxvq == NULL || rxvq->vq == NULL)
+ continue;
+
PMD_INIT_LOG(DEBUG,
"Before freeing rxq[%d] used and unused buf", i);
VIRTQUEUE_DUMP(rxvq->vq);
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct virtnet_tx *txvq = dev->data->tx_queues[i];
+ if (txvq == NULL || txvq->vq == NULL)
+ continue;
+
PMD_INIT_LOG(DEBUG,
"Before freeing txq[%d] used and unused bufs",
i);
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
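+	/* Serialize against pause/resume and the interrupt handler. */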
+ rte_spinlock_lock(&hw->state_lock);
if (intr_conf->lsc || intr_conf->rxq)
- rte_intr_disable(dev->intr_handle);
+ virtio_intr_disable(dev);
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
+ rte_spinlock_unlock(&hw->state_lock);
}
static int
virtio_dev_atomic_read_link_status(dev, &link);
old = link;
link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = SPEED_10G;
+ link.link_speed = ETH_SPEED_NUM_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
return (old.link_status == link.link_status) ? -1 : 0;
}
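+/*
+ * Apply the VLAN offload settings from dev_conf.rxmode; invoked through
+ * rte_eth_dev_set_vlan_offload().
+ */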
+static int
+virtio_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (rxmode->hw_vlan_filter &&
+ !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+
+ PMD_DRV_LOG(NOTICE,
+ "vlan filtering not available on this host");
+
+ return -ENOTSUP;
+ }
+ }
+
+ if (mask & ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = rxmode->hw_vlan_strip;
+
+ return 0;
+}
+
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
+ dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
-RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio");
+RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
+
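+/* Register the dynamic log types used by this driver at constructor time. */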
+RTE_INIT(virtio_init_log);
+static void
+virtio_init_log(void)
+{
+ virtio_logtype_init = rte_log_register("pmd.virtio.init");
+ if (virtio_logtype_init >= 0)
+ rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
+ virtio_logtype_driver = rte_log_register("pmd.virtio.driver");
+ if (virtio_logtype_driver >= 0)
+ rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
+}