#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
+#include "virtio_user/virtio_user_dev.h"
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
static void virtio_dev_stop(struct rte_eth_dev *dev);
-static void virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
-static void virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
-static void virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
-static void virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
-static void virtio_dev_info_get(struct rte_eth_dev *dev,
+static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
+static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
+ int *vdpa,
+ uint32_t *speed,
+ int *vectorized);
+static int virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
int wait_to_complete);
static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev,
struct rte_eth_xstat_name *xstats_names,
unsigned limit);
-static void virtio_dev_stats_reset(struct rte_eth_dev *dev);
+static int virtio_dev_stats_reset(struct rte_eth_dev *dev);
static void virtio_dev_free_mbufs(struct rte_eth_dev *dev);
static int virtio_vlan_filter_set(struct rte_eth_dev *dev,
uint16_t vlan_id, int on);
static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = {
{"good_packets", offsetof(struct virtnet_tx, stats.packets)},
{"good_bytes", offsetof(struct virtnet_tx, stats.bytes)},
- {"errors", offsetof(struct virtnet_tx, stats.errors)},
{"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)},
{"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)},
{"undersize_packets", offsetof(struct virtnet_tx, stats.size_bins[0])},
return -EINVAL;
}
- if (!rte_is_power_of_2(vq_size)) {
- PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2");
+ if (!vtpci_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
+ PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
return -EINVAL;
}
hw->cvq = cvq;
}
- /* For virtio_user case (that is when hw->dev is NULL), we use
- * virtual address. And we need properly set _offset_, please see
+ /* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
+ * we use virtual address. And we need properly set _offset_, please see
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
vtpci_reset(hw);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
+
+#ifdef RTE_VIRTIO_USER
+ if (hw->virtio_user_dev)
+ virtio_user_dev_uninit(hw->virtio_user_dev);
+ else
+#endif
+ if (dev->device) {
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(dev));
+ if (!hw->modern)
+ rte_pci_ioport_unmap(VTPCI_IO(hw));
+ }
}
/* Enable promiscuous Rx via the VIRTIO_NET_CTRL_RX control virtqueue.
 * Patch changes the return type from void to int to match the updated
 * eth_dev_ops contract: -ENOTSUP when the host lacks VIRTIO_NET_F_CTRL_RX,
 * -EAGAIN when the control-queue command fails, 0 on success.
 * NOTE(review): this condensed hunk elides unchanged lines (the ctrl/dlen/ret
 * declarations and the ctrl.hdr.cmd / payload setup) — confirm against the
 * full file before applying.
 */
-static void
+static int
virtio_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
/* Control-queue Rx-mode commands need VIRTIO_NET_F_CTRL_RX negotiated. */
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
- return;
+ return -ENOTSUP;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
dlen[0] = 1;
/* Synchronously issue the command on the control virtqueue. */
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
- if (ret)
+ if (ret) {
PMD_INIT_LOG(ERR, "Failed to enable promisc");
+ return -EAGAIN;
+ }
+
+ return 0;
}
/* Disable promiscuous Rx via the VIRTIO_NET_CTRL_RX control virtqueue.
 * Return type changed from void to int per the updated eth_dev_ops API:
 * -ENOTSUP without VIRTIO_NET_F_CTRL_RX, -EAGAIN on command failure, 0 on
 * success.  NOTE(review): condensed hunk — ctrl/dlen/ret declarations and
 * the command/payload setup lines are elided here.
 */
-static void
+static int
virtio_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
- return;
+ return -ENOTSUP;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
- if (ret)
+ if (ret) {
PMD_INIT_LOG(ERR, "Failed to disable promisc");
+ return -EAGAIN;
+ }
+
+ return 0;
}
/* Enable all-multicast Rx via the VIRTIO_NET_CTRL_RX control virtqueue.
 * Mirrors the promiscuous handlers: now returns -ENOTSUP when the host
 * lacks VIRTIO_NET_F_CTRL_RX, -EAGAIN on control-command failure, 0 on
 * success.  NOTE(review): condensed hunk — local declarations and the
 * ctrl.hdr.cmd / payload setup are elided from this view.
 */
-static void
+static int
virtio_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
- return;
+ return -ENOTSUP;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
- if (ret)
+ if (ret) {
PMD_INIT_LOG(ERR, "Failed to enable allmulticast");
+ return -EAGAIN;
+ }
+
+ return 0;
}
/* Disable all-multicast Rx via the VIRTIO_NET_CTRL_RX control virtqueue.
 * Returns -ENOTSUP without VIRTIO_NET_F_CTRL_RX, -EAGAIN on control-command
 * failure, 0 on success — consistent with the other three Rx-mode handlers
 * in this patch.  NOTE(review): condensed hunk, unchanged lines elided.
 */
-static void
+static int
virtio_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_RX)) {
PMD_INIT_LOG(INFO, "host does not support rx control");
- return;
+ return -ENOTSUP;
}
ctrl.hdr.class = VIRTIO_NET_CTRL_RX;
dlen[0] = 1;
ret = virtio_send_command(hw->cvq, &ctrl, dlen, 1);
- if (ret)
+ if (ret) {
PMD_INIT_LOG(ERR, "Failed to disable allmulticast");
+ return -EAGAIN;
+ }
+
+ return 0;
}
#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
stats->opackets += txvq->stats.packets;
stats->obytes += txvq->stats.bytes;
- stats->oerrors += txvq->stats.errors;
if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
stats->q_opackets[i] = txvq->stats.packets;
return 0;
}
/* Zero the software per-queue Tx/Rx counters.  Return type changed from
 * void to int (always 0) per the updated eth_dev_ops stats_reset contract.
 * The txvq->stats.errors reset is dropped because this patch removes the
 * Tx "errors" counter altogether (see the xstats string table and
 * stats->oerrors hunks elsewhere in this diff).
 * NOTE(review): condensed hunk — the per-queue loop headers and rxvq
 * counter resets are elided from this view.
 */
-static void
+static int
virtio_dev_stats_reset(struct rte_eth_dev *dev)
{
unsigned int i;
txvq->stats.packets = 0;
txvq->stats.bytes = 0;
- txvq->stats.errors = 0;
txvq->stats.multicast = 0;
txvq->stats.broadcast = 0;
memset(txvq->stats.size_bins, 0,
memset(rxvq->stats.size_bins, 0,
sizeof(rxvq->stats.size_bins[0]) * 8);
}
+
+ return 0;
}
static void
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
+/*
+ * Re-arm (unmask) the device interrupt after servicing it.
+ *
+ * Acks the interrupt through the EAL (rte_intr_ack) and, for PCI devices
+ * only (virtio_user has no MSI-X), re-reads the MSI-X state since it may
+ * change across an ack.  Returns 0 on success, -1 if the ack fails.
+ * Used by the interrupt handler instead of a full virtio_intr_enable().
+ */
+static int
+virtio_intr_unmask(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ if (rte_intr_ack(dev->intr_handle) < 0)
+ return -1;
+
+ /* MSI-X detection is a PCI-transport operation; skip for virtio_user. */
+ if (!hw->virtio_user_dev)
+ hw->use_msix = vtpci_msix_detect(RTE_ETH_DEV_TO_PCI(dev));
+
+ return 0;
+}
+
static int
virtio_intr_enable(struct rte_eth_dev *dev)
{
isr = vtpci_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
- if (virtio_intr_enable(dev) < 0)
+ if (virtio_intr_unmask(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (isr & VIRTIO_PCI_ISR_CONFIG) {
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
if (vtpci_packed_queue(hw)) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring %s Tx path on port %u",
eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
}
} else {
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
} else if (hw->use_inorder_rx) {
return 0;
}
-
+#define SPEED_UNKNOWN 0xffffffff
+#define DUPLEX_UNKNOWN 0xff
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
- if (!hw->virtio_user_dev) {
+ if (!hw->virtio_user_dev)
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
- rte_eth_copy_pci_info(eth_dev, pci_dev);
- }
/* If host does not support both status and MSI-X then disable LSC */
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) &&
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+ if (hw->speed == SPEED_UNKNOWN) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
+ config = &local_config;
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, speed),
+ &config->speed, sizeof(config->speed));
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, duplex),
+ &config->duplex, sizeof(config->duplex));
+ hw->speed = config->speed;
+ hw->duplex = config->duplex;
+ }
+ }
+ if (hw->speed == SPEED_UNKNOWN)
+ hw->speed = ETH_SPEED_NUM_10G;
+ if (hw->duplex == DUPLEX_UNKNOWN)
+ hw->duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
+ hw->speed, hw->duplex);
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ uint32_t speed = SPEED_UNKNOWN;
+ int vectorized = 0;
int ret;
- RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
+ if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
+ PMD_INIT_LOG(ERR,
+ "Not sufficient headroom required = %d, avail = %d",
+ (int)sizeof(struct virtio_net_hdr_mrg_rxbuf),
+ RTE_PKTMBUF_HEADROOM);
+
+ return -1;
+ }
eth_dev->dev_ops = &virtio_eth_dev_ops;
return 0;
}
+ ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
+ NULL, &speed, &vectorized);
+ if (ret < 0)
+ return ret;
+ hw->speed = speed;
+ /*
+ * Pass the information to the rte_eth_dev_close() that it should also
+ * release the private port resources.
+ */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("virtio",
if (!hw->virtio_user_dev) {
ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
- goto out;
+ goto err_vtpci_init;
}
+ rte_spinlock_init(&hw->state_lock);
+
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
- goto out;
+ goto err_virtio_init;
+
+ if (vectorized) {
+ if (!vtpci_packed_queue(hw))
+ hw->use_vec_rx = 1;
+ }
+
+ hw->opened = true;
return 0;
-out:
+err_virtio_init:
+ if (!hw->virtio_user_dev) {
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
+ if (!hw->modern)
+ rte_pci_ioport_unmap(VTPCI_IO(hw));
+ }
+err_vtpci_init:
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
return ret;
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- if (eth_dev->device)
- rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
-
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
return 0;
}
+
/* rte_kvargs_process callback for the "vdpa" devarg.
 * Reworked so the result is reported through ret_val (1 if the value is
 * exactly "1", else 0) and the callback itself always succeeds, instead of
 * the old scheme of signalling selection via a -1 return.
 */
static int vdpa_check_handler(__rte_unused const char *key,
- const char *value, __rte_unused void *opaque)
+ const char *value, void *ret_val)
{
- if (strcmp(value, "1"))
- return -1;
+ if (strcmp(value, "1") == 0)
+ *(int *)ret_val = 1;
+ else
+ *(int *)ret_val = 0;
+
+ return 0;
+}
+
+
+/*
+ * Map a link speed in Mbps (ETH_SPEED_NUM_*) to the corresponding
+ * ETH_LINK_SPEED_* capability flag for dev_info->speed_capa.
+ * Returns 0 for unrecognised speeds, which link_speed_handler() also
+ * relies on to reject invalid "speed" devarg values.
+ */
+static uint32_t
+virtio_dev_speed_capa_get(uint32_t speed)
+{
+ switch (speed) {
+ case ETH_SPEED_NUM_10G:
+ return ETH_LINK_SPEED_10G;
+ case ETH_SPEED_NUM_20G:
+ return ETH_LINK_SPEED_20G;
+ case ETH_SPEED_NUM_25G:
+ return ETH_LINK_SPEED_25G;
+ case ETH_SPEED_NUM_40G:
+ return ETH_LINK_SPEED_40G;
+ case ETH_SPEED_NUM_50G:
+ return ETH_LINK_SPEED_50G;
+ case ETH_SPEED_NUM_56G:
+ return ETH_LINK_SPEED_56G;
+ case ETH_SPEED_NUM_100G:
+ return ETH_LINK_SPEED_100G;
+ default:
+ /* Unknown speed: no capability bit to advertise. */
+ return 0;
+ }
+}
+
+/* rte_kvargs_process callback for the "vectorized" devarg: sets *ret_val
+ * to 1 when the value is exactly "1", else 0.  Always returns success;
+ * same convention as vdpa_check_handler().
+ */
+static int vectorized_check_handler(__rte_unused const char *key,
+ const char *value, void *ret_val)
+{
+ if (strcmp(value, "1") == 0)
+ *(int *)ret_val = 1;
+ else
+ *(int *)ret_val = 0;
+
+ return 0;
+}
+
+#define VIRTIO_ARG_SPEED "speed"
+#define VIRTIO_ARG_VDPA "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
+
+
+/* rte_kvargs_process callback for the "speed" devarg.
+ * Parses the value with strtoul (base auto-detected) and accepts it only
+ * if virtio_dev_speed_capa_get() recognises it as a supported speed —
+ * this also rejects non-numeric input, since strtoul then yields 0 which
+ * maps to capability 0.  Returns -EINVAL on bad input, 0 on success with
+ * the speed stored through ret_val.
+ */
+static int
+link_speed_handler(const char *key __rte_unused,
+ const char *value, void *ret_val)
+{
+ uint32_t val;
+ if (!value || !ret_val)
+ return -EINVAL;
+ val = strtoul(value, NULL, 0);
+ /* validate input */
+ if (virtio_dev_speed_capa_get(val) == 0)
+ return -EINVAL;
+ *(uint32_t *)ret_val = val;
return 0;
}
+
static int
-vdpa_mode_selected(struct rte_devargs *devargs)
+virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
+ uint32_t *speed, int *vectorized)
{
struct rte_kvargs *kvlist;
- const char *key = "vdpa";
int ret = 0;
if (devargs == NULL)
return 0;
kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL)
+ if (kvlist == NULL) {
+ PMD_INIT_LOG(ERR, "error when parsing param");
return 0;
+ }
+ if (vdpa && rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
+ /* vdpa mode selected when there's a key-value pair:
+ * vdpa=1
+ */
+ ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
+ vdpa_check_handler, vdpa);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_VDPA);
+ goto exit;
+ }
+ }
+ if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ VIRTIO_ARG_SPEED,
+ link_speed_handler, speed);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_SPEED);
+ goto exit;
+ }
+ }
- if (!rte_kvargs_count(kvlist, key))
- goto exit;
-
- /* vdpa mode selected when there's a key-value pair: vdpa=1 */
- if (rte_kvargs_process(kvlist, key,
- vdpa_check_handler, NULL) < 0) {
- goto exit;
+ if (vectorized &&
+ rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ VIRTIO_ARG_VECTORIZED,
+ vectorized_check_handler, vectorized);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_VECTORIZED);
+ goto exit;
+ }
}
- ret = 1;
exit:
rte_kvargs_free(kvlist);
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
- if (rte_eal_iopl_init() != 0) {
- PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
- return 1;
- }
+ int vdpa = 0;
+ int ret = 0;
+ ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+ NULL);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "devargs parsing is failed");
+ return ret;
+ }
/* virtio pmd skips probe if device needs to work in vdpa mode */
- if (vdpa_mode_selected(pci_dev->device.devargs))
+ if (vdpa == 1)
return 1;
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
/* PCI remove callback.  With RTE_ETH_DEV_CLOSE_REMOVE set at init time,
 * rte_eth_dev_close() may already have released the port, in which case
 * the generic remove returns -ENODEV; treat that as success so hotplug
 * detach of an already-closed port does not report an error.
 */
static int eth_virtio_pci_remove(struct rte_pci_device *pci_dev)
{
- return rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+ int ret;
+
+ ret = rte_eth_dev_pci_generic_remove(pci_dev, eth_virtio_dev_uninit);
+ /* Port has already been released by close. */
+ if (ret == -ENODEV)
+ ret = 0;
+ return ret;
}
static struct rte_pci_driver rte_virtio_pmd = {
PMD_INIT_LOG(DEBUG, "configure");
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
+ if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported Rx multi queue mode %d",
+ rxmode->mq_mode);
+ return -EINVAL;
+ }
+
+ if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported Tx multi queue mode %d",
+ txmode->mq_mode);
+ return -EINVAL;
+ }
+
if (dev->data->dev_conf.intr_conf.rxq) {
ret = virtio_init_device(dev, hw->req_guest_features);
if (ret < 0)
return -EBUSY;
}
- rte_spinlock_init(&hw->state_lock);
-
- hw->use_simple_rx = 1;
-
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
hw->use_inorder_rx = 1;
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
if (vtpci_packed_queue(hw)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
hw->use_inorder_rx = 0;
}
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
+ hw->use_vec_rx = 0;
}
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_VLAN_STRIP))
- hw->use_simple_rx = 0;
-
- hw->opened = true;
+ hw->use_vec_rx = 0;
return 0;
}
struct virtio_hw *hw = dev->data->dev_private;
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = hw->duplex;
+ link.link_speed = hw->speed;
+ link.link_autoneg = ETH_LINK_AUTONEG;
if (!hw->started) {
link.link_status = ETH_LINK_DOWN;
return 0;
}
-static void
+static int
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
uint64_t tso_mask, host_features;
struct virtio_hw *hw = dev->data->dev_private;
-
- dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+ dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((host_features & tso_mask) == tso_mask)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+
+ return 0;
}
/*