#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#include <rte_memory.h>
#include <rte_eal.h>
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
-static void virtio_dev_stop(struct rte_eth_dev *dev);
+static int virtio_dev_stop(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
uint8_t stat_idx,
uint8_t is_rx);
-int virtio_logtype_init;
-int virtio_logtype_driver;
-
static void virtio_notify_peers(struct rte_eth_dev *dev);
static void virtio_ack_link_announce(struct rte_eth_dev *dev);
virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
- /* wait for used descriptors in virtqueue */
+ /* wait for used desc in virtqueue
+ * desc_is_used has a load-acquire or rte_io_rmb inside
+ */
while (!desc_is_used(&desc[head], vq))
usleep(100);
- virtio_rmb(vq->hw->weak_barriers);
-
/* now get used descriptors */
vq->vq_free_cnt += nb_descs;
vq->vq_used_cons_idx += nb_descs;
txr = hdr_mz->addr;
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
- struct vring_desc *start_dp = txr[i].tx_indir;
-
/* first indirect descriptor is always the tx header */
if (!vtpci_packed_queue(hw)) {
+ struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
start_dp->addr = txvq->virtio_net_hdr_mem
tx_hdr);
start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
+ } else {
+ struct vring_packed_desc *start_dp =
+ txr[i].tx_packed_indir;
+ vring_desc_init_indirect_packed(start_dp,
+ RTE_DIM(txr[i].tx_packed_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
}
}
}
static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
/*
 * Close the device and release its resources (ethdev .dev_close callback).
 *
 * NOTE(review): diff hunk — the '-'/'+' lines change the return type from
 * void to int so close can report a status to the ethdev layer; every
 * visible exit path returns 0.  Part of the teardown body (interrupt
 * cleanup, queue release) is elided between hunks.
 */
-static void
+static int
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
/* Only the primary process owns the device; secondaries must not touch it. */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
/* Idempotent: a device that was never opened needs no teardown. */
if (!hw->opened)
-		return;
+		return 0;
hw->opened = false;
/* reset the NIC */
/* Legacy (pre-1.0) devices use an ioport BAR that must be unmapped here. */
if (!hw->modern)
rte_pci_ioport_unmap(VTPCI_IO(hw));
}
+
+	return 0;
}
static int
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
- .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
- if (hw->modern) {
- if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
- PMD_INIT_LOG(ERR,
- "VIRTIO_F_VERSION_1 features is not enabled.");
- return -1;
- }
+ if (hw->modern && !vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ PMD_INIT_LOG(ERR,
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+
+ if (hw->modern || hw->virtio_user_dev) {
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
PMD_INIT_LOG(ERR,
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
- _rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
return 0;
}
-#define SPEED_UNKNOWN 0xffffffff
#define DUPLEX_UNKNOWN 0xff
/* reset device and renegotiate features if needed */
static int
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
- if (hw->speed == SPEED_UNKNOWN) {
+ if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
config = &local_config;
vtpci_read_dev_config(hw,
hw->duplex = config->duplex;
}
}
- if (hw->speed == SPEED_UNKNOWN)
- hw->speed = ETH_SPEED_NUM_10G;
if (hw->duplex == DUPLEX_UNKNOWN)
hw->duplex = ETH_LINK_FULL_DUPLEX;
PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- uint32_t speed = SPEED_UNKNOWN;
+ uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
int vectorized = 0;
int ret;
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
+ eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
if (ret < 0)
return ret;
hw->speed = speed;
- /*
- * Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("virtio",
if (!vtpci_packed_queue(hw)) {
hw->use_vec_rx = 1;
} else {
-#if !defined(CC_AVX512_SUPPORT)
+#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
+ hw->use_vec_rx = 1;
+ hw->use_vec_tx = 1;
+#else
PMD_DRV_LOG(INFO,
"building environment do not support packed ring vectorized");
-#else
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F)) {
- hw->use_vec_rx = 1;
- hw->use_vec_tx = 1;
- }
#endif
}
}
/*
 * Uninitialize the ethdev (driver remove path): stop the port, then close
 * it to release driver-private resources.
 *
 * Only the primary process performs teardown; secondary processes share the
 * primary's device state and must not touch it, so they return 0 at once.
 *
 * Note: per-device function pointers (dev_ops, rx/tx burst) are no longer
 * cleared here; the ethdev layer owns that during rte_eth_dev_release_port().
 *
 * Returns 0 on success, otherwise the first error reported by the
 * stop/close callbacks (both return int in this revision).
 */
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
	int ret;
	int close_ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		return 0;

	ret = virtio_dev_stop(eth_dev);
	/* Close must run even if stop failed; report the first failure. */
	close_ret = virtio_dev_close(eth_dev);
	if (ret == 0)
		ret = close_ret;

	PMD_INIT_LOG(DEBUG, "dev_uninit completed");

	return ret;
}
return ETH_LINK_SPEED_56G;
case ETH_SPEED_NUM_100G:
return ETH_LINK_SPEED_100G;
+ case ETH_SPEED_NUM_200G:
+ return ETH_LINK_SPEED_200G;
default:
return 0;
}
if ((hw->use_vec_rx || hw->use_vec_tx) &&
(!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
!vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
- !vtpci_with_feature(hw, VIRTIO_F_VERSION_1))) {
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized path for requirements not met");
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
+ }
+#elif defined(RTE_ARCH_ARM)
+ if ((hw->use_vec_rx || hw->use_vec_tx) &&
+ (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
+ !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized path for requirements not met");
hw->use_vec_rx = 0;
}
if (hw->use_vec_rx) {
-#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+#if defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
PMD_DRV_LOG(INFO,
"disabled split ring vectorized path for requirement not met");
"disabled split ring vectorized rx for offloading enabled");
hw->use_vec_rx = 0;
}
+
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized rx, max SIMD bitwidth too low");
+ hw->use_vec_rx = 0;
+ }
}
}
/*
 * Stop device: disable interrupt and mark link down
 */
/*
 * NOTE(review): diff hunk — return type changes from void to int (ethdev
 * .dev_stop now reports a status); the only visible exit returns 0.  The
 * body between the started-check and the link-status update is elided.
 */
-static void
+static int
virtio_dev_stop(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
/* Clear dev_started in the PMD now that stop can no longer fail silently. */
+	dev->data->dev_started = 0;
/* state_lock serializes stop against the LSC/device-config interrupt path. */
rte_spinlock_lock(&hw->state_lock);
if (!hw->started)
rte_eth_linkstatus_set(dev, &link);
out_unlock:
rte_spinlock_unlock(&hw->state_lock);
+
+	return 0;
}
static int
if (!hw->started) {
link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
&status, sizeof(status));
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
PMD_INIT_LOG(DEBUG, "Port %d is down",
dev->data->port_id);
} else {
/* Driver registration: export the PMD, its PCI ID table, and the kernel
 * modules it can bind against.
 */
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_INIT(virtio_init_log)
-{
-	virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
-	if (virtio_logtype_init >= 0)
-		rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
-	virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
-	if (virtio_logtype_driver >= 0)
-		rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
-}
/* RTE_LOG_REGISTER replaces the open-coded constructor removed above:
 * it defines the logtype variable, registers the name, and sets the
 * default level (NOTICE) in a single macro.
 */
+RTE_LOG_REGISTER(virtio_logtype_init, pmd.net.virtio.init, NOTICE);
+RTE_LOG_REGISTER(virtio_logtype_driver, pmd.net.virtio.driver, NOTICE);