#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#include <rte_memory.h>
#include <rte_eal.h>
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
-static void virtio_dev_stop(struct rte_eth_dev *dev);
+static int virtio_dev_stop(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
- /* wait for used descriptors in virtqueue */
+ /* wait for used desc in virtqueue
+ * desc_is_used has a load-acquire or rte_io_rmb inside
+ */
while (!desc_is_used(&desc[head], vq))
usleep(100);
- virtio_rmb(vq->hw->weak_barriers);
-
/* now get used descriptors */
vq->vq_free_cnt += nb_descs;
vq->vq_used_cons_idx += nb_descs;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
if (!vtpci_packed_queue(hw)) {
hw->use_vec_rx = 1;
} else {
-#if !defined(CC_AVX512_SUPPORT)
- PMD_DRV_LOG(INFO,
- "building environment do not support packed ring vectorized");
-#else
+#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
hw->use_vec_rx = 1;
hw->use_vec_tx = 1;
+#else
+ PMD_DRV_LOG(INFO,
+ "building environment do not support packed ring vectorized");
#endif
}
}
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
+	/* Holds virtio_dev_stop()'s status so it can be returned to the caller. */
+	int ret;
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
-	virtio_dev_stop(eth_dev);
+	/* dev_stop now reports failure; record it but still close the device. */
+	ret = virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
-	eth_dev->dev_ops = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
-	return 0;
+	/* Propagate the stop status instead of unconditionally returning 0. */
+	return ret;
}
if ((hw->use_vec_rx || hw->use_vec_tx) &&
(!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
!vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
- !vtpci_with_feature(hw, VIRTIO_F_VERSION_1))) {
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized path for requirements not met");
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
+ }
+#elif defined(RTE_ARCH_ARM)
+ if ((hw->use_vec_rx || hw->use_vec_tx) &&
+ (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
+ !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized path for requirements not met");
hw->use_vec_rx = 0;
"disabled split ring vectorized rx for offloading enabled");
hw->use_vec_rx = 0;
}
+
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized rx, max SIMD bitwidth too low");
+ hw->use_vec_rx = 0;
+ }
}
}
/*
* Stop device: disable interrupt and mark link down
*/
-static void
+static int
virtio_dev_stop(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
rte_eth_linkstatus_set(dev, &link);
out_unlock:
rte_spinlock_unlock(&hw->state_lock);
+
+ return 0;
}
static int