#include <rte_ether.h>
#include <rte_common.h>
#include <rte_errno.h>
+#include <rte_cpuflags.h>
#include <rte_memory.h>
#include <rte_eal.h>
static void virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
- __rte_unused int wait_to_complete);
+ int wait_to_complete);
static void virtio_set_hwaddr(struct virtio_hw *hw);
static void virtio_get_hwaddr(struct virtio_hw *hw);
uint16_t vlan_id, int on);
static int virtio_mac_addr_add(struct rte_eth_dev *dev,
struct ether_addr *mac_addr,
- uint32_t index, uint32_t vmdq __rte_unused);
+ uint32_t index, uint32_t vmdq);
static void virtio_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
static void virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
static int virtio_dev_queue_stats_mapping_set(
- __rte_unused struct rte_eth_dev *eth_dev,
- __rte_unused uint16_t queue_id,
- __rte_unused uint8_t stat_idx,
- __rte_unused uint8_t is_rx);
+ struct rte_eth_dev *eth_dev,
+ uint16_t queue_id,
+ uint8_t stat_idx,
+ uint8_t is_rx);
/*
* The set of PCI devices this driver supports
uint32_t head, i;
int k, sum = 0;
virtio_net_ctrl_ack status = ~0;
- struct virtio_pmd_ctrl result;
+ struct virtio_pmd_ctrl *result;
struct virtqueue *vq;
ctrl->status = status;
PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
vq->vq_free_cnt, vq->vq_desc_head_idx);
- memcpy(&result, cvq->virtio_net_hdr_mz->addr,
- sizeof(struct virtio_pmd_ctrl));
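+ /* read the status in place from the memzone shared with the device,
+ * instead of copying the whole control structure
+ */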
+ result = cvq->virtio_net_hdr_mz->addr;
- return result.status;
+ return result->status;
}
static int
}
}
- memset(mz->addr, 0, sizeof(mz->len));
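+ /* clear the whole memzone backing the vring */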
+ memset(mz->addr, 0, mz->len);
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
_rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC, NULL);
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL, NULL);
}
}
+/* set rx and tx handlers according to what is supported */
static void
-rx_func_get(struct rte_eth_dev *eth_dev)
+set_rxtx_funcs(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
- else
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+
+ if (hw->use_simple_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
}
/* Only support 1:1 queue/interrupt mapping so far.
return -1;
if (!hw->virtio_user_dev) {
- pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
}
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- rx_func_get(eth_dev);
-
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf));
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
- ret = virtio_remap_pci(RTE_DEV_TO_PCI(eth_dev->device),
- hw);
+ ret = virtio_remap_pci(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
virtio_set_vtpci_ops(hw);
- if (hw->use_simple_rxtx) {
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_simple;
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else {
- rx_func_get(eth_dev);
- }
+ set_rxtx_funcs(eth_dev);
+
return 0;
}
* virtio_user_eth_dev_alloc() before eth_virtio_dev_init() is called.
*/
if (!hw->virtio_user_dev) {
- ret = vtpci_init(RTE_DEV_TO_PCI(eth_dev->device), hw);
+ ret = vtpci_init(RTE_ETH_DEV_TO_PCI(eth_dev), hw);
if (ret)
return ret;
}
virtio_interrupt_handler,
eth_dev);
if (eth_dev->device)
- rte_pci_unmap_device(RTE_DEV_TO_PCI(eth_dev->device));
+ rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
PMD_INIT_LOG(DEBUG, "configure");
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
+
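+ /* if Rx interrupts are requested, re-initialize the device here:
+ * the probe-time init could not know that per-queue interrupt
+ * vectors would be needed
+ */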
+ if (dev->data->dev_conf.intr_conf.rxq) {
+ ret = virtio_init_device(dev, hw->req_guest_features);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* The name hw_ip_checksum is a bit confusing: the application can
+ * set it to request L3 and/or L4 checksums, but in the case of
+ * virtio only the L4 checksum is supported.
+ */
if (rxmode->hw_ip_checksum)
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
+
if (rxmode->enable_lro)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
if (rxmode->hw_ip_checksum &&
!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
- PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ PMD_DRV_LOG(ERR,
+ "rx checksum not available on this host");
return -ENOTSUP;
}
if (rxmode->enable_lro &&
(!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
- PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ PMD_DRV_LOG(ERR,
+ "Large Receive Offload not available on this host");
return -ENOTSUP;
}
if (rxmode->hw_vlan_filter
&& !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
- PMD_DRV_LOG(NOTICE,
+ PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
return -ENOTSUP;
}
return -EBUSY;
}
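+ /* default to the simple Rx/Tx paths, and disable them below when a
+ * requested feature is not supported by those paths
+ */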
+ hw->use_simple_rx = 1;
+ hw->use_simple_tx = 1;
+
+#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+#endif
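+ /* the simple paths do not support mergeable Rx buffers */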
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ hw->use_simple_rx = 0;
+ hw->use_simple_tx = 0;
+ }
+
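+ /* the simple Rx path does not parse the virtio net header, so it
+ * cannot report Rx checksum status to the application
+ */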
+ if (rxmode->hw_ip_checksum)
+ hw->use_simple_rx = 0;
+
return 0;
}
struct virtnet_rx *rxvq;
- struct virtnet_tx *txvq __rte_unused;
+ struct virtnet_tx *txvq;
struct virtio_hw *hw = dev->data->dev_private;
+ int ret;
+
+ /* Finish the initialization of the queues */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ ret = virtio_dev_rx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ ret = virtio_dev_tx_queue_setup_finish(dev, i);
+ if (ret < 0)
+ return ret;
+ }
/* check if lsc interrupt feature is enabled */
if (dev->data->dev_conf.intr_conf.lsc) {
virtqueue_notify(rxvq->vq);
}
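+ /* notify the backend of the Tx queues as well at initialization */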
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txvq = dev->data->tx_queues[i];
+ virtqueue_notify(txvq->vq);
+ }
+
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
VIRTQUEUE_DUMP(txvq->vq);
}
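+ /* all features and queues are set up: select the Rx/Tx handlers */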
+ set_rxtx_funcs(dev);
hw->started = 1;
/* Initialize Link state */
dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
- dev_info->pci_dev = dev->device ? RTE_DEV_TO_PCI(dev->device) : NULL;
+ dev_info->pci_dev = dev->device ? RTE_ETH_DEV_TO_PCI(dev) : NULL;
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
dev_info->max_tx_queues =