#include <rte_common.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>
+#include <rte_vect.h>
#include <rte_memory.h>
#include <rte_eal.h>
static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev);
static int virtio_dev_configure(struct rte_eth_dev *dev);
static int virtio_dev_start(struct rte_eth_dev *dev);
-static void virtio_dev_stop(struct rte_eth_dev *dev);
+static int virtio_dev_stop(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_enable(struct rte_eth_dev *dev);
static int virtio_dev_promiscuous_disable(struct rte_eth_dev *dev);
static int virtio_dev_allmulticast_enable(struct rte_eth_dev *dev);
static int virtio_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
+static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
+ int *vdpa,
+ uint32_t *speed,
+ int *vectorized);
static int virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
		int wait_to_complete);
-int virtio_logtype_init;
-int virtio_logtype_driver;
-
static void virtio_notify_peers(struct rte_eth_dev *dev);
static void virtio_ack_link_announce(struct rte_eth_dev *dev);
virtio_wmb(vq->hw->weak_barriers);
virtqueue_notify(vq);
- /* wait for used descriptors in virtqueue */
+ /* wait for used desc in virtqueue
+ * desc_is_used has a load-acquire or rte_io_rmb inside
+ */
while (!desc_is_used(&desc[head], vq))
usleep(100);
- virtio_rmb(vq->hw->weak_barriers);
-
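
For reference, the explicit read barrier removed here became redundant because the packed-ring "used" check itself now carries the ordering. A minimal standalone sketch (descriptor layout per the virtio 1.1 spec; the helper name and wrapper are illustrative, not the PMD's exact code):

```c
#include <stdbool.h>
#include <stdint.h>

#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
#define VRING_PACKED_DESC_F_USED	(1 << 15)

struct packed_desc {	/* virtio 1.1 packed descriptor layout */
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/* A descriptor is "used" when its AVAIL and USED bits are equal and match
 * the ring's used wrap counter. With weak barriers the flags word is read
 * with load-acquire, so later reads of len/id cannot be hoisted above it,
 * which is why the separate virtio_rmb() can be dropped.
 */
static bool
packed_desc_is_used(const struct packed_desc *desc, bool used_wrap_counter,
		bool weak_barriers)
{
	uint16_t flags;

	if (weak_barriers)
		flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
	else
		flags = desc->flags;	/* ordered by rte_io_rmb() instead */

	bool avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
	bool used = !!(flags & VRING_PACKED_DESC_F_USED);

	return avail == used && used == used_wrap_counter;
}
```
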
/* now get used descriptors */
vq->vq_free_cnt += nb_descs;
vq->vq_used_cons_idx += nb_descs;
virtqueue_notify(vq);
- rte_rmb();
- while (VIRTQUEUE_NUSED(vq) == 0) {
- rte_rmb();
+ while (virtqueue_nused(vq) == 0)
usleep(100);
- }
- while (VIRTQUEUE_NUSED(vq)) {
+ while (virtqueue_nused(vq)) {
uint32_t idx, desc_idx, used_idx;
struct vring_used_elem *uep;
return -EINVAL;
}
- if (!rte_is_power_of_2(vq_size)) {
- PMD_INIT_LOG(ERR, "virtqueue size is not powerof 2");
+ if (!vtpci_packed_queue(hw) && !rte_is_power_of_2(vq_size)) {
+		PMD_INIT_LOG(ERR, "split virtqueue size is not a power of 2");
return -EINVAL;
}
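
The relaxed check matches the ring index arithmetic: a split ring masks indices with `vq_size - 1`, which is only a correct modulo for powers of two, while a packed ring walks slots linearly and flips a wrap counter, so any size works. A small illustration (helper names are ours):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Split ring: "idx & (size - 1)" is a correct modulo only for powers of 2. */
static uint16_t
split_used_slot(uint16_t used_cons_idx, uint16_t vq_size)
{
	assert((vq_size & (vq_size - 1)) == 0);
	return used_cons_idx & (vq_size - 1);
}

/* Packed ring: advance linearly, wrap explicitly; no size constraint. */
static uint16_t
packed_next_slot(uint16_t used_idx, uint16_t vq_size, bool *wrap_counter)
{
	if (++used_idx >= vq_size) {
		used_idx -= vq_size;
		*wrap_counter = !*wrap_counter;
	}
	return used_idx;
}
```
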
hw->cvq = cvq;
}
- /* For virtio_user case (that is when hw->dev is NULL), we use
- * virtual address. And we need properly set _offset_, please see
+	/* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
+	 * we use the virtual address, and we need to set _offset_ properly; see
* VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
*/
if (!hw->virtio_user_dev)
txr = hdr_mz->addr;
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
- struct vring_desc *start_dp = txr[i].tx_indir;
-
/* first indirect descriptor is always the tx header */
if (!vtpci_packed_queue(hw)) {
+ struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
start_dp->addr = txvq->virtio_net_hdr_mem
tx_hdr);
start_dp->len = hw->vtnet_hdr_size;
start_dp->flags = VRING_DESC_F_NEXT;
+ } else {
+ struct vring_packed_desc *start_dp =
+ txr[i].tx_packed_indir;
+ vring_desc_init_indirect_packed(start_dp,
+ RTE_DIM(txr[i].tx_packed_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
}
}
}
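
For orientation, the per-slot region this loop initializes looks roughly as follows (a standalone sketch: the array bound, stand-in field types, and alignment are assumptions, not the PMD's exact definitions). The union is what lets the same `i * sizeof(*txr)` stride address either a split or a packed indirect table:

```c
#include <stdint.h>

#define MAX_TX_INDIRECT 8	/* illustrative bound */

struct split_desc  { uint64_t addr; uint32_t len; uint16_t flags, next; };
struct packed_desc { uint64_t addr; uint32_t len; uint16_t id, flags; };
struct net_hdr     { uint8_t bytes[12]; };	/* stand-in virtio-net header */

/* Rough shape of struct virtio_tx_region: the Tx header first, then one
 * indirect descriptor table whose entry format depends on the negotiated
 * ring layout; hdr_mz->addr holds vq_size of these back to back.
 */
struct tx_region_sketch {
	struct net_hdr tx_hdr;
	union {
		struct split_desc  tx_indir[MAX_TX_INDIRECT];
		struct packed_desc tx_packed_indir[MAX_TX_INDIRECT];
	} __attribute__((aligned(16)));
};
```
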
static void virtio_queues_unbind_intr(struct rte_eth_dev *dev);
-static void
+static int
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
if (!hw->opened)
- return;
+ return 0;
hw->opened = false;
/* reset the NIC */
if (!hw->modern)
rte_pci_ioport_unmap(VTPCI_IO(hw));
}
+
+ return 0;
}
static int
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
.rx_queue_release = virtio_dev_queue_release,
- .rx_descriptor_done = virtio_dev_rx_queue_done,
.tx_queue_setup = virtio_dev_tx_queue_setup,
.tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
PMD_INIT_LOG(DEBUG, "features after negotiate = %" PRIx64,
hw->guest_features);
- if (hw->modern) {
- if (!vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
- PMD_INIT_LOG(ERR,
- "VIRTIO_F_VERSION_1 features is not enabled.");
- return -1;
- }
+ if (hw->modern && !vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ PMD_INIT_LOG(ERR,
+ "VIRTIO_F_VERSION_1 features is not enabled.");
+ return -1;
+ }
+
+ if (hw->modern || hw->virtio_user_dev) {
vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
PMD_INIT_LOG(ERR,
if (isr & VIRTIO_PCI_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
- _rte_eth_dev_callback_process(dev,
- RTE_ETH_EVENT_INTR_LSC,
- NULL);
+ rte_eth_dev_callback_process(dev,
+ RTE_ETH_EVENT_INTR_LSC,
+ NULL);
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
if (vtpci_packed_queue(hw)) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring %s Tx path on port %u",
- hw->use_inorder_tx ? "inorder" : "standard",
+ hw->use_vec_tx ? "vectorized" : "standard",
eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ if (hw->use_vec_tx)
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
+ else
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
} else {
if (hw->use_inorder_tx) {
PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
}
if (vtpci_packed_queue(hw)) {
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring vectorized Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_pkts_packed_vec;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring mergeable buffer Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
}
} else {
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
} else if (hw->use_inorder_rx) {
return 0;
}
-
+#define DUPLEX_UNKNOWN 0xff
+
/* reset device and renegotiate features if needed */
static int
virtio_init_device(struct rte_eth_dev *eth_dev, uint64_t req_features)
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+ if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
+ config = &local_config;
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, speed),
+ &config->speed, sizeof(config->speed));
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, duplex),
+ &config->duplex, sizeof(config->duplex));
+ hw->speed = config->speed;
+ hw->duplex = config->duplex;
+ }
+ }
+ if (hw->duplex == DUPLEX_UNKNOWN)
+ hw->duplex = ETH_LINK_FULL_DUPLEX;
+ PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
+ hw->speed, hw->duplex);
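
The two vtpci_read_dev_config() calls target the trailing fields of the virtio-net device config space, which per the virtio spec is laid out roughly as below (standalone sketch; speed and duplex are only meaningful when VIRTIO_NET_F_SPEED_DUPLEX was negotiated):

```c
#include <stdint.h>

/* Sketch of struct virtio_net_config as seen in device config space. */
struct virtio_net_config_sketch {
	uint8_t  mac[6];
	uint16_t status;
	uint16_t max_virtqueue_pairs;
	uint16_t mtu;
	uint32_t speed;		/* link speed in Mbps, 0xffffffff = unknown */
	uint8_t  duplex;	/* 0x00 half, 0x01 full, 0xff = DUPLEX_UNKNOWN */
} __attribute__((packed));
```
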
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
+ uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+ int vectorized = 0;
int ret;
if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
+ eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
if (!hw->virtio_user_dev) {
return 0;
}
-
- /*
- * Pass the information to the rte_eth_dev_close() that it should also
- * release the private port resources.
- */
- eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
+ ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
+ NULL, &speed, &vectorized);
+ if (ret < 0)
+ return ret;
+ hw->speed = speed;
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("virtio",
goto err_vtpci_init;
}
+ rte_spinlock_init(&hw->state_lock);
+
/* reset device and negotiate default features */
ret = virtio_init_device(eth_dev, VIRTIO_PMD_DEFAULT_GUEST_FEATURES);
if (ret < 0)
goto err_virtio_init;
+ if (vectorized) {
+ if (!vtpci_packed_queue(hw)) {
+ hw->use_vec_rx = 1;
+ } else {
+#if defined(CC_AVX512_SUPPORT) || defined(RTE_ARCH_ARM)
+ hw->use_vec_rx = 1;
+ hw->use_vec_tx = 1;
+#else
+ PMD_DRV_LOG(INFO,
+ "building environment do not support packed ring vectorized");
+#endif
+ }
+ }
+
hw->opened = true;
return 0;
static int
eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev)
{
+	int ret;
+
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
- virtio_dev_stop(eth_dev);
+ ret = virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
- eth_dev->dev_ops = NULL;
- eth_dev->tx_pkt_burst = NULL;
- eth_dev->rx_pkt_burst = NULL;
-
PMD_INIT_LOG(DEBUG, "dev_uninit completed");
- return 0;
+ return ret;
}
+
static int vdpa_check_handler(__rte_unused const char *key,
- const char *value, __rte_unused void *opaque)
+ const char *value, void *ret_val)
{
- if (strcmp(value, "1"))
- return -1;
+ if (strcmp(value, "1") == 0)
+ *(int *)ret_val = 1;
+ else
+ *(int *)ret_val = 0;
+
+ return 0;
+}
+
+static uint32_t
+virtio_dev_speed_capa_get(uint32_t speed)
+{
+ switch (speed) {
+ case ETH_SPEED_NUM_10G:
+ return ETH_LINK_SPEED_10G;
+ case ETH_SPEED_NUM_20G:
+ return ETH_LINK_SPEED_20G;
+ case ETH_SPEED_NUM_25G:
+ return ETH_LINK_SPEED_25G;
+ case ETH_SPEED_NUM_40G:
+ return ETH_LINK_SPEED_40G;
+ case ETH_SPEED_NUM_50G:
+ return ETH_LINK_SPEED_50G;
+ case ETH_SPEED_NUM_56G:
+ return ETH_LINK_SPEED_56G;
+ case ETH_SPEED_NUM_100G:
+ return ETH_LINK_SPEED_100G;
+ case ETH_SPEED_NUM_200G:
+ return ETH_LINK_SPEED_200G;
+ default:
+ return 0;
+ }
+}
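
This helper serves double duty: virtio_dev_info_get() uses it to report speed_capa, and link_speed_handler() below uses the zero return to reject bogus devargs values. A quick usage sketch, assuming it is called from within this file:

```c
#include <assert.h>
#include <rte_ethdev.h>

static void
speed_capa_examples(void)
{
	/* a known speed maps to its capability flag */
	assert(virtio_dev_speed_capa_get(ETH_SPEED_NUM_25G) ==
			ETH_LINK_SPEED_25G);
	/* anything else maps to 0, i.e. "not a valid speed" */
	assert(virtio_dev_speed_capa_get(999) == 0);
}
```
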
+
+static int vectorized_check_handler(__rte_unused const char *key,
+ const char *value, void *ret_val)
+{
+ if (strcmp(value, "1") == 0)
+ *(int *)ret_val = 1;
+ else
+ *(int *)ret_val = 0;
+
+ return 0;
+}
+
+#define VIRTIO_ARG_SPEED "speed"
+#define VIRTIO_ARG_VDPA "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
+
+static int
+link_speed_handler(const char *key __rte_unused,
+ const char *value, void *ret_val)
+{
+	uint32_t val;
+
+ if (!value || !ret_val)
+ return -EINVAL;
+ val = strtoul(value, NULL, 0);
+ /* validate input */
+ if (virtio_dev_speed_capa_get(val) == 0)
+ return -EINVAL;
+ *(uint32_t *)ret_val = val;
return 0;
}
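
A minimal sketch of how this handler is driven through rte_kvargs, mirroring virtio_dev_devargs_parse() below (the standalone main() and the literal argument string are illustrative; link_speed_handler is assumed visible in the same file):

```c
#include <stdint.h>
#include <stdio.h>
#include <rte_kvargs.h>

int
main(void)
{
	/* as it would appear on the EAL command line:
	 * -a 0000:00:04.0,speed=10000
	 */
	struct rte_kvargs *kvlist = rte_kvargs_parse("speed=10000", NULL);
	uint32_t speed = 0;

	if (kvlist == NULL)
		return 1;
	if (rte_kvargs_count(kvlist, "speed") == 1)
		rte_kvargs_process(kvlist, "speed",
				link_speed_handler, &speed);
	rte_kvargs_free(kvlist);
	printf("speed=%u Mbps\n", speed);	/* prints speed=10000 */
	return 0;
}
```
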
+
static int
-vdpa_mode_selected(struct rte_devargs *devargs)
+virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
+ uint32_t *speed, int *vectorized)
{
struct rte_kvargs *kvlist;
- const char *key = "vdpa";
int ret = 0;
if (devargs == NULL)
return 0;
kvlist = rte_kvargs_parse(devargs->args, NULL);
- if (kvlist == NULL)
+ if (kvlist == NULL) {
+ PMD_INIT_LOG(ERR, "error when parsing param");
return 0;
+ }
+ if (vdpa && rte_kvargs_count(kvlist, VIRTIO_ARG_VDPA) == 1) {
+ /* vdpa mode selected when there's a key-value pair:
+ * vdpa=1
+ */
+ ret = rte_kvargs_process(kvlist, VIRTIO_ARG_VDPA,
+ vdpa_check_handler, vdpa);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_VDPA);
+ goto exit;
+ }
+ }
+ if (speed && rte_kvargs_count(kvlist, VIRTIO_ARG_SPEED) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ VIRTIO_ARG_SPEED,
+ link_speed_handler, speed);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_SPEED);
+ goto exit;
+ }
+ }
- if (!rte_kvargs_count(kvlist, key))
- goto exit;
-
- /* vdpa mode selected when there's a key-value pair: vdpa=1 */
- if (rte_kvargs_process(kvlist, key,
- vdpa_check_handler, NULL) < 0) {
- goto exit;
+ if (vectorized &&
+ rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ VIRTIO_ARG_VECTORIZED,
+ vectorized_check_handler, vectorized);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_VECTORIZED);
+ goto exit;
+ }
}
- ret = 1;
exit:
rte_kvargs_free(kvlist);
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
+ int vdpa = 0;
+ int ret = 0;
+
+ ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+ NULL);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "devargs parsing is failed");
+ return ret;
+ }
/* virtio pmd skips probe if device needs to work in vdpa mode */
- if (vdpa_mode_selected(pci_dev->device.devargs))
+ if (vdpa == 1)
return 1;
return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct virtio_hw),
return -EINVAL;
}
+ if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ PMD_DRV_LOG(ERR,
+ "Unsupported Tx multi queue mode %d",
+ txmode->mq_mode);
+ return -EINVAL;
+ }
+
if (dev->data->dev_conf.intr_conf.rxq) {
ret = virtio_init_device(dev, hw->req_guest_features);
if (ret < 0)
return -EBUSY;
}
- rte_spinlock_init(&hw->state_lock);
-
- hw->use_simple_rx = 1;
+ if (vtpci_packed_queue(hw)) {
+#if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
+ if ((hw->use_vec_rx || hw->use_vec_tx) &&
+ (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
+ !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized path for requirements not met");
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
+ }
+#elif defined(RTE_ARCH_ARM)
+ if ((hw->use_vec_rx || hw->use_vec_tx) &&
+ (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
+ !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized path for requirements not met");
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
+ }
+#else
+ hw->use_vec_rx = 0;
+ hw->use_vec_tx = 0;
+#endif
- if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
- hw->use_inorder_tx = 1;
- hw->use_inorder_rx = 1;
- hw->use_simple_rx = 0;
- }
+ if (hw->use_vec_rx) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized rx for mrg_rxbuf enabled");
+ hw->use_vec_rx = 0;
+ }
- if (vtpci_packed_queue(hw)) {
- hw->use_simple_rx = 0;
- hw->use_inorder_rx = 0;
- }
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ PMD_DRV_LOG(INFO,
+ "disabled packed ring vectorized rx for TCP_LRO enabled");
+ hw->use_vec_rx = 0;
+ }
+ }
+ } else {
+ if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
+ hw->use_inorder_tx = 1;
+ hw->use_inorder_rx = 1;
+ hw->use_vec_rx = 0;
+ }
-#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
- if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
- hw->use_simple_rx = 0;
- }
+ if (hw->use_vec_rx) {
+#if defined RTE_ARCH_ARM
+ if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized path for requirement not met");
+ hw->use_vec_rx = 0;
+ }
#endif
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_simple_rx = 0;
- }
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized rx for mrg_rxbuf enabled");
+ hw->use_vec_rx = 0;
+ }
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_VLAN_STRIP))
- hw->use_simple_rx = 0;
+ if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_VLAN_STRIP)) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized rx for offloading enabled");
+ hw->use_vec_rx = 0;
+ }
+
+ if (rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128) {
+ PMD_DRV_LOG(INFO,
+ "disabled split ring vectorized rx, max SIMD bitwidth too low");
+ hw->use_vec_rx = 0;
+ }
+ }
+ }
return 0;
}
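
Condensed, the packed-ring branch above is a single gate per architecture; a standalone restatement of the x86_64 case (the function name is ours and the PMD headers are assumed; the calls are the same DPDK/PMD APIs used above):

```c
#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

/* All four conditions must hold, otherwise virtio_dev_configure() falls
 * back to the scalar packed ring path by clearing use_vec_rx/use_vec_tx.
 */
static bool
packed_vec_requirements_met_x86(struct virtio_hw *hw)
{
	return rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
		vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) &&
		vtpci_with_feature(hw, VIRTIO_F_VERSION_1) &&
		rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512;
}
```
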
/*
* Stop device: disable interrupt and mark link down
*/
-static void
+static int
virtio_dev_stop(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
+ dev->data->dev_started = 0;
rte_spinlock_lock(&hw->state_lock);
if (!hw->started)
rte_eth_linkstatus_set(dev, &link);
out_unlock:
rte_spinlock_unlock(&hw->state_lock);
+
+ return 0;
}
static int
struct virtio_hw *hw = dev->data->dev_private;
memset(&link, 0, sizeof(link));
- link.link_duplex = ETH_LINK_FULL_DUPLEX;
- link.link_speed = ETH_SPEED_NUM_10G;
- link.link_autoneg = ETH_LINK_FIXED;
+ link.link_duplex = hw->duplex;
+ link.link_speed = hw->speed;
+ link.link_autoneg = ETH_LINK_AUTONEG;
if (!hw->started) {
link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
&status, sizeof(status));
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
link.link_status = ETH_LINK_DOWN;
+ link.link_speed = ETH_SPEED_NUM_NONE;
PMD_INIT_LOG(DEBUG, "Port %d is down",
dev->data->port_id);
} else {
{
uint64_t tso_mask, host_features;
struct virtio_hw *hw = dev->data->dev_private;
-
- dev_info->speed_capa = ETH_LINK_SPEED_10G; /* fake value */
+ dev_info->speed_capa = virtio_dev_speed_capa_get(hw->speed);
dev_info->max_rx_queues =
RTE_MIN(hw->max_queue_pairs, VIRTIO_MAX_RX_QUEUES);
RTE_PMD_EXPORT_NAME(net_virtio, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(net_virtio, pci_id_virtio_map);
RTE_PMD_REGISTER_KMOD_DEP(net_virtio, "* igb_uio | uio_pci_generic | vfio-pci");
-
-RTE_INIT(virtio_init_log)
-{
- virtio_logtype_init = rte_log_register("pmd.net.virtio.init");
- if (virtio_logtype_init >= 0)
- rte_log_set_level(virtio_logtype_init, RTE_LOG_NOTICE);
- virtio_logtype_driver = rte_log_register("pmd.net.virtio.driver");
- if (virtio_logtype_driver >= 0)
- rte_log_set_level(virtio_logtype_driver, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(virtio_logtype_init, pmd.net.virtio.init, NOTICE);
+RTE_LOG_REGISTER(virtio_logtype_driver, pmd.net.virtio.driver, NOTICE);
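
The two RTE_LOG_REGISTER lines subsume the hand-written constructor deleted above; the macro's definition (as of DPDK 20.11, quoted from memory, so treat as approximate) is essentially:

```c
#include <rte_common.h>
#include <rte_log.h>

/* Approximate expansion: register the logtype in a constructor and pick
 * up any --log-level override, defaulting to the given level.
 */
#define RTE_LOG_REGISTER_SKETCH(type, name, level)			\
int type;								\
RTE_INIT(type ## _log_init)						\
{									\
	type = rte_log_register_type_and_pick_level(RTE_STR(name),	\
						    RTE_LOG_ ## level);	\
	if (type < 0)							\
		type = RTE_LOGTYPE_PMD;					\
}
```
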