#include <unistd.h>
#include <ethdev_driver.h>
-#include <ethdev_pci.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_branch_prediction.h>
-#include <rte_pci.h>
-#include <rte_bus_pci.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_arp.h>
#include <rte_errno.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>
-
#include <rte_memory.h>
+#include <rte_eal_paging.h>
#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include "virtio_ethdev.h"
-#include "virtio_pci.h"
+#include "virtio.h"
#include "virtio_logs.h"
#include "virtqueue.h"
#include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
#include "virtio_user/virtio_user_dev.h"
static int virtio_dev_configure(struct rte_eth_dev *dev);
struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
- struct virtqueue *vq = cvq->vq;
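+ /*
+ * The per-queue structs no longer carry a back-pointer to their
+ * virtqueue; they are embedded in struct virtqueue instead, and
+ * virtnet_cq_to_vq() recovers the parent. Presumably a
+ * container_of()-style macro in virtqueue.h, e.g.:
+ *   #define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+ */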
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
int head;
struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
"vq->vq_avail_idx=%d\n"
"vq->vq_used_cons_idx=%d\n"
"vq->vq_packed.cached_flags=0x%x\n"
- "vq->vq_packed.used_wrap_counter=%d\n",
+ "vq->vq_packed.used_wrap_counter=%d",
vq->vq_free_cnt,
vq->vq_avail_idx,
vq->vq_used_cons_idx,
int *dlen, int pkt_num)
{
struct virtio_pmd_ctrl *result;
- struct virtqueue *vq = cvq->vq;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
uint32_t head, i;
int k, sum = 0;
ctrl->status = status;
- if (!cvq || !cvq->vq) {
+ if (!cvq) {
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
rte_spinlock_lock(&cvq->lock);
- vq = cvq->vq;
+ vq = virtnet_cq_to_vq(cvq);
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
return 0;
}
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
- /* do nothing */
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
if (virtio_with_packed_queue(vq->hw)) {
vring_init_packed(&vq->vq_packed.ring, ring_mem,
- VIRTIO_PCI_VRING_ALIGN, size);
+ VIRTIO_VRING_ALIGN, size);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
vring_desc_init_split(vr->desc, size);
}
/*
}
static int
-virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
+virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
{
char vq_name[VIRTQUEUE_MAX_NAME_SZ];
char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ];
struct virtqueue *vq;
size_t sz_hdr_mz = 0;
void *sw_ring = NULL;
- int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
+ int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
int numa_node = dev->device->numa_node;
+ struct rte_mbuf *fake_mbuf = NULL;
PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
- vtpci_queue_idx, numa_node);
+ queue_idx, numa_node);
/*
* Read the virtqueue size from the Queue Size field
* Always a power of 2; if 0, the virtqueue does not exist
*/
- vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, vtpci_queue_idx);
+ vq_size = VIRTIO_OPS(hw)->get_queue_num(hw, queue_idx);
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
}
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
- dev->data->port_id, vtpci_queue_idx);
+ dev->data->port_id, queue_idx);
size = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
/* Allocate a page for control vq command, data and status */
- sz_hdr_mz = PAGE_SIZE;
+ sz_hdr_mz = rte_mem_page_size();
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
}
- hw->vqs[vtpci_queue_idx] = vq;
+ hw->vqs[queue_idx] = vq;
vq->hw = hw;
- vq->vq_queue_index = vtpci_queue_idx;
+ vq->vq_queue_index = queue_idx;
vq->vq_nentries = vq_size;
if (virtio_with_packed_queue(hw)) {
vq->vq_packed.used_wrap_counter = 1;
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
- VIRTIO_PCI_VRING_ALIGN);
+ VIRTIO_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
mz = rte_memzone_lookup(vq_name);
if (mz == NULL) {
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_vq;
}
}
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->iova;
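+ /*
+ * hw->use_va is set by backends (e.g. virtio-user) that hand guest
+ * memory to the device by process VA; for them the ring address is the
+ * memzone VA, otherwise it stays the IOVA as before.
+ */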
+ if (hw->use_va)
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ else
+ vq->vq_ring_mem = mz->iova;
+
vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->iova);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
- (uint64_t)(uintptr_t)mz->addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
virtio_init_vring(vq);
if (sz_hdr_mz) {
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
- dev->data->port_id, vtpci_queue_idx);
+ dev->data->port_id, queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
RTE_CACHE_LINE_SIZE);
hdr_mz = rte_memzone_lookup(vq_hdr_name);
if (hdr_mz == NULL) {
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_mz;
}
}
}
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_hdr_mz;
+ }
+
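+ /*
+ * Dummy mbuf for the vectorized Rx path: sw_ring slots past the end of
+ * the ring point at it so speculative loads stay in valid memory.
+ * Allocated per queue on the right NUMA node rather than shared as a
+ * single static mbuf across all queues.
+ */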
+ fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!fake_mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ ret = -ENOMEM;
+ goto free_sw_ring;
}
vq->sw_ring = sw_ring;
rxvq = &vq->rxq;
- rxvq->vq = vq;
rxvq->port_id = dev->data->port_id;
rxvq->mz = mz;
+ rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
txvq = &vq->txq;
- txvq->vq = vq;
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->iova;
+ if (hw->use_va)
+ txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
- cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->iova;
- memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE);
+ if (hw->use_va)
+ cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
+ memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
hw->cvq = cvq;
}
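+ /*
+ * Record where the datapath reads a buffer address from: VA-based
+ * backends dereference buf_addr, the rest buf_iova. A sketch of the
+ * accessor (the real helper presumably lives in virtqueue.h):
+ *   addr = *(uintptr_t *)((uintptr_t)mb + vq->mbuf_addr_offset);
+ */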
+ if (hw->use_va)
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+ else
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+
if (queue_type == VTNET_TQ) {
struct virtio_tx_region *txr;
unsigned int i;
if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto clean_vq;
}
return 0;
-fail_q_alloc:
+clean_vq:
+ hw->cvq = NULL;
+ rte_free(fake_mbuf);
+free_sw_ring:
rte_free(sw_ring);
+free_hdr_mz:
rte_memzone_free(hdr_mz);
+free_mz:
rte_memzone_free(mz);
+free_vq:
rte_free(vq);
+ hw->vqs[queue_idx] = NULL;
return ret;
}
queue_type = virtio_get_queue_type(hw, i);
if (queue_type == VTNET_RQ) {
+ rte_free(vq->rxq.fake_mbuf);
rte_free(vq->sw_ring);
rte_memzone_free(vq->rxq.mz);
} else if (queue_type == VTNET_TQ) {
virtio_dev_close(struct rte_eth_dev *dev)
{
struct virtio_hw *hw = dev->data->dev_private;
- struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+ struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
dev->intr_handle->intr_vec = NULL;
}
- vtpci_reset(hw);
+ virtio_reset(hw);
virtio_dev_free_mbufs(dev);
virtio_free_queues(hw);
return 0;
}
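+/*
+ * Usable Rx buffer size of one mbuf: the pool's data room minus the
+ * reserved headroom. E.g. a pool created with RTE_MBUF_DEFAULT_BUF_SIZE
+ * (2176 bytes) and the default 128-byte headroom yields 2048 bytes.
+ */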
+uint16_t
+virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
+{
+ return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+}
+
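+/*
+ * Shared by the MTU update and Rx queue setup paths: with Rx scatter
+ * disabled, a whole frame must fit into a single mbuf from the pool.
+ */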
+bool
+virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
+ bool rx_scatter_enabled, const char **error)
+{
+ if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
+ uint16_t frame_size)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq;
+ struct virtqueue *vq;
+ unsigned int qidx;
+ uint16_t buf_size;
+ const char *error;
+
+ if (hw->vqs == NULL)
+ return true;
+
+ for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
+ vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX];
+ if (vq == NULL)
+ continue;
+
+ rxvq = &vq->rxq;
+ if (rxvq->mpool == NULL)
+ continue;
+ buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
+
+ if (!virtio_rx_check_scatter(frame_size, buf_size,
+ hw->rx_ol_scatter, &error)) {
+ PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
+ qidx, error);
+ return false;
+ }
+ }
+
+ return true;
+}
+
#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
+
+ if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
+ PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
+ return -EINVAL;
+ }
+
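+ /* Cache the accepted frame size; later Rx buffer checks reuse it. */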
+ hw->max_rx_pkt_len = frame_size;
+
return 0;
}
{
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_enable_intr(vq);
virtio_mb(hw->weak_barriers);
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_disable_intr(vq);
return 0;
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
- .rx_queue_release = virtio_dev_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- .tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
.vlan_filter_set = virtio_vlan_filter_set,
static void
virtio_set_hwaddr(struct virtio_hw *hw)
{
- vtpci_write_dev_config(hw,
+ virtio_write_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
}
virtio_get_hwaddr(struct virtio_hw *hw)
{
if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
} else {
if (host_features & req_features & (1ULL << VIRTIO_NET_F_MTU)) {
struct virtio_net_config config;
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, mtu),
&config.mtu, sizeof(config.mtu));
return -1;
if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
- if (!(vtpci_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
+ if (!(virtio_get_status(hw) & VIRTIO_CONFIG_STATUS_FEATURES_OK)) {
PMD_INIT_LOG(ERR, "Failed to set FEATURES_OK status!");
return -1;
}
uint16_t status;
/* Read interrupt status which clears interrupt */
- isr = vtpci_isr(hw);
+ isr = virtio_get_isr(hw);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
if (virtio_intr_unmask(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
- if (isr & VIRTIO_PCI_ISR_CONFIG) {
+ if (isr & VIRTIO_ISR_CONFIG) {
if (virtio_dev_link_update(dev, 0) == 0)
rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if (status & VIRTIO_NET_S_ANNOUNCE) {
}
}
- /* Re-register callback to update max_intr */
- rte_intr_callback_unregister(dev->intr_handle,
- virtio_interrupt_handler,
- dev);
- rte_intr_callback_register(dev->intr_handle,
- virtio_interrupt_handler,
- dev);
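+ /* The callback is registered only when LSC interrupts are enabled. */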
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ /* Re-register callback to update max_intr */
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ }
/* DO NOT try to remove this! This function will enable msix, or QEMU
* will encounter SIGSEGV when DRIVER_OK is sent.
int ret;
/* Reset the device although not necessary at startup */
- vtpci_reset(hw);
+ virtio_reset(hw);
if (hw->vqs) {
virtio_dev_free_mbufs(eth_dev);
}
/* Tell the host we've noticed this device. */
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_ACK);
/* Tell the host we've known how to drive the device. */
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
return -1;
hw->weak_barriers = !virtio_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
/* If host does not support both status and MSI-X then disable LSC */
- if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) &&
- hw->use_msix != VIRTIO_MSIX_NONE)
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->intr_lsc)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
else
eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rte_ether_addr_copy((struct rte_ether_addr *)hw->mac_addr,
&eth_dev->data->mac_addrs[0]);
PMD_INIT_LOG(DEBUG,
- "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
- if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
+ if (hw->speed == RTE_ETH_SPEED_NUM_UNKNOWN) {
if (virtio_with_feature(hw, VIRTIO_NET_F_SPEED_DUPLEX)) {
config = &local_config;
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, speed),
&config->speed, sizeof(config->speed));
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, duplex),
&config->duplex, sizeof(config->duplex));
hw->speed = config->speed;
}
}
if (hw->duplex == DUPLEX_UNKNOWN)
- hw->duplex = ETH_LINK_FULL_DUPLEX;
+ hw->duplex = RTE_ETH_LINK_FULL_DUPLEX;
PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
hw->speed, hw->duplex);
if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&config->mac, sizeof(config->mac));
if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&config->status, sizeof(config->status));
} else {
}
if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, max_virtqueue_pairs),
&config->max_virtqueue_pairs,
sizeof(config->max_virtqueue_pairs));
hw->max_queue_pairs = config->max_virtqueue_pairs;
if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, mtu),
&config->mtu,
sizeof(config->mtu));
config->max_virtqueue_pairs);
PMD_INIT_LOG(DEBUG, "config->status=%d", config->status);
PMD_INIT_LOG(DEBUG,
- "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X",
+ "PORT MAC: " RTE_ETHER_ADDR_PRT_FMT,
config->mac[0], config->mac[1],
config->mac[2], config->mac[3],
config->mac[4], config->mac[5]);
}
}
- vtpci_reinit_complete(hw);
+ virtio_reinit_complete(hw);
return 0;
}
eth_virtio_dev_init(struct rte_eth_dev *eth_dev)
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- uint32_t speed = ETH_SPEED_NUM_UNKNOWN;
+ uint32_t speed = RTE_ETH_SPEED_NUM_UNKNOWN;
int vectorized = 0;
int ret;
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
set_rxtx_funcs(eth_dev);
if (ret < 0)
return ret;
hw->speed = speed;
+ hw->duplex = DUPLEX_UNKNOWN;
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("virtio",
return -ENOMEM;
}
- hw->port_id = eth_dev->data->port_id;
rte_spinlock_init(&hw->state_lock);
/* reset device and negotiate default features */
virtio_dev_speed_capa_get(uint32_t speed)
{
switch (speed) {
- case ETH_SPEED_NUM_10G:
- return ETH_LINK_SPEED_10G;
- case ETH_SPEED_NUM_20G:
- return ETH_LINK_SPEED_20G;
- case ETH_SPEED_NUM_25G:
- return ETH_LINK_SPEED_25G;
- case ETH_SPEED_NUM_40G:
- return ETH_LINK_SPEED_40G;
- case ETH_SPEED_NUM_50G:
- return ETH_LINK_SPEED_50G;
- case ETH_SPEED_NUM_56G:
- return ETH_LINK_SPEED_56G;
- case ETH_SPEED_NUM_100G:
- return ETH_LINK_SPEED_100G;
- case ETH_SPEED_NUM_200G:
- return ETH_LINK_SPEED_200G;
+ case RTE_ETH_SPEED_NUM_10G:
+ return RTE_ETH_LINK_SPEED_10G;
+ case RTE_ETH_SPEED_NUM_20G:
+ return RTE_ETH_LINK_SPEED_20G;
+ case RTE_ETH_SPEED_NUM_25G:
+ return RTE_ETH_LINK_SPEED_25G;
+ case RTE_ETH_SPEED_NUM_40G:
+ return RTE_ETH_LINK_SPEED_40G;
+ case RTE_ETH_SPEED_NUM_50G:
+ return RTE_ETH_LINK_SPEED_50G;
+ case RTE_ETH_SPEED_NUM_56G:
+ return RTE_ETH_LINK_SPEED_56G;
+ case RTE_ETH_SPEED_NUM_100G:
+ return RTE_ETH_LINK_SPEED_100G;
+ case RTE_ETH_SPEED_NUM_200G:
+ return RTE_ETH_LINK_SPEED_200G;
default:
return 0;
}
PMD_INIT_LOG(DEBUG, "configure");
req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->mq_mode != ETH_MQ_RX_NONE) {
+ if (rxmode->mq_mode != RTE_ETH_MQ_RX_NONE) {
PMD_DRV_LOG(ERR,
"Unsupported Rx multi queue mode %d",
rxmode->mq_mode);
return -EINVAL;
}
- if (txmode->mq_mode != ETH_MQ_TX_NONE) {
+ if (txmode->mq_mode != RTE_ETH_MQ_TX_NONE) {
PMD_DRV_LOG(ERR,
"Unsupported Tx multi queue mode %d",
txmode->mq_mode);
return ret;
}
- if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+ if (rxmode->mtu > hw->max_mtu)
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM))
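+ /* ethdev dropped max_rx_pkt_len from rxmode; derive the frame cap from MTU. */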
+ hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
+
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)
req_features |=
(1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
- if (tx_offloads & (DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM))
+ if (tx_offloads & (RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_CSUM);
- if (tx_offloads & DEV_TX_OFFLOAD_TCP_TSO)
+ if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_TSO)
req_features |=
(1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
return ret;
}
- if ((rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM)) &&
+ if ((rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM)) &&
!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
PMD_DRV_LOG(ERR,
"rx checksum not available on this host");
return -ENOTSUP;
}
- if ((rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) &&
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) &&
(!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
PMD_DRV_LOG(ERR,
if (virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VQ))
virtio_dev_cq_start(dev);
- if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
hw->vlan_strip = 1;
- if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ hw->rx_ol_scatter = (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
+
+ if ((rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
"vlan filtering not available on this host");
hw->use_vec_rx = 0;
}
- if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized rx for TCP_LRO enabled");
hw->use_vec_rx = 0;
hw->use_vec_rx = 0;
}
- if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO |
- DEV_RX_OFFLOAD_VLAN_STRIP)) {
+ if (rx_offloads & (RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_TCP_LRO |
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)) {
PMD_DRV_LOG(INFO,
"disabled split ring vectorized rx for offloading enabled");
hw->use_vec_rx = 0;
virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
- struct virtnet_rx *rxvq;
- struct virtnet_tx *txvq __rte_unused;
+ struct virtqueue *vq;
struct virtio_hw *hw = dev->data->dev_private;
int ret;
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
/* Flush the old packets */
- virtqueue_rxvq_flush(rxvq->vq);
- virtqueue_notify(rxvq->vq);
+ virtqueue_rxvq_flush(vq);
+ virtqueue_notify(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- virtqueue_notify(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ virtqueue_notify(vq);
}
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
- VIRTQUEUE_DUMP(rxvq->vq);
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- VIRTQUEUE_DUMP(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
set_rxtx_funcs(dev);
PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
}
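+/*
+ * Called on device stop: give back the mbufs of Tx descriptors the
+ * device has already used, picking the cleanup routine that matches the
+ * negotiated ring layout (packed vs. split) and in-order mode.
+ */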
+static void
+virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq;
+ int qidx;
+ void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
+
+ if (virtio_with_packed_queue(hw)) {
+ if (hw->use_vec_tx)
+ xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+ else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
+ xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+ else
+ xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
+ } else {
+ if (hw->use_inorder_tx)
+ xmit_cleanup = &virtio_xmit_cleanup_inorder;
+ else
+ xmit_cleanup = &virtio_xmit_cleanup;
+ }
+
+ for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
+ vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
+ if (vq != NULL)
+ xmit_cleanup(vq, virtqueue_nused(vq));
+ }
+}
+
/*
* Stop device: disable interrupt and mark link down
*/
{
struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
- struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+ struct rte_eth_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
PMD_INIT_LOG(DEBUG, "stop");
dev->data->dev_started = 0;
goto out_unlock;
hw->started = 0;
+ virtio_tx_completed_cleanup(dev);
+
if (intr_conf->lsc || intr_conf->rxq) {
virtio_intr_disable(dev);
memset(&link, 0, sizeof(link));
link.link_duplex = hw->duplex;
link.link_speed = hw->speed;
- link.link_autoneg = ETH_LINK_AUTONEG;
+ link.link_autoneg = RTE_ETH_LINK_AUTONEG;
if (!hw->started) {
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
} else if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
- vtpci_read_dev_config(hw,
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
- link.link_status = ETH_LINK_DOWN;
- link.link_speed = ETH_SPEED_NUM_NONE;
+ link.link_status = RTE_ETH_LINK_DOWN;
+ link.link_speed = RTE_ETH_SPEED_NUM_NONE;
PMD_INIT_LOG(DEBUG, "Port %d is down",
dev->data->port_id);
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
PMD_INIT_LOG(DEBUG, "Port %d is up",
dev->data->port_id);
}
} else {
- link.link_status = ETH_LINK_UP;
+ link.link_status = RTE_ETH_LINK_UP;
}
return rte_eth_linkstatus_set(dev, &link);
struct virtio_hw *hw = dev->data->dev_private;
uint64_t offloads = rxmode->offloads;
- if (mask & ETH_VLAN_FILTER_MASK) {
- if ((offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+ if ((offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) &&
!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(NOTICE,
}
}
- if (mask & ETH_VLAN_STRIP_MASK)
- hw->vlan_strip = !!(offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ if (mask & RTE_ETH_VLAN_STRIP_MASK)
+ hw->vlan_strip = !!(offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
return 0;
}
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
+ dev_info->max_mtu = hw->max_mtu;
host_features = VIRTIO_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_SCATTER;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
- DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM;
+ RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+ RTE_ETH_RX_OFFLOAD_UDP_CKSUM;
}
if (host_features & (1ULL << VIRTIO_NET_F_CTRL_VLAN))
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
tso_mask = (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
(1ULL << VIRTIO_NET_F_GUEST_TSO6);
if ((host_features & tso_mask) == tso_mask)
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_TCP_LRO;
+ dev_info->rx_offload_capa |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
- dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_VLAN_INSERT;
+ dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+ RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
if (host_features & (1ULL << VIRTIO_NET_F_CSUM)) {
dev_info->tx_offload_capa |=
- DEV_TX_OFFLOAD_UDP_CKSUM |
- DEV_TX_OFFLOAD_TCP_CKSUM;
+ RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+ RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
}
tso_mask = (1ULL << VIRTIO_NET_F_HOST_TSO4) |
(1ULL << VIRTIO_NET_F_HOST_TSO6);
if ((host_features & tso_mask) == tso_mask)
- dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+ dev_info->tx_offload_capa |= RTE_ETH_TX_OFFLOAD_TCP_TSO;
+
+ if (host_features & (1ULL << VIRTIO_F_RING_PACKED)) {
+ /*
+ * According to 2.7 Packed Virtqueues,
+ * 2.7.10.1 Structure Size and Alignment:
+ * The Queue Size value does not have to be a power of 2.
+ */
+ dev_info->rx_desc_lim.nb_max = UINT16_MAX;
+ dev_info->tx_desc_lim.nb_max = UINT16_MAX;
+ } else {
+ /*
+ * According to 2.6 Split Virtqueues:
+ * Queue Size value is always a power of 2. The maximum Queue
+ * Size value is 32768.
+ */
+ dev_info->rx_desc_lim.nb_max = 32768;
+ dev_info->tx_desc_lim.nb_max = 32768;
+ }
+ /*
+ * The actual minimum differs between virtqueue kinds, but to avoid
+ * tangling the code with separate branches, rely on the default
+ * thresholds: the descriptor count must be at least as large as they are.
+ */
+ dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
+ RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+ dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
+ dev_info->rx_desc_lim.nb_align = 1;
+ dev_info->tx_desc_lim.nb_align = 1;
return 0;
}
return 0;
}
-RTE_LOG_REGISTER(virtio_logtype_init, pmd.net.virtio.init, NOTICE);
-RTE_LOG_REGISTER(virtio_logtype_driver, pmd.net.virtio.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);