static int virtio_mac_addr_set(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static int virtio_intr_enable(struct rte_eth_dev *dev);
static int virtio_intr_disable(struct rte_eth_dev *dev);
static int virtio_dev_queue_stats_mapping_set(
	struct rte_eth_dev *eth_dev,
	uint16_t queue_id,
	uint8_t stat_idx,
	uint8_t is_rx);

struct virtio_hw_internal virtio_hw_internal[RTE_MAX_ETHPORTS];
-static int
-virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
- int *dlen, int pkt_num)
+static struct virtio_pmd_ctrl *
+virtio_send_command_packed(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
{
- uint32_t head, i;
- int k, sum = 0;
- virtio_net_ctrl_ack status = ~0;
+ struct virtqueue *vq = cvq->vq;
+ int head;
+ struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
- struct virtqueue *vq;
+ uint16_t flags;
+ int sum = 0;
+ int nb_descs = 0;
+ int k;
- ctrl->status = status;
+ /*
+ * Format is enforced in qemu code:
+ * One TX packet for header;
+ * At least one TX packet per argument;
+ * One RX packet for ACK.
+ */
+ head = vq->vq_avail_idx;
+ flags = vq->vq_packed.cached_flags;
+ desc[head].addr = cvq->virtio_net_hdr_mem;
+ desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_free_cnt--;
+ nb_descs++;
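+	/*
+	 * Wrapping past the end of the ring toggles the cached AVAIL/USED
+	 * bits, so descriptors written on the next lap carry the inverted
+	 * wrap-counter flags.
+	 */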
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ }
- if (!cvq || !cvq->vq) {
- PMD_INIT_LOG(ERR, "Control queue is not supported.");
- return -1;
+ for (k = 0; k < pkt_num; k++) {
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr)
+ + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
+ desc[vq->vq_avail_idx].len = dlen[k];
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
+ vq->vq_packed.cached_flags;
+ sum += dlen[k];
+ vq->vq_free_cnt--;
+ nb_descs++;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ }
}
- rte_spinlock_lock(&cvq->lock);
- vq = cvq->vq;
- head = vq->vq_desc_head_idx;
+ desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
+ + sizeof(struct virtio_net_ctrl_hdr);
+ desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
+ desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+ vq->vq_packed.cached_flags;
+ vq->vq_free_cnt--;
+ nb_descs++;
+ if (++vq->vq_avail_idx >= vq->vq_nentries) {
+ vq->vq_avail_idx -= vq->vq_nentries;
+ vq->vq_packed.cached_flags ^=
+ VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
+ }
- PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
- "vq->hw->cvq = %p vq = %p",
- vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
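+	/*
+	 * The head descriptor's flags are written last, behind a write
+	 * barrier, so the device can never observe a partially built chain.
+	 */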
+ virtio_wmb(vq->hw->weak_barriers);
+ desc[head].flags = VRING_DESC_F_NEXT | flags;
- if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
- rte_spinlock_unlock(&cvq->lock);
- return -1;
+ virtio_wmb(vq->hw->weak_barriers);
+ virtqueue_notify(vq);
+
+ /* wait for used descriptors in virtqueue */
+ while (!desc_is_used(&desc[head], vq))
+ usleep(100);
+
+ virtio_rmb(vq->hw->weak_barriers);
+
+ /* now get used descriptors */
+ vq->vq_free_cnt += nb_descs;
+ vq->vq_used_cons_idx += nb_descs;
+ if (vq->vq_used_cons_idx >= vq->vq_nentries) {
+ vq->vq_used_cons_idx -= vq->vq_nentries;
+ vq->vq_packed.used_wrap_counter ^= 1;
}
- memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
- sizeof(struct virtio_pmd_ctrl));
+ PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
+ "vq->vq_avail_idx=%d\n"
+ "vq->vq_used_cons_idx=%d\n"
+ "vq->vq_packed.cached_flags=0x%x\n"
+ "vq->vq_packed.used_wrap_counter=%d\n",
+ vq->vq_free_cnt,
+ vq->vq_avail_idx,
+ vq->vq_used_cons_idx,
+ vq->vq_packed.cached_flags,
+ vq->vq_packed.used_wrap_counter);
+
+ result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
+static struct virtio_pmd_ctrl *
+virtio_send_command_split(struct virtnet_ctl *cvq,
+ struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq = cvq->vq;
+ uint32_t head, i;
+ int k, sum = 0;
+
+ head = vq->vq_desc_head_idx;
/*
* Format is enforced in qemu code:
* At least one TX packet per argument;
* One RX packet for ACK.
*/
- vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
- vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+ vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+ vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[head].next;
+ i = vq->vq_split.ring.desc[head].next;
for (k = 0; k < pkt_num; k++) {
- vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr)
+ sizeof(ctrl->status) + sizeof(uint8_t)*sum;
- vq->vq_ring.desc[i].len = dlen[k];
+ vq->vq_split.ring.desc[i].len = dlen[k];
sum += dlen[k];
vq->vq_free_cnt--;
- i = vq->vq_ring.desc[i].next;
+ i = vq->vq_split.ring.desc[i].next;
}
- vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
- vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+ vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
+ sizeof(struct virtio_net_ctrl_hdr);
- vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+ vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
vq->vq_free_cnt--;
- vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+ vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
vq_update_avail_ring(vq, head);
vq_update_avail_idx(vq);
	PMD_INIT_LOG(DEBUG, "vq->vq_queue_index = %d", vq->vq_queue_index);

	virtqueue_notify(vq);

	rte_rmb();
	while (VIRTQUEUE_NUSED(vq) == 0) {
		rte_rmb();
		usleep(100);
	}

	while (VIRTQUEUE_NUSED(vq)) {
		uint32_t idx, desc_idx, used_idx;
		struct vring_used_elem *uep;

		used_idx = (uint32_t)(vq->vq_used_cons_idx
				& (vq->vq_nentries - 1));
- uep = &vq->vq_ring.used->ring[used_idx];
+ uep = &vq->vq_split.ring.used->ring[used_idx];
idx = (uint32_t) uep->id;
desc_idx = idx;
- while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
- desc_idx = vq->vq_ring.desc[desc_idx].next;
+ while (vq->vq_split.ring.desc[desc_idx].flags &
+ VRING_DESC_F_NEXT) {
+ desc_idx = vq->vq_split.ring.desc[desc_idx].next;
vq->vq_free_cnt++;
}
- vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+ vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
vq->vq_desc_head_idx = idx;
vq->vq_used_cons_idx++;
		vq->vq_free_cnt++;
	}

	PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d",
			vq->vq_free_cnt, vq->vq_desc_head_idx);
result = cvq->virtio_net_hdr_mz->addr;
+ return result;
+}
+
+static int
+virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
+ int *dlen, int pkt_num)
+{
+ virtio_net_ctrl_ack status = ~0;
+ struct virtio_pmd_ctrl *result;
+ struct virtqueue *vq;
+
+ ctrl->status = status;
+
+ if (!cvq || !cvq->vq) {
+ PMD_INIT_LOG(ERR, "Control queue is not supported.");
+ return -1;
+ }
+
+ rte_spinlock_lock(&cvq->lock);
+ vq = cvq->vq;
+
+ PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
+ "vq->hw->cvq = %p vq = %p",
+ vq->vq_desc_head_idx, status, vq->hw->cvq, vq);
+
+ if (vq->vq_free_cnt < pkt_num + 2 || pkt_num < 1) {
+ rte_spinlock_unlock(&cvq->lock);
+ return -1;
+ }
+
+ memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
+ sizeof(struct virtio_pmd_ctrl));
+
+ if (vtpci_packed_queue(vq->hw))
+ result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
+ else
+ result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
rte_spinlock_unlock(&cvq->lock);
	return result->status;
}

static void
virtio_init_vring(struct virtqueue *vq)
{
int size = vq->vq_nentries;
- struct vring *vr = &vq->vq_ring;
uint8_t *ring_mem = vq->vq_ring_virt_mem;
PMD_INIT_FUNC_TRACE();
- /*
- * Reinitialise since virtio port might have been stopped and restarted
- */
memset(ring_mem, 0, vq->vq_ring_size);
- vring_init(vr, size, ring_mem, VIRTIO_PCI_VRING_ALIGN);
+
vq->vq_used_cons_idx = 0;
vq->vq_desc_head_idx = 0;
vq->vq_avail_idx = 0;
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
-	vring_desc_init(vr->desc, size);
-
+	if (vtpci_packed_queue(vq->hw)) {
+		vring_init_packed(&vq->vq_packed.ring, ring_mem,
+				  VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_packed(vq, size);
+	} else {
+		struct vring *vr = &vq->vq_split.ring;
+
+		vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+		vring_desc_init_split(vr->desc, size);
+	}
/*
* Disable device(host) interrupting guest
*/
void *sw_ring = NULL;
int queue_type = virtio_get_queue_type(hw, vtpci_queue_idx);
int ret;
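+	/* Keep the queue and its rings on the device's NUMA node. */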
+ int numa_node = dev->device->numa_node;
- PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx);
+ PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
+ vtpci_queue_idx, numa_node);
/*
* Read the virtqueue size from the Queue Size field
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
- SOCKET_ID_ANY);
+ numa_node);
if (vq == NULL) {
PMD_INIT_LOG(ERR, "can not allocate vq");
return -ENOMEM;
vq->hw = hw;
vq->vq_queue_index = vtpci_queue_idx;
vq->vq_nentries = vq_size;
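+	/*
+	 * A packed ring starts its first lap with wrap counter 1, so
+	 * freshly exposed descriptors must carry AVAIL=1 and USED=0.
+	 */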
+ if (vtpci_packed_queue(hw)) {
+ vq->vq_packed.used_wrap_counter = 1;
+ vq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);
+ vq->vq_packed.event_flags_shadow = 0;
+ if (queue_type == VTNET_RQ)
+ vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
+ }
/*
* Reserve a memzone for vring elements
*/
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
if (rte_errno == EEXIST)
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
dev->data->port_id, vtpci_queue_idx);
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
- SOCKET_ID_ANY, RTE_MEMZONE_IOVA_CONTIG,
+ numa_node, RTE_MEMZONE_IOVA_CONTIG,
RTE_CACHE_LINE_SIZE);
if (hdr_mz == NULL) {
if (rte_errno == EEXIST)
		size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) *
			       sizeof(vq->sw_ring[0]);
sw_ring = rte_zmalloc_socket("sw_ring", sz_sw,
- RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY);
+ RTE_CACHE_LINE_SIZE, numa_node);
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
for (i = 0; i < vq_size; i++) {
struct vring_desc *start_dp = txr[i].tx_indir;
- vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir));
-
/* first indirect descriptor is always the tx header */
- start_dp->addr = txvq->virtio_net_hdr_mem
- + i * sizeof(*txr)
- + offsetof(struct virtio_tx_region, tx_hdr);
-
- start_dp->len = hw->vtnet_hdr_size;
- start_dp->flags = VRING_DESC_F_NEXT;
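+		/*
+		 * Pre-format the indirect descriptor chain for split rings
+		 * only; packed virtqueues do not use this pre-built chain.
+		 */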
+ if (!vtpci_packed_queue(hw)) {
+ vring_desc_init_split(start_dp,
+ RTE_DIM(txr[i].tx_indir));
+ start_dp->addr = txvq->virtio_net_hdr_mem
+ + i * sizeof(*txr)
+ + offsetof(struct virtio_tx_region,
+ tx_hdr);
+ start_dp->len = hw->vtnet_hdr_size;
+ start_dp->flags = VRING_DESC_F_NEXT;
+ }
}
}
PMD_INIT_LOG(DEBUG, "virtio_dev_close");
+ if (!hw->opened)
+ return;
+ hw->opened = false;
+
/* reset the NIC */
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
VTPCI_OPS(hw)->set_config_irq(hw, VIRTIO_MSI_NO_VECTOR);
static int
virtio_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
struct virtqueue *vq = rxvq->vq;
virtqueue_enable_intr(vq);
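+	/*
+	 * Ensure the unmasking write is ordered before any later read of
+	 * the used ring, so a racing completion cannot be missed.
+	 */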
+ virtio_mb(hw->weak_barriers);
return 0;
}
.mac_addr_set = virtio_mac_addr_set,
};
+/*
+ * dev_ops for virtio-user in secondary processes, as only a limited
+ * set of operations is currently supported.
+ */
+const struct eth_dev_ops virtio_user_secondary_eth_dev_ops = {
+ .dev_infos_get = virtio_dev_info_get,
+ .stats_get = virtio_dev_stats_get,
+ .xstats_get = virtio_dev_xstats_get,
+ .xstats_get_names = virtio_dev_xstats_get_names,
+ .stats_reset = virtio_dev_stats_reset,
+ .xstats_reset = virtio_dev_stats_reset,
+ /* collect stats per queue */
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
+};
+
static void
virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct rte_eth_dev *dev = param;
struct virtio_hw *hw = dev->data->dev_private;
uint8_t isr;
+ uint16_t status;
/* Read interrupt status which clears interrupt */
isr = vtpci_isr(hw);
_rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
- }
- if (isr & VIRTIO_NET_S_ANNOUNCE) {
- virtio_notify_peers(dev);
- if (hw->cvq)
- virtio_ack_link_announce(dev);
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ vtpci_read_dev_config(hw,
+ offsetof(struct virtio_net_config, status),
+ &status, sizeof(status));
+ if (status & VIRTIO_NET_S_ANNOUNCE) {
+ virtio_notify_peers(dev);
+ if (hw->cvq)
+ virtio_ack_link_announce(dev);
+ }
+ }
}
}
{
struct virtio_hw *hw = eth_dev->data->dev_private;
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
- } else if (hw->use_inorder_rx) {
- PMD_INIT_LOG(INFO,
- "virtio: using inorder mergeable buffer Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts_inorder;
- } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ if (vtpci_packed_queue(hw)) {
PMD_INIT_LOG(INFO,
- "virtio: using mergeable buffer Rx path on port %u",
+ "virtio: using packed ring %s Tx path on port %u",
+ hw->use_inorder_tx ? "inorder" : "standard",
eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
} else {
- PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
- eth_dev->data->port_id);
- eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
}
- if (hw->use_inorder_tx) {
- PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ if (vtpci_packed_queue(hw)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_packed;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
} else {
- PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
- eth_dev->data->port_id);
- eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ if (hw->use_simple_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
}
}
	/* Only support 1:1 queue/interrupt mapping so far.
	 * TODO: support n:1 queue/interrupt mapping when there are limited
	 * number of interrupt vectors.
	 */
if (virtio_negotiate_features(hw, req_features) < 0)
return -1;
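+	/*
+	 * With VIRTIO_F_ORDER_PLATFORM negotiated, the device may be a
+	 * real one (e.g. behind an IOMMU), so real memory barriers are
+	 * needed rather than the cheaper SMP-only variants.
+	 */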
+ hw->weak_barriers = !vtpci_with_feature(hw, VIRTIO_F_ORDER_PLATFORM);
+
if (!hw->virtio_user_dev) {
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pci_dev);
/* Setting up rx_header size for the device */
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1))
+ vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
if (ret < 0)
goto out;
- /* Setup interrupt callback */
- if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_register(eth_dev->intr_handle,
- virtio_interrupt_handler, eth_dev);
-
return 0;
out:
PMD_INIT_FUNC_TRACE();
if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- return -EPERM;
+ return 0;
virtio_dev_stop(eth_dev);
virtio_dev_close(eth_dev);
eth_dev->tx_pkt_burst = NULL;
eth_dev->rx_pkt_burst = NULL;
- rte_free(eth_dev->data->mac_addrs);
- eth_dev->data->mac_addrs = NULL;
-
- /* reset interrupt callback */
- if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- rte_intr_callback_unregister(eth_dev->intr_handle,
- virtio_interrupt_handler,
- eth_dev);
if (eth_dev->device)
rte_pci_unmap_device(RTE_ETH_DEV_TO_PCI(eth_dev));
static int eth_virtio_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
struct rte_pci_device *pci_dev)
{
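+	/*
+	 * A positive return tells the PCI bus layer this driver does not
+	 * take the device, so probing can continue with other drivers.
+	 */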
+ if (rte_eal_iopl_init() != 0) {
+ PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
+ return 1;
+ }
+
/* virtio pmd skips probe if device needs to work in vdpa mode */
if (vdpa_mode_selected(pci_dev->device.devargs))
return 1;
RTE_INIT(rte_virtio_pmd_init)
{
- if (rte_eal_iopl_init() != 0) {
- PMD_INIT_LOG(ERR, "IOPL call failed - cannot use virtio PMD");
- return;
- }
-
+ rte_eal_iopl_init();
rte_pci_register(&rte_virtio_pmd);
}
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
const struct rte_eth_txmode *txmode = &dev->data->dev_conf.txmode;
struct virtio_hw *hw = dev->data->dev_private;
+ uint32_t ether_hdr_len = ETHER_HDR_LEN + VLAN_TAG_LEN +
+ hw->vtnet_hdr_size;
uint64_t rx_offloads = rxmode->offloads;
uint64_t tx_offloads = txmode->offloads;
uint64_t req_features;
return ret;
}
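+	/*
+	 * Drop VIRTIO_NET_F_MTU if the requested max Rx packet length
+	 * cannot fit within the MTU advertised by the device.
+	 */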
+ if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
+ req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
- hw->use_inorder_rx = 1;
- hw->use_simple_rx = 0;
- } else {
- hw->use_inorder_rx = 0;
- }
+ hw->use_inorder_rx = 1;
+ hw->use_simple_rx = 0;
+ }
+
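+	/* Packed virtqueues have no vectorized or in-order Rx path yet. */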
+ if (vtpci_packed_queue(hw)) {
+ hw->use_simple_rx = 0;
+ hw->use_inorder_rx = 0;
}
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
DEV_RX_OFFLOAD_VLAN_STRIP))
hw->use_simple_rx = 0;
+ hw->opened = true;
+
return 0;
}
	if (dev->data->dev_conf.intr_conf.lsc ||
	    dev->data->dev_conf.intr_conf.rxq) {
virtio_intr_disable(dev);
+ /* Setup interrupt callback */
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
+ rte_intr_callback_register(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+
if (virtio_intr_enable(dev) < 0) {
PMD_DRV_LOG(ERR, "interrupt enable failed");
return -EIO;
}
set_rxtx_funcs(dev);
- hw->started = 1;
+ hw->started = true;
/* Initialize Link state */
virtio_dev_link_update(dev, 0);
PMD_INIT_LOG(DEBUG, "stop");
rte_spinlock_lock(&hw->state_lock);
- if (intr_conf->lsc || intr_conf->rxq)
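+	/* Nothing to undo if the port is not started or already stopped. */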
+ if (!hw->started)
+ goto out_unlock;
+ hw->started = false;
+
+ if (intr_conf->lsc || intr_conf->rxq) {
virtio_intr_disable(dev);
- hw->started = 0;
+ /* Reset interrupt callback */
+ if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
+ rte_intr_callback_unregister(dev->intr_handle,
+ virtio_interrupt_handler,
+ dev);
+ }
+ }
+
memset(&link, 0, sizeof(link));
rte_eth_linkstatus_set(dev, &link);
+out_unlock:
rte_spinlock_unlock(&hw->state_lock);
}
link.link_speed = ETH_SPEED_NUM_10G;
link.link_autoneg = ETH_LINK_FIXED;
- if (hw->started == 0) {
+ if (!hw->started) {
link.link_status = ETH_LINK_DOWN;
} else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
host_features = VTPCI_OPS(hw)->get_features(hw);
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_CRC_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |