"vq->vq_avail_idx=%d\n"
"vq->vq_used_cons_idx=%d\n"
"vq->vq_packed.cached_flags=0x%x\n"
- "vq->vq_packed.used_wrap_counter=%d\n",
+ "vq->vq_packed.used_wrap_counter=%d",
vq->vq_free_cnt,
vq->vq_avail_idx,
vq->vq_used_cons_idx,
vq->vq_packed.cached_flags,
vq->vq_packed.used_wrap_counter);
return 0;
}
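The trailing "\n" is dropped because the driver's log macros already append one; the interior newlines stay so the dump keeps its multi-line layout. A sketch of the macro shape this assumes (see drivers/net/virtio/virtio_logs.h for the real definition):

    #define PMD_INIT_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, virtio_logtype_init, \
            "%s(): " fmt "\n", __func__, ##args)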
-static void
-virtio_dev_queue_release(void *queue __rte_unused)
-{
- /* do nothing */
-}
-
static uint16_t
virtio_get_nr_vq(struct virtio_hw *hw)
{
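The empty virtio_dev_queue_release() stub (and the .rx_queue_release/.tx_queue_release hooks removed further down) can simply go: since the ethdev queue-release rework, a NULL release callback is treated as a no-op. The ethdev-side guard looks roughly like this (shape assumed, not quoted from ethdev):

    /* Hedged sketch of the ethdev helper that frees an Rx queue. */
    if (dev->dev_ops->rx_queue_release != NULL)
        dev->dev_ops->rx_queue_release(dev, qid);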
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->iova;
+ if (hw->use_va)
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ else
+ vq->vq_ring_mem = mz->iova;
+
vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->iova);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
- (uint64_t)(uintptr_t)mz->addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
virtio_init_vring(vq);
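When hw->use_va is set (backends that consume process virtual addresses, as with virtio-user), the ring address handed to the device is the memzone's VA; PCI devices keep getting the IOVA. A hypothetical helper showing the selection pattern these hunks repeat:

    /* Hypothetical helper, not in the patch: pick the address the
     * device should use for a memzone. */
    static inline uint64_t
    virtio_mz_dma_addr(const struct rte_memzone *mz, bool use_va)
    {
        return use_va ? (uint64_t)(uintptr_t)mz->addr : mz->iova;
    }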
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->iova;
+ if (hw->use_va)
+ txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->iova;
+ if (hw->use_va)
+ cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
hw->cvq = cvq;
}
+ if (hw->use_va)
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+ else
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+
if (queue_type == VTNET_TQ) {
struct virtio_tx_region *txr;
unsigned int i;
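Caching the offset of the right rte_mbuf field up front lets the datapath load either buf_addr or buf_iova without re-testing hw->use_va per packet. An illustrative accessor built on that offset (the macro name is an assumption, not the patch's):

    /* Read the buffer address stored at the cached offset inside the
     * mbuf; assumes a 64-bit target where both fields are 8 bytes. */
    #define VQ_MBUF_ADDR(vq, mb) \
        (*(uint64_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset))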
.rx_queue_setup = virtio_dev_rx_queue_setup,
.rx_queue_intr_enable = virtio_dev_rx_queue_intr_enable,
.rx_queue_intr_disable = virtio_dev_rx_queue_intr_disable,
- .rx_queue_release = virtio_dev_queue_release,
.tx_queue_setup = virtio_dev_tx_queue_setup,
- .tx_queue_release = virtio_dev_queue_release,
/* collect stats per queue */
.queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
.vlan_filter_set = virtio_vlan_filter_set,
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
- eth_dev->rx_descriptor_done = virtio_dev_rx_queue_done;
if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
set_rxtx_funcs(eth_dev);
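rx_descriptor_done is removed along with the legacy ethdev API; the generic replacement applications call is the descriptor status API, e.g.:

    /* rte_ethdev.h: probe the descriptor 'offset' entries past the
     * next one the driver will process on this Rx queue. */
    if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
            RTE_ETH_RX_DESC_DONE)
        printf("a filled descriptor is waiting at that slot\n");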
PMD_INIT_LOG(DEBUG, "%d mbufs freed", mbuf_num);
}
+static void
+virtio_tx_completed_cleanup(struct rte_eth_dev *dev)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtqueue *vq;
+ int qidx;
+ void (*xmit_cleanup)(struct virtqueue *vq, uint16_t nb_used);
+
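+ /* Pick the Tx cleanup variant matching the enabled datapath. */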
+ if (virtio_with_packed_queue(hw)) {
+ if (hw->use_vec_tx)
+ xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+ else if (virtio_with_feature(hw, VIRTIO_F_IN_ORDER))
+ xmit_cleanup = &virtio_xmit_cleanup_inorder_packed;
+ else
+ xmit_cleanup = &virtio_xmit_cleanup_normal_packed;
+ } else {
+ if (hw->use_inorder_tx)
+ xmit_cleanup = &virtio_xmit_cleanup_inorder;
+ else
+ xmit_cleanup = &virtio_xmit_cleanup;
+ }
+
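+ /* Tx queue for pair N sits at index 2 * N + VTNET_SQ_TQ_QUEUE_IDX (1);
+  * drain completed descriptors so their mbufs are freed before stop. */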
+ for (qidx = 0; qidx < hw->max_queue_pairs; qidx++) {
+ vq = hw->vqs[2 * qidx + VTNET_SQ_TQ_QUEUE_IDX];
+ if (vq != NULL)
+ xmit_cleanup(vq, virtqueue_nused(vq));
+ }
+}
+
/*
* Stop device: disable interrupt and mark link down
*/
goto out_unlock;
hw->started = 0;
+ virtio_tx_completed_cleanup(dev);
+
if (intr_conf->lsc || intr_conf->rxq) {
virtio_intr_disable(dev);
* The Queue Size value does not have to be a power of 2.
*/
dev_info->rx_desc_lim.nb_max = UINT16_MAX;
+ dev_info->tx_desc_lim.nb_max = UINT16_MAX;
} else {
/*
* According to 2.6 Split Virtqueues:
* Size value is 32768.
*/
dev_info->rx_desc_lim.nb_max = 32768;
+ dev_info->tx_desc_lim.nb_max = 32768;
}
/*
* Actual minimum is not the same for virtqueues of different kinds,
*/
dev_info->rx_desc_lim.nb_min = RTE_MAX(DEFAULT_RX_FREE_THRESH,
RTE_VIRTIO_VPMD_RX_REARM_THRESH);
+ dev_info->tx_desc_lim.nb_min = DEFAULT_TX_FREE_THRESH;
dev_info->rx_desc_lim.nb_align = 1;
+ dev_info->tx_desc_lim.nb_align = 1;
return 0;
}
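With Tx descriptor limits now reported alongside the Rx ones, applications can clamp their ring sizes the usual way; a minimal sketch (port_id assumed valid, device not yet started):

    uint16_t nb_rxd = 1024, nb_txd = 1024;

    /* Clamps each count into [nb_min, nb_max] and rounds up to
     * nb_align, per the limits filled in above. */
    if (rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd) != 0)
        rte_exit(EXIT_FAILURE, "cannot adjust descriptor counts\n");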