uint16_t max_queue_pairs;
uint64_t req_guest_features;
struct virtnet_ctl *cvq;
+ bool use_va; /**< use virtual instead of IO addresses for vrings and buffers */
};
struct virtio_ops {
memset(mz->addr, 0, mz->len);
- vq->vq_ring_mem = mz->iova;
+ if (hw->use_va)
+ vq->vq_ring_mem = (uintptr_t)mz->addr;
+ else
+ vq->vq_ring_mem = mz->iova;
+
vq->vq_ring_virt_mem = mz->addr;
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64,
- (uint64_t)mz->iova);
- PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
- (uint64_t)(uintptr_t)mz->addr);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+ PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
virtio_init_vring(vq);
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
- txvq->virtio_net_hdr_mem = hdr_mz->iova;
+ if (hw->use_va)
+ txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
- cvq->virtio_net_hdr_mem = hdr_mz->iova;
+ if (hw->use_va)
+ cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+ else
+ cvq->virtio_net_hdr_mem = hdr_mz->iova;
memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
hw->cvq = cvq;
}
+ if (hw->use_va)
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+ else
+ vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+
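For reference, a minimal standalone sketch (names hypothetical, not part of this patch) of the offset-based lookup that mbuf_addr_offset enables: one stored offset selects between two mbuf fields at run time, using the same double cast as the VIRTIO_MBUF_ADDR() macro defined later in this patch.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the two rte_mbuf fields the driver switches between. */
struct mbuf_like {
	void *buf_addr;    /* virtual address of the buffer */
	uint64_t buf_iova; /* IO address of the buffer */
};

/* Same double cast as VIRTIO_MBUF_ADDR(): read a word-sized value at
 * the stored offset, then widen it to 64 bits. */
static uint64_t
field_at_offset(const struct mbuf_like *mb, uint16_t off)
{
	return (uint64_t)(*(uintptr_t *)((uintptr_t)mb + off));
}

int
main(void)
{
	struct mbuf_like mb = { .buf_addr = &mb, .buf_iova = 0x1000 };

	printf("va:   0x%" PRIx64 "\n",
	       field_at_offset(&mb, offsetof(struct mbuf_like, buf_addr)));
	printf("iova: 0x%" PRIx64 "\n",
	       field_at_offset(&mb, offsetof(struct mbuf_like, buf_iova)));
	return 0;
}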
if (queue_type == VTNET_TQ) {
struct virtio_tx_region *txr;
unsigned int i;
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
- start_dp[idx].addr = cookies[i]->buf_iova +
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
start_dp[idx].len = cookies[i]->buf_len -
RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
- start_dp[idx].addr = cookie[i]->buf_iova +
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len = cookie[i]->buf_len -
- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
vq->vq_desc_head_idx = start_dp[idx].next;
vq_update_avail_ring(vq, idx);
uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
- dp->addr = cookie->buf_iova +
- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- dp->len = cookie->buf_len -
- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
- virtqueue_store_flags_packed(dp, flags,
- hw->weak_barriers);
+ virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
else
virtqueue_xmit_offload(hdr, cookies[i]);
- start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
- start_dp[idx].len = cookies[i]->data_len + head_size;
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+ start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
else
virtqueue_xmit_offload(hdr, cookie);
- dp->addr = rte_mbuf_data_iova(cookie) - head_size;
- dp->len = cookie->data_len + head_size;
- dp->id = id;
+ dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+ dp->len = cookie->data_len + head_size;
+ dp->id = id;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
virtqueue_xmit_offload(hdr, cookie);
do {
- start_dp[idx].addr = rte_mbuf_data_iova(cookie);
- start_dp[idx].len = cookie->data_len;
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;
start_dp[idx].len += head_size;
dxp = &vq->vq_descx[idx + i];
dxp->cookie = (void *)cookie[total_num + i];
- addr = cookie[total_num + i]->buf_iova +
+ addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
start_dp[idx + i].addr = addr;
start_dp[idx + i].len = cookie[total_num + i]->buf_len
}
__m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
- tx_pkts[3]->buf_iova,
+ VIRTIO_MBUF_ADDR(tx_pkts[3], vq),
tx_pkts[2]->data_len,
- tx_pkts[2]->buf_iova,
+ VIRTIO_MBUF_ADDR(tx_pkts[2], vq),
tx_pkts[1]->data_len,
- tx_pkts[1]->buf_iova,
+ VIRTIO_MBUF_ADDR(tx_pkts[1], vq),
tx_pkts[0]->data_len,
- tx_pkts[0]->buf_iova);
+ VIRTIO_MBUF_ADDR(tx_pkts[0], vq));
/* id offset and data offset */
__m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
uint64x2_t base_addr0 = {
- tx_pkts[0]->buf_iova + tx_pkts[0]->data_off,
- tx_pkts[1]->buf_iova + tx_pkts[1]->data_off
+ VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
+ VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
};
uint64x2_t base_addr1 = {
- tx_pkts[2]->buf_iova + tx_pkts[2]->data_off,
- tx_pkts[3]->buf_iova + tx_pkts[3]->data_off
+ VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
+ VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
};
desc[0].val[0] = base_addr0;
p = (uintptr_t)&sw_ring[i]->rearm_data;
*(uint64_t *)p = rxvq->mbuf_initializer;
- start_dp[i].addr = sw_ring[i]->buf_iova +
+ start_dp[i].addr = VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
start_dp[i].len = sw_ring[i]->buf_len -
RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
goto end;
}
+ /*
+ * Virtio-user requires using virtual addresses for the descriptor
+ * buffers, regardless of what other devices require.
+ */
+ hw->use_va = true;
+
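For illustration only, a hedged sketch of what the two addressing modes put into a descriptor once use_va is set (desc_addr_of and mbuf_like are hypothetical, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Minimal stand-in for rte_mbuf; only the two relevant fields. */
struct mbuf_like {
	void *buf_addr;
	uint64_t buf_iova;
};

static uint64_t
desc_addr_of(const struct mbuf_like *m, bool use_va)
{
	/* virtio-user: the backend maps this process's address space,
	 * so descriptors can carry plain virtual addresses. */
	if (use_va)
		return (uint64_t)(uintptr_t)m->buf_addr;
	/* virtio-pci: a real device performs DMA, so descriptors must
	 * carry IO addresses. */
	return m->buf_iova;
}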
/* previously called by pci probing for physical dev */
if (eth_virtio_dev_init(eth_dev) < 0) {
PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
.remove = virtio_user_pmd_remove,
.dma_map = virtio_user_pmd_dma_map,
.dma_unmap = virtio_user_pmd_dma_unmap,
- .drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};
RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
#define VIRTQUEUE_MAX_NAME_SZ 32
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of the
+ * mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t)) before
+ * being cast to uint64_t. This makes it work with any combination of
+ * word size (64-bit and 32-bit) and virtio device (virtio-pci and
+ * virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+ ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of the
+ * mbuf data buffer, taking the mbuf data offset into account.
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+ (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
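A usage sketch, assuming the driver types and field names used earlier in this patch (fill_desc itself is hypothetical):

static inline void
fill_desc(struct vring_desc *dp, struct rte_mbuf *cookie,
	  struct virtqueue *vq)
{
	/* Resolves to buf_addr + data_off when hw->use_va is set,
	 * or to buf_iova + data_off otherwise. */
	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
	dp->len = cookie->data_len;
}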
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
void *vq_ring_virt_mem; /**< linear address of vring */
unsigned int vq_ring_size;
+ uint16_t mbuf_addr_offset; /**< offset of buf_addr or buf_iova in rte_mbuf */
union {
struct virtnet_rx rxq;
do {
uint16_t flags;
- start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;