hw->cvq = cvq;
}
- /* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
- * we use virtual address. And we need properly set _offset_, please see
- * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
- */
- if (hw->bus_type == VIRTIO_BUS_PCI_LEGACY || hw->bus_type == VIRTIO_BUS_PCI_MODERN) {
- vq->offset = offsetof(struct rte_mbuf, buf_iova);
- } else if (hw->bus_type == VIRTIO_BUS_USER) {
- vq->vq_ring_mem = (uintptr_t)mz->addr;
- vq->offset = offsetof(struct rte_mbuf, buf_addr);
- if (queue_type == VTNET_TQ)
- txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
- else if (queue_type == VTNET_CQ)
- cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
- }
-
if (queue_type == VTNET_TQ) {
struct virtio_tx_region *txr;
unsigned int i;
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
- start_dp[idx].addr =
- VIRTIO_MBUF_ADDR(cookies[i], vq) +
- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len =
- cookies[i]->buf_len -
- RTE_PKTMBUF_HEADROOM +
- hw->vtnet_hdr_size;
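+ /* Start the buffer vtnet_hdr_size bytes before the default data
+ * offset so the device writes the virtio-net header into the mbuf
+ * headroom and the packet lands at the default data offset.
+ */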
+ start_dp[idx].addr = cookies[i]->buf_iova +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len = cookies[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
vq_update_avail_ring(vq, idx);
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
- start_dp[idx].addr =
- VIRTIO_MBUF_ADDR(cookie[i], vq) +
+ start_dp[idx].addr = cookie[i]->buf_iova +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len =
- cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
- hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
vq->vq_desc_head_idx = start_dp[idx].next;
vq_update_avail_ring(vq, idx);
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
- start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
- + hw->vtnet_hdr_size;
+ start_dp[idx].addr = cookie[i]->buf_iova +
+ RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len -
+ RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
vq->vq_desc_head_idx = dxp->next;
if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
else
virtqueue_xmit_offload(hdr, cookies[i], true);
- start_dp[idx].addr =
- VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
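+ /* The virtio-net header was prepended in the headroom, so the
+ * descriptor starts head_size bytes before the packet data.
+ */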
+ start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
else
virtqueue_xmit_offload(hdr, cookie, true);
- dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+ dp->addr = rte_mbuf_data_iova(cookie) - head_size;
dp->len = cookie->data_len + head_size;
dp->id = id;
virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
do {
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].addr = rte_mbuf_data_iova(cookie);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;
dxp = &vq->vq_descx[idx + i];
dxp->cookie = (void *)cookie[total_num + i];
- addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
+ addr = cookie[total_num + i]->buf_iova +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
start_dp[idx + i].addr = addr;
start_dp[idx + i].len = cookie[total_num + i]->buf_len
- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
}
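+ /* Batch of four packed descriptors: the low qword of each 128-bit
+ * lane holds the buffer IOVA, the high qword the data length; the
+ * descriptor id and data offset are folded in below.
+ */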
__m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
- VIRTIO_MBUF_ADDR(tx_pkts[3], vq),
+ tx_pkts[3]->buf_iova,
tx_pkts[2]->data_len,
- VIRTIO_MBUF_ADDR(tx_pkts[2], vq),
+ tx_pkts[2]->buf_iova,
tx_pkts[1]->data_len,
- VIRTIO_MBUF_ADDR(tx_pkts[1], vq),
+ tx_pkts[1]->buf_iova,
tx_pkts[0]->data_len,
- VIRTIO_MBUF_ADDR(tx_pkts[0], vq));
+ tx_pkts[0]->buf_iova);
/* id offset and data offset */
__m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
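+ /* Data IOVA of each packet: base buffer IOVA plus its data offset. */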
uint64x2_t base_addr0 = {
- VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
- VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
+ tx_pkts[0]->buf_iova + tx_pkts[0]->data_off,
+ tx_pkts[1]->buf_iova + tx_pkts[1]->data_off
};
uint64x2_t base_addr1 = {
- VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
- VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
+ tx_pkts[2]->buf_iova + tx_pkts[2]->data_off,
+ tx_pkts[3]->buf_iova + tx_pkts[3]->data_off
};
desc[0].val[0] = base_addr0;
p = (uintptr_t)&sw_ring[i]->rearm_data;
*(uint64_t *)p = rxvq->mbuf_initializer;
- start_dp[i].addr =
- VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+ start_dp[i].addr = sw_ring[i]->buf_iova +
RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
start_dp[i].len = sw_ring[i]->buf_len -
RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
.remove = virtio_user_pmd_remove,
.dma_map = virtio_user_pmd_dma_map,
.dma_unmap = virtio_user_pmd_dma_unmap,
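+ /* Buffer addresses are now programmed as IOVAs, so virtio-user
+ * requires IOVA-as-VA mode.
+ */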
+ .drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};
RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
#define VIRTQUEUE_MAX_NAME_SZ 32
-#ifdef RTE_VIRTIO_USER
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer.
- *
- * The address is firstly casted to the word size (sizeof(uintptr_t))
- * before casting it to uint64_t. This is to make it work with different
- * combination of word size (64 bit and 32 bit) and virtio device
- * (virtio-pci and virtio-user).
- */
-#define VIRTIO_MBUF_ADDR(mb, vq) \
- ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
-#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
-#endif
-
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer, taking care of mbuf data offset
- */
-#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
- (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
-
#define VTNET_SQ_RQ_QUEUE_IDX 0
#define VTNET_SQ_TQ_QUEUE_IDX 1
#define VTNET_SQ_CQ_QUEUE_IDX 2
do {
uint16_t flags;
- start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].addr = rte_mbuf_data_iova(cookie);
start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;