From: Maxime Coquelin
Date: Tue, 26 Jan 2021 10:16:05 +0000 (+0100)
Subject: net/virtio: force IOVA as VA mode for virtio-user
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=17043a2909bb366c57640b5816a86f02d67f807b;p=dpdk.git

net/virtio: force IOVA as VA mode for virtio-user

At least the Vhost-user backend of the Virtio-user PMD requires IOVA as
VA mode. Until now, this was implemented as a hack, forcing the use of
the mbuf's buf_addr field instead of buf_iova.

This patch removes all this logic and simply fails probing if IOVA as
VA mode is not selected. It simplifies the code overall and removes
some bus-specific logic from the generic virtio_ethdev.c.

Signed-off-by: Maxime Coquelin
Reviewed-by: Chenbo Xia
---
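Note (not part of the commit; kept below the "---" marker, where git-am
ignores it): the sketch below illustrates how a vdev PMD can advertise
the IOVA-as-VA requirement through drv_flags, as this patch does for
virtio_user_driver, and additionally check the selected mode at probe
time. The demo driver name and the probe/remove stubs are hypothetical;
RTE_VDEV_DRV_NEED_IOVA_AS_VA, rte_eal_iova_mode() and RTE_IOVA_VA are
existing DPDK APIs.

#include <errno.h>

#include <rte_bus_vdev.h>
#include <rte_common.h>
#include <rte_eal.h>

static int
demo_pmd_probe(struct rte_vdev_device *vdev)
{
	RTE_SET_USED(vdev);

	/* Defensive check: refuse to probe if the EAL did not select
	 * IOVA as VA mode. */
	if (rte_eal_iova_mode() != RTE_IOVA_VA)
		return -ENOTSUP;

	return 0;
}

static int
demo_pmd_remove(struct rte_vdev_device *vdev)
{
	RTE_SET_USED(vdev);
	return 0;
}

static struct rte_vdev_driver demo_pmd_driver = {
	.probe = demo_pmd_probe,
	.remove = demo_pmd_remove,
	/* Same flag as the one this patch adds to virtio_user_driver:
	 * it tells the vdev bus the driver only supports IOVA as VA. */
	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
};
RTE_PMD_REGISTER_VDEV(net_demo_iova_va, demo_pmd_driver);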
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 83253ed27a..bf5b0119bf 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -576,21 +576,6 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
 		hw->cvq = cvq;
 	}
 
-	/* For virtio_user case (that is when hw->virtio_user_dev is not NULL),
-	 * we use virtual address. And we need properly set _offset_, please see
-	 * VIRTIO_MBUF_DATA_DMA_ADDR in virtqueue.h for more information.
-	 */
-	if (hw->bus_type == VIRTIO_BUS_PCI_LEGACY || hw->bus_type == VIRTIO_BUS_PCI_MODERN) {
-		vq->offset = offsetof(struct rte_mbuf, buf_iova);
-	} else if (hw->bus_type == VIRTIO_BUS_USER) {
-		vq->vq_ring_mem = (uintptr_t)mz->addr;
-		vq->offset = offsetof(struct rte_mbuf, buf_addr);
-		if (queue_type == VTNET_TQ)
-			txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
-		else if (queue_type == VTNET_CQ)
-			cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
-	}
-
 	if (queue_type == VTNET_TQ) {
 		struct virtio_tx_region *txr;
 		unsigned int i;
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index fb7382eff7..cbc846dd0e 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -271,13 +271,10 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
 		dxp->cookie = (void *)cookies[i];
 		dxp->ndescs = 1;
 
-		start_dp[idx].addr =
-			VIRTIO_MBUF_ADDR(cookies[i], vq) +
-			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len =
-			cookies[i]->buf_len -
-			RTE_PKTMBUF_HEADROOM +
-			hw->vtnet_hdr_size;
+		start_dp[idx].addr = cookies[i]->buf_iova +
+			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+		start_dp[idx].len = cookies[i]->buf_len -
+			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
 		start_dp[idx].flags = VRING_DESC_F_WRITE;
 
 		vq_update_avail_ring(vq, idx);
@@ -313,12 +310,10 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
 		dxp->cookie = (void *)cookie[i];
 		dxp->ndescs = 1;
 
-		start_dp[idx].addr =
-			VIRTIO_MBUF_ADDR(cookie[i], vq) +
+		start_dp[idx].addr = cookie[i]->buf_iova +
 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len =
-			cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
-			hw->vtnet_hdr_size;
+		start_dp[idx].len = cookie[i]->buf_len -
+			RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
 		start_dp[idx].flags = VRING_DESC_F_WRITE;
 		vq->vq_desc_head_idx = start_dp[idx].next;
 		vq_update_avail_ring(vq, idx);
@@ -355,10 +350,10 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
 		dxp->cookie = (void *)cookie[i];
 		dxp->ndescs = 1;
 
-		start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
-				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-		start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM
-				+ hw->vtnet_hdr_size;
+		start_dp[idx].addr = cookie[i]->buf_iova +
+				RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+		start_dp[idx].len = cookie[i]->buf_len -
+				RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
 
 		vq->vq_desc_head_idx = dxp->next;
 		if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
@@ -455,8 +450,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
 		else
 			virtqueue_xmit_offload(hdr, cookies[i], true);
 
-		start_dp[idx].addr =
-			VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+		start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
 		start_dp[idx].len = cookies[i]->data_len + head_size;
 		start_dp[idx].flags = 0;
 
@@ -503,7 +497,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 	else
 		virtqueue_xmit_offload(hdr, cookie, true);
 
-	dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
 	dp->len = cookie->data_len + head_size;
 
 	dp->id = id;
@@ -590,7 +584,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
 
 	do {
-		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+		start_dp[idx].addr = rte_mbuf_data_iova(cookie);
 		start_dp[idx].len = cookie->data_len;
 		if (prepend_header) {
 			start_dp[idx].addr -= head_size;
diff --git a/drivers/net/virtio/virtio_rxtx_packed.h b/drivers/net/virtio/virtio_rxtx_packed.h
index 016b6fb247..c0fad41c3d 100644
--- a/drivers/net/virtio/virtio_rxtx_packed.h
+++ b/drivers/net/virtio/virtio_rxtx_packed.h
@@ -288,7 +288,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
 		dxp = &vq->vq_descx[idx + i];
 		dxp->cookie = (void *)cookie[total_num + i];
 
-		addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
+		addr = cookie[total_num + i]->buf_iova +
 			RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
 		start_dp[idx + i].addr = addr;
 		start_dp[idx + i].len = cookie[total_num + i]->buf_len
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h b/drivers/net/virtio/virtio_rxtx_packed_avx.h
index f831828845..2c610d57de 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h
@@ -71,13 +71,13 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 	}
 
 	__m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
-					VIRTIO_MBUF_ADDR(tx_pkts[3], vq),
+					tx_pkts[3]->buf_iova,
 					tx_pkts[2]->data_len,
-					VIRTIO_MBUF_ADDR(tx_pkts[2], vq),
+					tx_pkts[2]->buf_iova,
 					tx_pkts[1]->data_len,
-					VIRTIO_MBUF_ADDR(tx_pkts[1], vq),
+					tx_pkts[1]->buf_iova,
 					tx_pkts[0]->data_len,
-					VIRTIO_MBUF_ADDR(tx_pkts[0], vq));
+					tx_pkts[0]->buf_iova);
 
 	/* id offset and data offset */
 	__m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
index 01c77b712d..ae2d099c0b 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_neon.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
@@ -97,12 +97,12 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 
 	uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
 	uint64x2_t base_addr0 = {
-		VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
-		VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
+		tx_pkts[0]->buf_iova + tx_pkts[0]->data_off,
+		tx_pkts[1]->buf_iova + tx_pkts[1]->data_off
 	};
 	uint64x2_t base_addr1 = {
-		VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
-		VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
+		tx_pkts[2]->buf_iova + tx_pkts[2]->data_off,
+		tx_pkts[3]->buf_iova + tx_pkts[3]->data_off
 	};
 
 	desc[0].val[0] = base_addr0;
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index 3d1296a23c..f2a5aedf97 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.h
+++ b/drivers/net/virtio/virtio_rxtx_simple.h
@@ -43,8 +43,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 		p = (uintptr_t)&sw_ring[i]->rearm_data;
 		*(uint64_t *)p = rxvq->mbuf_initializer;
 
-		start_dp[i].addr =
-			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+		start_dp[i].addr = sw_ring[i]->buf_iova +
 			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
 		start_dp[i].len = sw_ring[i]->buf_len -
 			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index 574521124a..968bc8eb12 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -953,6 +953,7 @@ static struct rte_vdev_driver virtio_user_driver = {
 	.remove = virtio_user_pmd_remove,
 	.dma_map = virtio_user_pmd_dma_map,
 	.dma_unmap = virtio_user_pmd_dma_unmap,
+	.drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
 };
 
 RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index d78b943443..7611317581 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -113,29 +113,6 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
-#ifdef RTE_VIRTIO_USER
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer.
- *
- * The address is firstly casted to the word size (sizeof(uintptr_t))
- * before casting it to uint64_t. This is to make it work with different
- * combination of word size (64 bit and 32 bit) and virtio device
- * (virtio-pci and virtio-user).
- */
-#define VIRTIO_MBUF_ADDR(mb, vq) \
-	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))
-#else
-#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova)
-#endif
-
-/**
- * Return the physical address (or virtual address in case of
- * virtio-user) of mbuf data buffer, taking care of mbuf data offset
- */
-#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
-	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
-
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
@@ -763,7 +740,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	do {
 		uint16_t flags;
 
-		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+		start_dp[idx].addr = rte_mbuf_data_iova(cookie);
 		start_dp[idx].len = cookie->data_len;
 		if (prepend_header) {
 			start_dp[idx].addr -= head_size;
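
Note (not part of the commit): with IOVA as VA, the IOVA stored in an
mbuf's buf_iova field equals the buffer's virtual address, so descriptor
addresses can always be taken from buf_iova, which is why the removed
VIRTIO_MBUF_ADDR()/VIRTIO_MBUF_DATA_DMA_ADDR() indirection through
vq->offset is no longer needed. A minimal sketch of the address
computation used by the reworked code paths; the helper name
desc_data_addr() is made up for illustration, while rte_mbuf_data_iova()
is the existing DPDK API the patch switches to.

#include <rte_mbuf.h>

/* Address written into a virtqueue descriptor for the start of the mbuf
 * data area. */
static inline uint64_t
desc_data_addr(const struct rte_mbuf *mb)
{
	/* rte_mbuf_data_iova() evaluates to mb->buf_iova + mb->data_off.
	 * Under IOVA as VA, buf_iova holds the buffer's virtual address,
	 * so this matches what the removed VIRTIO_MBUF_DATA_DMA_ADDR()
	 * hack computed from buf_addr for virtio-user. */
	return rte_mbuf_data_iova(mb);
}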