git.droids-corp.org - dpdk.git/commitdiff
net/virtio: revert forcing IOVA as VA mode for virtio-user
authorMaxime Coquelin <maxime.coquelin@redhat.com>
Thu, 30 Sep 2021 08:12:59 +0000 (10:12 +0200)
committerFerruh Yigit <ferruh.yigit@intel.com>
Thu, 30 Sep 2021 10:58:09 +0000 (12:58 +0200)
This patch removes the simplification in Virtio descriptors
handling, where their buffer addresses are IOVAs for Virtio
PCI devices, and VA-only for Virtio-user devices, which
added a requirement on Virtio-user that it only supported
IOVA as VA.

This change introduced a regression for applications using
Virtio-user and other physical PMDs that require IOVA as PA
because they don't use an IOMMU.

This patch reverts to the old behaviour, but needed to be
reworked because of the refactoring that happened in v21.02.

Fixes: 17043a2909bb ("net/virtio: force IOVA as VA mode for virtio-user")
Cc: stable@dpdk.org
Reported-by: Olivier Matz <olivier.matz@6wind.com>
Signed-off-by: Maxime Coquelin <maxime.coquelin@redhat.com>
Tested-by: Olivier Matz <olivier.matz@6wind.com>
Reviewed-by: David Marchand <david.marchand@redhat.com>
drivers/net/virtio/virtio.h
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx_packed.h
drivers/net/virtio/virtio_rxtx_packed_avx.h
drivers/net/virtio/virtio_rxtx_packed_neon.h
drivers/net/virtio/virtio_rxtx_simple.h
drivers/net/virtio/virtio_user_ethdev.c
drivers/net/virtio/virtqueue.h

index 525e2dad4cf998dfda31f6bdbd8d94d0a26c31c0..e78b2e429e09b5971f7552a459789dcf9a7b58fe 100644 (file)
@@ -192,6 +192,7 @@ struct virtio_hw {
        uint16_t max_queue_pairs;
        uint64_t req_guest_features;
        struct virtnet_ctl *cvq;
+       bool use_va;
 };
 
 struct virtio_ops {
index b08109c61cf3a90c54d39d18b3b509ffd45405c7..b60eeb24abe7da63647a9353159b390ca36b7b18 100644 (file)
@@ -515,12 +515,14 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
 
        memset(mz->addr, 0, mz->len);
 
-       vq->vq_ring_mem = mz->iova;
+       if (hw->use_va)
+               vq->vq_ring_mem = (uintptr_t)mz->addr;
+       else
+               vq->vq_ring_mem = mz->iova;
+
        vq->vq_ring_virt_mem = mz->addr;
-       PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem:      0x%" PRIx64,
-                    (uint64_t)mz->iova);
-       PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64,
-                    (uint64_t)(uintptr_t)mz->addr);
+       PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, vq->vq_ring_mem);
+       PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: %p", vq->vq_ring_virt_mem);
 
        virtio_init_vring(vq);
 
@@ -570,17 +572,28 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t queue_idx)
                txvq->port_id = dev->data->port_id;
                txvq->mz = mz;
                txvq->virtio_net_hdr_mz = hdr_mz;
-               txvq->virtio_net_hdr_mem = hdr_mz->iova;
+               if (hw->use_va)
+                       txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+               else
+                       txvq->virtio_net_hdr_mem = hdr_mz->iova;
        } else if (queue_type == VTNET_CQ) {
                cvq = &vq->cq;
                cvq->mz = mz;
                cvq->virtio_net_hdr_mz = hdr_mz;
-               cvq->virtio_net_hdr_mem = hdr_mz->iova;
+               if (hw->use_va)
+                       cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr;
+               else
+                       cvq->virtio_net_hdr_mem = hdr_mz->iova;
                memset(cvq->virtio_net_hdr_mz->addr, 0, rte_mem_page_size());
 
                hw->cvq = cvq;
        }
 
+       if (hw->use_va)
+               vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
+       else
+               vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
+
        if (queue_type == VTNET_TQ) {
                struct virtio_tx_region *txr;
                unsigned int i;
index b9d7c8d18f367f31ff0b8a41e43418561098cf6b..e8e6ed20a5ac9da65c8411cba95329de9bcc794c 100644 (file)
@@ -271,7 +271,7 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
                dxp->cookie = (void *)cookies[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr = cookies[i]->buf_iova +
+               start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
                        RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
                start_dp[idx].len = cookies[i]->buf_len -
                        RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
@@ -310,10 +310,10 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
                dxp->cookie = (void *)cookie[i];
                dxp->ndescs = 1;
 
-               start_dp[idx].addr = cookie[i]->buf_iova +
+               start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
                        RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-               start_dp[idx].len = cookie[i]->buf_len -
-                       RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+               start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
+                       hw->vtnet_hdr_size;
                start_dp[idx].flags = VRING_DESC_F_WRITE;
                vq->vq_desc_head_idx = start_dp[idx].next;
                vq_update_avail_ring(vq, idx);
@@ -336,13 +336,10 @@ virtqueue_refill_single_packed(struct virtqueue *vq,
        uint16_t flags = vq->vq_packed.cached_flags;
        struct virtio_hw *hw = vq->hw;
 
-       dp->addr = cookie->buf_iova +
-                       RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
-       dp->len = cookie->buf_len -
-               RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+       dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+       dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
 
-       virtqueue_store_flags_packed(dp, flags,
-                                    hw->weak_barriers);
+       virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
 
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
@@ -482,8 +479,8 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                else
                        virtqueue_xmit_offload(hdr, cookies[i]);
 
-               start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
-               start_dp[idx].len   = cookies[i]->data_len + head_size;
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+               start_dp[idx].len = cookies[i]->data_len + head_size;
                start_dp[idx].flags = 0;
 
 
@@ -529,9 +526,9 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        else
                virtqueue_xmit_offload(hdr, cookie);
 
-       dp->addr = rte_mbuf_data_iova(cookie) - head_size;
-       dp->len  = cookie->data_len + head_size;
-       dp->id   = id;
+       dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+       dp->len = cookie->data_len + head_size;
+       dp->id = id;
 
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
@@ -617,8 +614,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                virtqueue_xmit_offload(hdr, cookie);
 
        do {
-               start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
-               start_dp[idx].len   = cookie->data_len;
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+               start_dp[idx].len = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
                        start_dp[idx].len += head_size;
index 1d1db60da8b74e1cf0c94bc24ec8bb8e0e695dab..77e5cb37e797b3a73c872a099193e68e62e0fdef 100644 (file)
@@ -288,7 +288,7 @@ virtio_recv_refill_packed_vec(struct virtnet_rx *rxvq,
                        dxp = &vq->vq_descx[idx + i];
                        dxp->cookie = (void *)cookie[total_num + i];
 
-                       addr = cookie[total_num + i]->buf_iova +
+                       addr = VIRTIO_MBUF_ADDR(cookie[total_num + i], vq) +
                                RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
                        start_dp[idx + i].addr = addr;
                        start_dp[idx + i].len = cookie[total_num + i]->buf_len
index c819d2e4f2825caa6154900e75f3a2d66ed891b7..8cb71f3fe62fca7f8140209af0010ef6aec0c5ab 100644 (file)
@@ -71,13 +71,13 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
        }
 
        __m512i descs_base = _mm512_set_epi64(tx_pkts[3]->data_len,
-                       tx_pkts[3]->buf_iova,
+                       VIRTIO_MBUF_ADDR(tx_pkts[3], vq),
                        tx_pkts[2]->data_len,
-                       tx_pkts[2]->buf_iova,
+                       VIRTIO_MBUF_ADDR(tx_pkts[2], vq),
                        tx_pkts[1]->data_len,
-                       tx_pkts[1]->buf_iova,
+                       VIRTIO_MBUF_ADDR(tx_pkts[1], vq),
                        tx_pkts[0]->data_len,
-                       tx_pkts[0]->buf_iova);
+                       VIRTIO_MBUF_ADDR(tx_pkts[0], vq));
 
        /* id offset and data offset */
        __m512i data_offsets = _mm512_set_epi64((uint64_t)3 << ID_BITS_OFFSET,
index f19e6186357a6844f6cac61f9542dfd5e890c915..c222ebf00ca020b97f86b360513da955bca347bf 100644 (file)
@@ -97,12 +97,12 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
 
        uint64x2x2_t desc[PACKED_BATCH_SIZE / 2];
        uint64x2_t base_addr0 = {
-               tx_pkts[0]->buf_iova + tx_pkts[0]->data_off,
-               tx_pkts[1]->buf_iova + tx_pkts[1]->data_off
+               VIRTIO_MBUF_ADDR(tx_pkts[0], vq) + tx_pkts[0]->data_off,
+               VIRTIO_MBUF_ADDR(tx_pkts[1], vq) + tx_pkts[1]->data_off
        };
        uint64x2_t base_addr1 = {
-               tx_pkts[2]->buf_iova + tx_pkts[2]->data_off,
-               tx_pkts[3]->buf_iova + tx_pkts[3]->data_off
+               VIRTIO_MBUF_ADDR(tx_pkts[2], vq) + tx_pkts[2]->data_off,
+               VIRTIO_MBUF_ADDR(tx_pkts[3], vq) + tx_pkts[3]->data_off
        };
 
        desc[0].val[0] = base_addr0;
index f258771fcf51dc71d6567223cc78cf26f1b8046b..d8f96e0434fe9fb2d39390c52afcedeef45fca48 100644 (file)
@@ -43,7 +43,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
                p = (uintptr_t)&sw_ring[i]->rearm_data;
                *(uint64_t *)p = rxvq->mbuf_initializer;
 
-               start_dp[i].addr = sw_ring[i]->buf_iova +
+               start_dp[i].addr = VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
                        RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
                start_dp[i].len = sw_ring[i]->buf_len -
                        RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
index 688c1104d5d5ae0402258ec88fb7424a535c164d..0271098f0da0c36f9f3efca00103f240cd3f0f6d 100644 (file)
@@ -657,6 +657,12 @@ virtio_user_pmd_probe(struct rte_vdev_device *vdev)
                goto end;
        }
 
+       /*
+        * Virtio-user requires using virtual addresses for the descriptors
+        * buffers, whatever other devices require
+        */
+       hw->use_va = true;
+
        /* previously called by pci probing for physical dev */
        if (eth_virtio_dev_init(eth_dev) < 0) {
                PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails");
@@ -769,7 +775,6 @@ static struct rte_vdev_driver virtio_user_driver = {
        .remove = virtio_user_pmd_remove,
        .dma_map = virtio_user_pmd_dma_map,
        .dma_unmap = virtio_user_pmd_dma_unmap,
-       .drv_flags = RTE_VDEV_DRV_NEED_IOVA_AS_VA,
 };
 
 RTE_PMD_REGISTER_VDEV(net_virtio_user, virtio_user_driver);
index d0c48ca415312304016e79c2bf0d00a8a156270d..5baac221f7923fb40b5548db3dee9e09ac71438b 100644 (file)
@@ -113,6 +113,25 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of mbuf
+ * data buffer.
+ *
+ * The address is firstly casted to the word size (sizeof(uintptr_t))
+ * before casting it to uint64_t. This is to make it work with different
+ * combination of word size (64 bit and 32 bit) and virtio device
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+       ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer, taking care of mbuf data offset
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+       (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
@@ -255,6 +274,7 @@ struct virtqueue {
 
        void *vq_ring_virt_mem;  /**< linear address of vring*/
        unsigned int vq_ring_size;
+       uint16_t mbuf_addr_offset;
 
        union {
                struct virtnet_rx rxq;
@@ -739,7 +759,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        do {
                uint16_t flags;
 
-               start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len  = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;