vhost: add support for non-contiguous indirect descs tables
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index f8732df..e43df8c 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2016 Intel Corporation
  */
 
 #include <stdint.h>
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_arp.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
 
+#include "iotlb.h"
 #include "vhost.h"
 
 #define MAX_PKT_BURST 32
@@ -57,6 +31,46 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
 
+static __rte_always_inline struct vring_desc *
+alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                                        struct vring_desc *desc)
+{
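+       /*
+        * The guest's indirect descriptor table is not contiguous in the
+        * host process VA space: allocate a private copy and rebuild it
+        * chunk by chunk so it can then be parsed as a flat array.
+        */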
+       struct vring_desc *idesc;
+       uint64_t src, dst;
+       uint64_t len, remain = desc->len;
+       uint64_t desc_addr = desc->addr;
+
+       idesc = rte_malloc(__func__, desc->len, 0);
+       if (unlikely(!idesc))
+               return 0;
+
+       dst = (uint64_t)(uintptr_t)idesc;
+
+       while (remain) {
+               len = remain;
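+               /*
+                * Translate the next chunk; vhost_iova_to_vva() shrinks len
+                * to the largest contiguously-mapped length at desc_addr.
+                */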
+               src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+                               VHOST_ACCESS_RO);
+               if (unlikely(!src || !len)) {
+                       rte_free(idesc);
+                       return 0;
+               }
+
+               rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+               remain -= len;
+               dst += len;
+               desc_addr += len;
+       }
+
+       return idesc;
+}
+
+static __rte_always_inline void
+free_ind_table(struct vring_desc *idesc)
+{
+       rte_free(idesc);
+}
+
 static __rte_always_inline void
 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                          uint16_t to, uint16_t from, uint16_t size)
@@ -187,6 +201,11 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
                net_hdr->gso_size = m_buf->tso_segsz;
                net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
                                        + m_buf->l4_len;
+       } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
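+               /*
+                * UDP Fragmentation Offload: pass the segment size and
+                * header length to the guest through the virtio-net header.
+                */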
+               net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+               net_hdr->gso_size = m_buf->tso_segsz;
+               net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
+                       m_buf->l4_len;
        } else {
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
                ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
@@ -202,6 +221,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
+       uint64_t dlen;
        struct vring_desc *desc;
        uint64_t desc_addr;
        /* A counter to avoid desc dead loop chain */
@@ -211,13 +231,16 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
        int error = 0;
 
        desc = &descs[desc_idx];
-       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
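+       /*
+        * dlen is passed in as the expected length and updated to the length
+        * actually mapped; a mismatch below means the buffer is not fully
+        * contiguous in host VA, so the descriptor chain is rejected.
+        */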
+       dlen = desc->len;
+       desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+                                       &dlen, VHOST_ACCESS_RW);
        /*
         * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
         * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
         * otherwise stores offset on the stack instead of in a register.
         */
-       if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+       if (unlikely(dlen != desc->len || desc->len < dev->vhost_hlen) ||
+                       !desc_addr) {
                error = -1;
                goto out;
        }
@@ -255,8 +278,11 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
 
                        desc = &descs[desc->next];
-                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
-                       if (unlikely(!desc_addr)) {
+                       dlen = desc->len;
+                       desc_addr = vhost_iova_to_vva(dev, vq, desc->addr,
+                                                       &dlen,
+                                                       VHOST_ACCESS_RW);
+                       if (unlikely(!desc_addr || dlen != desc->len)) {
                                error = -1;
                                goto out;
                        }
@@ -314,7 +340,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        uint16_t used_idx;
        uint32_t i, sz;
 
-       LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
@@ -322,8 +348,21 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
+
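+       /*
+        * Take the access lock so that vhost-user protocol requests cannot
+        * modify the ring while it is being processed by the datapath.
+        */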
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0))
-               return 0;
+               goto out_access_unlock;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_lock(vq);
+
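+       /*
+        * Ring addresses are translated lazily; redo the translation here
+        * if it has been invalidated, e.g. after an IOTLB update.
+        */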
+       if (unlikely(vq->access_ok == 0)) {
+               if (unlikely(vring_translate(dev, vq) < 0)) {
+                       count = 0;
+                       goto out;
+               }
+       }
 
        avail_idx = *((volatile uint16_t *)&vq->avail->idx);
        start_idx = vq->last_used_idx;
@@ -331,9 +370,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        count = RTE_MIN(count, free_entries);
        count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
        if (count == 0)
-               return 0;
+               goto out;
 
-       LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
                dev->vid, start_idx, start_idx + count);
 
        vq->batch_copy_nb_elems = 0;
@@ -353,18 +392,34 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 
        rte_prefetch0(&vq->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
+               struct vring_desc *idesc = NULL;
                uint16_t desc_idx = desc_indexes[i];
                int err;
 
                if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+                       uint64_t dlen = vq->desc[desc_idx].len;
                        descs = (struct vring_desc *)(uintptr_t)
-                               rte_vhost_gpa_to_vva(dev->mem,
-                                       vq->desc[desc_idx].addr);
+                               vhost_iova_to_vva(dev,
+                                               vq, vq->desc[desc_idx].addr,
+                                               &dlen, VHOST_ACCESS_RO);
                        if (unlikely(!descs)) {
                                count = i;
                                break;
                        }
 
+                       if (unlikely(dlen < vq->desc[desc_idx].len)) {
+                               /*
+                                * The indirect desc table is not contiguous
+                                * in process VA space, we have to copy it.
+                                */
+                               idesc = alloc_copy_ind_table(dev, vq,
+                                                       &vq->desc[desc_idx]);
+                               if (unlikely(!idesc))
+                                       break;
+
+                               descs = idesc;
+                       }
+
                        desc_idx = 0;
                        sz = vq->desc[desc_idx].len / sizeof(*descs);
                } else {
@@ -374,15 +429,16 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 
                err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
                if (unlikely(err)) {
-                       used_idx = (start_idx + i) & (vq->size - 1);
-                       vq->used->ring[used_idx].len = dev->vhost_hlen;
-                       vhost_log_used_vring(dev, vq,
-                               offsetof(struct vring_used, ring[used_idx]),
-                               sizeof(vq->used->ring[used_idx]));
+                       count = i;
+                       free_ind_table(idesc);
+                       break;
                }
 
                if (i + 1 < count)
                        rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+
+               if (unlikely(!!idesc))
+                       free_ind_table(idesc);
        }
 
        do_data_copy_enqueue(dev, vq);
@@ -395,13 +451,14 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
 
-       /* flush used->idx update before we read avail->flags. */
-       rte_mb();
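+       /*
+        * vhost_vring_call() flushes the used->idx update and kicks the
+        * guest through callfd when interrupts are not suppressed.
+        */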
+       vhost_vring_call(dev, vq);
+out:
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
 
-       /* Kick the guest if necessary. */
-       if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-                       && (vq->callfd >= 0))
-               eventfd_write(vq->callfd, (eventfd_t)1);
        return count;
 }
 
@@ -414,22 +471,41 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint32_t vec_id = *vec_idx;
        uint32_t len    = 0;
+       uint64_t dlen;
        struct vring_desc *descs = vq->desc;
+       struct vring_desc *idesc = NULL;
 
        *desc_chain_head = idx;
 
        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+               dlen = vq->desc[idx].len;
                descs = (struct vring_desc *)(uintptr_t)
-                       rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+                       vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
+                                               &dlen,
+                                               VHOST_ACCESS_RO);
                if (unlikely(!descs))
                        return -1;
 
+               if (unlikely(dlen < vq->desc[idx].len)) {
+                       /*
+                        * The indirect desc table is not contiguous
+                        * in process VA space, we have to copy it.
+                        */
+                       idesc = alloc_copy_ind_table(dev, vq, &vq->desc[idx]);
+                       if (unlikely(!idesc))
+                               return -1;
+
+                       descs = idesc;
+               }
+
                idx = 0;
        }
 
        while (1) {
-               if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+               if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
+                       free_ind_table(idesc);
                        return -1;
+               }
 
                len += descs[idx].len;
                buf_vec[vec_id].buf_addr = descs[idx].addr;
@@ -446,6 +522,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        *desc_chain_len = len;
        *vec_idx = vec_id;
 
+       if (unlikely(!!idesc))
+               free_ind_table(idesc);
+
        return 0;
 }
 
@@ -504,6 +583,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t mbuf_offset, mbuf_avail;
        uint32_t desc_offset, desc_avail;
        uint32_t cpy_len;
+       uint64_t dlen;
        uint64_t hdr_addr, hdr_phys_addr;
        struct rte_mbuf *hdr_mbuf;
        struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
@@ -515,8 +595,12 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                goto out;
        }
 
-       desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
-       if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+       dlen = buf_vec[vec_idx].buf_len;
+       desc_addr = vhost_iova_to_vva(dev, vq, buf_vec[vec_idx].buf_addr,
+                                               &dlen, VHOST_ACCESS_RW);
+       if (dlen != buf_vec[vec_idx].buf_len ||
+                       buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
+                       !desc_addr) {
                error = -1;
                goto out;
        }
@@ -526,7 +610,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
        hdr_phys_addr = buf_vec[vec_idx].buf_addr;
        rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
-       LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
                dev->vid, num_buffers);
 
        desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
@@ -538,9 +622,14 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
                /* done with current desc buf, get the next one */
                if (desc_avail == 0) {
                        vec_idx++;
-                       desc_addr = rte_vhost_gpa_to_vva(dev->mem,
-                                       buf_vec[vec_idx].buf_addr);
-                       if (unlikely(!desc_addr)) {
+                       dlen = buf_vec[vec_idx].buf_len;
+                       desc_addr =
+                               vhost_iova_to_vva(dev, vq,
+                                       buf_vec[vec_idx].buf_addr,
+                                       &dlen,
+                                       VHOST_ACCESS_RW);
+                       if (unlikely(!desc_addr ||
+                                       dlen != buf_vec[vec_idx].buf_len)) {
                                error = -1;
                                goto out;
                        }
@@ -619,7 +708,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;
 
-       LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
@@ -627,12 +716,22 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+
        if (unlikely(vq->enabled == 0))
-               return 0;
+               goto out_access_unlock;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_lock(vq);
+
+       if (unlikely(vq->access_ok == 0))
+               if (unlikely(vring_translate(dev, vq) < 0))
+                       goto out;
 
        count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
        if (count == 0)
-               return 0;
+               goto out;
 
        vq->batch_copy_nb_elems = 0;
 
@@ -646,14 +745,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                if (unlikely(reserve_avail_buf_mergeable(dev, vq,
                                                pkt_len, buf_vec, &num_buffers,
                                                avail_head) < 0)) {
-                       LOG_DEBUG(VHOST_DATA,
+                       VHOST_LOG_DEBUG(VHOST_DATA,
                                "(%d) failed to get enough desc from vring\n",
                                dev->vid);
                        vq->shadow_used_idx -= num_buffers;
                        break;
                }
 
-               LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+               VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
                        dev->vid, vq->last_avail_idx,
                        vq->last_avail_idx + num_buffers);
 
@@ -670,15 +769,15 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 
        if (likely(vq->shadow_used_idx)) {
                flush_shadow_used_ring(dev, vq);
+               vhost_vring_call(dev, vq);
+       }
 
-               /* flush used->idx update before we read avail->flags. */
-               rte_mb();
+out:
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_unlock(vq);
 
-               /* Kick the guest if necessary. */
-               if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-                               && (vq->callfd >= 0))
-                       eventfd_write(vq->callfd, (eventfd_t)1);
-       }
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
 
        return pkt_idx;
 }
@@ -692,6 +791,13 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        if (!dev)
                return 0;
 
+       if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+               RTE_LOG(ERR, VHOST_DATA,
+                       "(%d) %s: built-in vhost net backend is disabled.\n",
+                       dev->vid, __func__);
+               return 0;
+       }
+
        if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
                return virtio_dev_merge_rx(dev, queue_id, pkts, count);
        else
@@ -799,6 +905,11 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
                        m->tso_segsz = hdr->gso_size;
                        m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
                        break;
+               case VIRTIO_NET_HDR_GSO_UDP:
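+                       /* Guest sent a UFO packet: request UDP segmentation */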
+                       m->ol_flags |= PKT_TX_UDP_SEG;
+                       m->tso_segsz = hdr->gso_size;
+                       m->l4_len = sizeof(struct udp_hdr);
+                       break;
                default:
                        RTE_LOG(WARNING, VHOST_DATA,
                                "unsupported gso type %u.\n", hdr->gso_type);
@@ -807,45 +918,6 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
        }
 }
 
-#define RARP_PKT_SIZE  64
-
-static int
-make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
-{
-       struct ether_hdr *eth_hdr;
-       struct arp_hdr  *rarp;
-
-       if (rarp_mbuf->buf_len < 64) {
-               RTE_LOG(WARNING, VHOST_DATA,
-                       "failed to make RARP; mbuf size too small %u (< %d)\n",
-                       rarp_mbuf->buf_len, RARP_PKT_SIZE);
-               return -1;
-       }
-
-       /* Ethernet header. */
-       eth_hdr = rte_pktmbuf_mtod_offset(rarp_mbuf, struct ether_hdr *, 0);
-       memset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN);
-       ether_addr_copy(mac, &eth_hdr->s_addr);
-       eth_hdr->ether_type = htons(ETHER_TYPE_RARP);
-
-       /* RARP header. */
-       rarp = (struct arp_hdr *)(eth_hdr + 1);
-       rarp->arp_hrd = htons(ARP_HRD_ETHER);
-       rarp->arp_pro = htons(ETHER_TYPE_IPv4);
-       rarp->arp_hln = ETHER_ADDR_LEN;
-       rarp->arp_pln = 4;
-       rarp->arp_op  = htons(ARP_OP_REVREQUEST);
-
-       ether_addr_copy(mac, &rarp->arp_data.arp_sha);
-       ether_addr_copy(mac, &rarp->arp_data.arp_tha);
-       memset(&rarp->arp_data.arp_sip, 0x00, 4);
-       memset(&rarp->arp_data.arp_tip, 0x00, 4);
-
-       rarp_mbuf->pkt_len  = rarp_mbuf->data_len = RARP_PKT_SIZE;
-
-       return 0;
-}
-
 static __rte_always_inline void
 put_zmbuf(struct zcopy_mbuf *zmbuf)
 {
@@ -863,6 +935,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint32_t desc_avail, desc_offset;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
+       uint64_t dlen;
        struct rte_mbuf *cur = m, *prev = m;
        struct virtio_net_hdr *hdr = NULL;
        /* A counter to avoid desc dead loop chain */
@@ -878,8 +951,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                goto out;
        }
 
-       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
-       if (unlikely(!desc_addr)) {
+       dlen = desc->len;
+       desc_addr = vhost_iova_to_vva(dev,
+                                       vq, desc->addr,
+                                       &dlen,
+                                       VHOST_ACCESS_RO);
+       if (unlikely(!desc_addr || dlen != desc->len)) {
                error = -1;
                goto out;
        }
@@ -902,8 +979,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        goto out;
                }
 
-               desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
-               if (unlikely(!desc_addr)) {
+               dlen = desc->len;
+               desc_addr = vhost_iova_to_vva(dev,
+                                                       vq, desc->addr,
+                                                       &dlen,
+                                                       VHOST_ACCESS_RO);
+               if (unlikely(!desc_addr || dlen != desc->len)) {
                        error = -1;
                        goto out;
                }
@@ -936,8 +1017,9 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                        desc->addr + desc_offset, cpy_len)))) {
                        cur->data_len = cpy_len;
                        cur->data_off = 0;
-                       cur->buf_addr = (void *)(uintptr_t)desc_addr;
-                       cur->buf_physaddr = hpa;
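+                       /*
+                        * Zero copy: point the mbuf straight at the guest
+                        * buffer, at the current offset within the descriptor.
+                        */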
+                       cur->buf_addr = (void *)(uintptr_t)(desc_addr
+                               + desc_offset);
+                       cur->buf_iova = hpa;
 
                        /*
                         * In zero copy mode, one mbuf can only reference data
@@ -946,7 +1028,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        mbuf_avail = cpy_len;
                } else {
                        if (likely(cpy_len > MAX_BATCH_LEN ||
-                                  copy_nb >= vq->size)) {
+                                  copy_nb >= vq->size ||
+                                  (hdr && cur == m))) {
                                rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
                                                                   mbuf_offset),
                                           (void *)((uintptr_t)(desc_addr +
@@ -985,8 +1068,11 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                goto out;
                        }
 
-                       desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
-                       if (unlikely(!desc_addr)) {
+                       dlen = desc->len;
+                       desc_addr = vhost_iova_to_vva(dev,
+                                                       vq, desc->addr,
+                                                       &dlen, VHOST_ACCESS_RO);
+                       if (unlikely(!desc_addr || dlen != desc->len)) {
                                error = -1;
                                goto out;
                        }
@@ -1061,11 +1147,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
        vq->used->idx += count;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                        sizeof(vq->used->idx));
-
-       /* Kick guest if required. */
-       if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
-                       && (vq->callfd >= 0))
-               eventfd_write(vq->callfd, (eventfd_t)1);
+       vhost_vring_call(dev, vq);
 }
 
 static __rte_always_inline struct zcopy_mbuf *
@@ -1111,6 +1193,22 @@ mbuf_is_consumed(struct rte_mbuf *m)
        return true;
 }
 
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
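+       /*
+        * Zero-copy dequeue made this mbuf chain point straight into guest
+        * memory; restore the original buffer address and IOVA before the
+        * mbufs go back to their mempool.
+        */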
+       uint32_t mbuf_size, priv_size;
+
+       while (m) {
+               priv_size = rte_pktmbuf_priv_size(m->pool);
+               mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+               /* start of buffer is after mbuf structure and priv data */
+
+               m->buf_addr = (char *)m + mbuf_size;
+               m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+               m = m->next;
+       }
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -1128,6 +1226,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        if (!dev)
                return 0;
 
+       if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+               RTE_LOG(ERR, VHOST_DATA,
+                       "(%d) %s: built-in vhost net backend is disabled.\n",
+                       dev->vid, __func__);
+               return 0;
+       }
+
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
                RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
                        dev->vid, __func__, queue_id);
@@ -1135,11 +1240,22 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        }
 
        vq = dev->virtqueue[queue_id];
-       if (unlikely(vq->enabled == 0))
+
+       if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
                return 0;
 
+       if (unlikely(vq->enabled == 0))
+               goto out_access_unlock;
+
        vq->batch_copy_nb_elems = 0;
 
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_lock(vq);
+
+       if (unlikely(vq->access_ok == 0))
+               if (unlikely(vring_translate(dev, vq) < 0))
+                       goto out;
+
        if (unlikely(dev->dequeue_zero_copy)) {
                struct zcopy_mbuf *zmbuf, *next;
                int nr_updated = 0;
@@ -1155,6 +1271,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                                nr_updated += 1;
 
                                TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+                               restore_mbuf(zmbuf->mbuf);
                                rte_pktmbuf_free(zmbuf->mbuf);
                                put_zmbuf(zmbuf);
                                vq->nr_zmbuf -= 1;
@@ -1184,19 +1301,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        rte_atomic16_cmpset((volatile uint16_t *)
                                &dev->broadcast_rarp.cnt, 1, 0))) {
 
-               rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
+               rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
                if (rarp_mbuf == NULL) {
                        RTE_LOG(ERR, VHOST_DATA,
-                               "Failed to allocate memory for mbuf.\n");
+                               "Failed to make RARP packet.\n");
                        return 0;
                }
-
-               if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
-                       rte_pktmbuf_free(rarp_mbuf);
-                       rarp_mbuf = NULL;
-               } else {
-                       count -= 1;
-               }
+               count -= 1;
        }
 
        free_entries = *((volatile uint16_t *)&vq->avail->idx) -
@@ -1204,7 +1315,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        if (free_entries == 0)
                goto out;
 
-       LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
        /* Prefetch available and used ring */
        avail_idx = vq->last_avail_idx & (vq->size - 1);
@@ -1214,7 +1325,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
        count = RTE_MIN(count, MAX_PKT_BURST);
        count = RTE_MIN(count, free_entries);
-       LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+       VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
                        dev->vid, count);
 
        /* Retrieve all of the head indexes first to avoid caching issues. */
@@ -1230,20 +1341,37 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        /* Prefetch descriptor index. */
        rte_prefetch0(&vq->desc[desc_indexes[0]]);
        for (i = 0; i < count; i++) {
-               struct vring_desc *desc;
+               struct vring_desc *desc, *idesc = NULL;
                uint16_t sz, idx;
+               uint64_t dlen;
                int err;
 
                if (likely(i + 1 < count))
                        rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
 
                if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+                       dlen = vq->desc[desc_indexes[i]].len;
                        desc = (struct vring_desc *)(uintptr_t)
-                               rte_vhost_gpa_to_vva(dev->mem,
-                                       vq->desc[desc_indexes[i]].addr);
+                               vhost_iova_to_vva(dev, vq,
+                                               vq->desc[desc_indexes[i]].addr,
+                                               &dlen,
+                                               VHOST_ACCESS_RO);
                        if (unlikely(!desc))
                                break;
 
+                       if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
+                               /*
+                                * The indirect desc table is not contiguous
+                                * in process VA space, we have to copy it.
+                                */
+                               idesc = alloc_copy_ind_table(dev, vq,
+                                               &vq->desc[desc_indexes[i]]);
+                               if (unlikely(!idesc))
+                                       break;
+
+                               desc = idesc;
+                       }
+
                        rte_prefetch0(desc);
                        sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
                        idx = 0;
@@ -1257,6 +1385,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                if (unlikely(pkts[i] == NULL)) {
                        RTE_LOG(ERR, VHOST_DATA,
                                "Failed to allocate memory for mbuf.\n");
+                       free_ind_table(idesc);
                        break;
                }
 
@@ -1264,6 +1393,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                                        mbuf_pool);
                if (unlikely(err)) {
                        rte_pktmbuf_free(pkts[i]);
+                       free_ind_table(idesc);
                        break;
                }
 
@@ -1273,6 +1403,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        zmbuf = get_zmbuf(vq);
                        if (!zmbuf) {
                                rte_pktmbuf_free(pkts[i]);
+                               free_ind_table(idesc);
                                break;
                        }
                        zmbuf->mbuf = pkts[i];
@@ -1289,6 +1420,9 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                        vq->nr_zmbuf += 1;
                        TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
                }
+
+               if (unlikely(!!idesc))
+                       free_ind_table(idesc);
        }
        vq->last_avail_idx += i;
 
@@ -1299,6 +1433,12 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
        }
 
 out:
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
        if (unlikely(rarp_mbuf != NULL)) {
                /*
                 * Inject it to the head of "pkts" array, so that switch's mac