vhost: replace SMP with thread fence for control path
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 5e8c6b9..fec08b2 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
 #include <rte_arp.h>
 #include <rte_spinlock.h>
 #include <rte_malloc.h>
+#include <rte_vhost_async.h>
 
 #include "iotlb.h"
 #include "vhost.h"
 
-#define MAX_PKT_BURST 32
-
 #define MAX_BATCH_LEN 256
 
+#define VHOST_ASYNC_BATCH_THRESHOLD 32
+
 static  __rte_always_inline bool
 rxvq_is_mergeable(struct virtio_net *dev)
 {
@@ -116,6 +117,31 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
                sizeof(vq->used->idx));
 }
 
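+/*
+ * Async variant of flush_shadow_used_ring_split(): write the shadow entries
+ * into the used ring but leave used->idx untouched; it is only advanced once
+ * the async copies are reported complete (see rte_vhost_poll_enqueue_completed).
+ */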
+static __rte_always_inline void
+async_flush_shadow_used_ring_split(struct virtio_net *dev,
+       struct vhost_virtqueue *vq)
+{
+       uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
+
+       if (used_idx + vq->shadow_used_idx <= vq->size) {
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
+                                         vq->shadow_used_idx);
+       } else {
+               uint16_t size;
+
+               /* flush used ring entries from used_idx up to the end of the ring */
+               size = vq->size - used_idx;
+               do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
+
+               /* then flush the remaining entries, wrapping to the start of the ring */
+               do_flush_shadow_used_ring_split(dev, vq, 0, size,
+                                         vq->shadow_used_idx - size);
+       }
+
+       vq->last_used_idx += vq->shadow_used_idx;
+       vq->shadow_used_idx = 0;
+}
+
 static __rte_always_inline void
 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
                         uint16_t desc_idx, uint32_t len)
@@ -145,7 +171,8 @@ vhost_flush_enqueue_shadow_packed(struct virtio_net *dev,
                        used_idx -= vq->size;
        }
 
-       rte_smp_wmb();
+       /* The ordering for storing desc flags needs to be enforced. */
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
        for (i = 0; i < vq->shadow_used_idx; i++) {
                uint16_t flags;
@@ -196,8 +223,9 @@ vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
        struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
 
        vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
-       rte_smp_wmb();
-       vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+       /* desc flags is the synchronization point for virtio packed vring */
+       __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+                        used_elem->flags, __ATOMIC_RELEASE);
 
        vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
                                   sizeof(struct vring_packed_desc),
@@ -227,7 +255,7 @@ vhost_flush_enqueue_batch_packed(struct virtio_net *dev,
                vq->desc_packed[vq->last_used_idx + i].len = lens[i];
        }
 
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;
@@ -286,7 +314,7 @@ vhost_shadow_dequeue_batch_packed(struct virtio_net *dev,
                vq->desc_packed[vq->last_used_idx + i].len = 0;
        }
 
-       rte_smp_wmb();
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
        vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
                vq->desc_packed[vq->last_used_idx + i].flags = flags;
 
@@ -905,6 +933,209 @@ out:
        return error;
 }
 
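+/*
+ * Helpers building the iovec lists and transfer descriptors that are handed
+ * to the registered async copy engine.
+ */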
+static __rte_always_inline void
+async_fill_vec(struct iovec *v, void *base, size_t len)
+{
+       v->iov_base = base;
+       v->iov_len = len;
+}
+
+static __rte_always_inline void
+async_fill_iter(struct rte_vhost_iov_iter *it, size_t count,
+       struct iovec *vec, unsigned long nr_seg)
+{
+       it->offset = 0;
+       it->count = count;
+
+       if (count) {
+               it->iov = vec;
+               it->nr_segs = nr_seg;
+       } else {
+               it->iov = 0;
+               it->nr_segs = 0;
+       }
+}
+
+static __rte_always_inline void
+async_fill_desc(struct rte_vhost_async_desc *desc,
+       struct rte_vhost_iov_iter *src, struct rte_vhost_iov_iter *dst)
+{
+       desc->src = src;
+       desc->dst = dst;
+}
+
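+/*
+ * Fill the guest buffers described by buf_vec from the mbuf chain: chunks of
+ * at least vq->async_threshold bytes that are contiguous in host physical
+ * memory are recorded as src/dst iovecs for the async copy engine, everything
+ * else is copied synchronously by the CPU.
+ */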
+static __rte_always_inline int
+async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       struct rte_mbuf *m, struct buf_vector *buf_vec,
+                       uint16_t nr_vec, uint16_t num_buffers,
+                       struct iovec *src_iovec, struct iovec *dst_iovec,
+                       struct rte_vhost_iov_iter *src_it,
+                       struct rte_vhost_iov_iter *dst_it)
+{
+       uint32_t vec_idx = 0;
+       uint32_t mbuf_offset, mbuf_avail;
+       uint32_t buf_offset, buf_avail;
+       uint64_t buf_addr, buf_iova, buf_len;
+       uint32_t cpy_len, cpy_threshold;
+       uint64_t hdr_addr;
+       struct rte_mbuf *hdr_mbuf;
+       struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+       struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
+       int error = 0;
+       uint64_t mapped_len;
+
+       uint32_t tlen = 0;
+       int tvec_idx = 0;
+       void *hpa;
+
+       if (unlikely(m == NULL)) {
+               error = -1;
+               goto out;
+       }
+
+       cpy_threshold = vq->async_threshold;
+
+       buf_addr = buf_vec[vec_idx].buf_addr;
+       buf_iova = buf_vec[vec_idx].buf_iova;
+       buf_len = buf_vec[vec_idx].buf_len;
+
+       if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
+               error = -1;
+               goto out;
+       }
+
+       hdr_mbuf = m;
+       hdr_addr = buf_addr;
+       if (unlikely(buf_len < dev->vhost_hlen))
+               hdr = &tmp_hdr;
+       else
+               hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
+
+       VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
+               dev->vid, num_buffers);
+
+       if (unlikely(buf_len < dev->vhost_hlen)) {
+               buf_offset = dev->vhost_hlen - buf_len;
+               vec_idx++;
+               buf_addr = buf_vec[vec_idx].buf_addr;
+               buf_iova = buf_vec[vec_idx].buf_iova;
+               buf_len = buf_vec[vec_idx].buf_len;
+               buf_avail = buf_len - buf_offset;
+       } else {
+               buf_offset = dev->vhost_hlen;
+               buf_avail = buf_len - dev->vhost_hlen;
+       }
+
+       mbuf_avail  = rte_pktmbuf_data_len(m);
+       mbuf_offset = 0;
+
+       while (mbuf_avail != 0 || m->next != NULL) {
+               /* done with current buf, get the next one */
+               if (buf_avail == 0) {
+                       vec_idx++;
+                       if (unlikely(vec_idx >= nr_vec)) {
+                               error = -1;
+                               goto out;
+                       }
+
+                       buf_addr = buf_vec[vec_idx].buf_addr;
+                       buf_iova = buf_vec[vec_idx].buf_iova;
+                       buf_len = buf_vec[vec_idx].buf_len;
+
+                       buf_offset = 0;
+                       buf_avail  = buf_len;
+               }
+
+               /* done with current mbuf, get the next one */
+               if (mbuf_avail == 0) {
+                       m = m->next;
+
+                       mbuf_offset = 0;
+                       mbuf_avail  = rte_pktmbuf_data_len(m);
+               }
+
+               if (hdr_addr) {
+                       virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+                       if (rxvq_is_mergeable(dev))
+                               ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
+                                               num_buffers);
+
+                       if (unlikely(hdr == &tmp_hdr)) {
+                               copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
+                       } else {
+                               PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+                                               dev->vhost_hlen, 0);
+                               vhost_log_cache_write_iova(dev, vq,
+                                               buf_vec[0].buf_iova,
+                                               dev->vhost_hlen);
+                       }
+
+                       hdr_addr = 0;
+               }
+
+               cpy_len = RTE_MIN(buf_avail, mbuf_avail);
+
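+               /*
+                * Chunks of cpy_len >= async_threshold are mapped to host
+                * physical addresses and queued for the async engine; stop as
+                * soon as a chunk is too small or not physically contiguous.
+                */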
+               while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
+                       hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+                                       buf_iova + buf_offset,
+                                       cpy_len, &mapped_len);
+
+                       if (unlikely(!hpa || mapped_len < cpy_threshold))
+                               break;
+
+                       async_fill_vec(src_iovec + tvec_idx,
+                               (void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
+                               mbuf_offset), (size_t)mapped_len);
+
+                       async_fill_vec(dst_iovec + tvec_idx,
+                                       hpa, (size_t)mapped_len);
+
+                       tlen += (uint32_t)mapped_len;
+                       cpy_len -= (uint32_t)mapped_len;
+                       mbuf_avail  -= (uint32_t)mapped_len;
+                       mbuf_offset += (uint32_t)mapped_len;
+                       buf_avail  -= (uint32_t)mapped_len;
+                       buf_offset += (uint32_t)mapped_len;
+                       tvec_idx++;
+               }
+
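+               /*
+                * Whatever is left below the threshold (or not physically
+                * contiguous) falls back to the synchronous CPU copy path.
+                */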
+               if (likely(cpy_len)) {
+                       if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
+                               rte_memcpy(
+                               (void *)((uintptr_t)(buf_addr + buf_offset)),
+                               rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+                               cpy_len);
+
+                               PRINT_PACKET(dev,
+                                       (uintptr_t)(buf_addr + buf_offset),
+                                       cpy_len, 0);
+                       } else {
+                               batch_copy[vq->batch_copy_nb_elems].dst =
+                               (void *)((uintptr_t)(buf_addr + buf_offset));
+                               batch_copy[vq->batch_copy_nb_elems].src =
+                               rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+                               batch_copy[vq->batch_copy_nb_elems].log_addr =
+                                       buf_iova + buf_offset;
+                               batch_copy[vq->batch_copy_nb_elems].len =
+                                       cpy_len;
+                               vq->batch_copy_nb_elems++;
+                       }
+
+                       mbuf_avail  -= cpy_len;
+                       mbuf_offset += cpy_len;
+                       buf_avail  -= cpy_len;
+                       buf_offset += cpy_len;
+               }
+
+       }
+
+out:
+       async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+       async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+
+       return error;
+}
+
 static __rte_always_inline int
 vhost_enqueue_single_packed(struct virtio_net *dev,
                            struct vhost_virtqueue *vq,
@@ -918,7 +1149,7 @@ vhost_enqueue_single_packed(struct virtio_net *dev,
        uint16_t buf_id = 0;
        uint32_t len = 0;
        uint16_t desc_count;
-       uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+       uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t num_buffers = 0;
        uint32_t buffer_len[vq->size];
        uint16_t buffer_buf_id[vq->size];
@@ -1033,7 +1264,7 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
        uint16_t avail_idx = vq->last_avail_idx;
        uint64_t desc_addrs[PACKED_BATCH_SIZE];
        struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
-       uint32_t buf_offset = dev->vhost_hlen;
+       uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint64_t lens[PACKED_BATCH_SIZE];
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;
@@ -1052,8 +1283,6 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
                        return -1;
        }
 
-       rte_smp_rmb();
-
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;
 
@@ -1079,7 +1308,8 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
                rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
                hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
                                        (uintptr_t)desc_addrs[i];
-               lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+               lens[i] = pkts[i]->pkt_len +
+                       sizeof(struct virtio_net_hdr_mrg_rxbuf);
        }
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
@@ -1113,7 +1343,6 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t nr_descs = 0;
 
-       rte_smp_rmb();
        if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
                                                 &nr_descs) < 0)) {
                VHOST_LOG_DATA(DEBUG,
@@ -1133,8 +1362,8 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 
 static __rte_noinline uint32_t
 virtio_dev_rx_packed(struct virtio_net *dev,
-                    struct vhost_virtqueue *vq,
-                    struct rte_mbuf **pkts,
+                    struct vhost_virtqueue *__rte_restrict vq,
+                    struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count)
 {
        uint32_t pkt_idx = 0;
@@ -1219,7 +1448,7 @@ out_access_unlock:
 
 uint16_t
 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
-       struct rte_mbuf **pkts, uint16_t count)
+       struct rte_mbuf **__rte_restrict pkts, uint16_t count)
 {
        struct virtio_net *dev = get_device(vid);
 
@@ -1236,6 +1465,313 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
        return virtio_dev_rx(dev, queue_id, pkts, count);
 }
 
+static __rte_always_inline uint16_t
+virtio_dev_rx_async_get_info_idx(uint16_t pkts_idx,
+       uint16_t vq_size, uint16_t n_inflight)
+{
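+       /*
+        * pkts_idx is the next free slot in the pending ring; step back
+        * n_inflight entries (with wrap-around) to reach the oldest
+        * in-flight packet.
+        */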
+       return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
+               (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
+}
+
+static __rte_noinline uint32_t
+virtio_dev_rx_async_submit_split(struct virtio_net *dev,
+       struct vhost_virtqueue *vq, uint16_t queue_id,
+       struct rte_mbuf **pkts, uint32_t count)
+{
+       uint32_t pkt_idx = 0, pkt_burst_idx = 0;
+       uint16_t num_buffers;
+       struct buf_vector buf_vec[BUF_VECTOR_MAX];
+       uint16_t avail_head;
+
+       struct rte_vhost_iov_iter *it_pool = vq->it_pool;
+       struct iovec *vec_pool = vq->vec_pool;
+       struct rte_vhost_async_desc tdes[MAX_PKT_BURST];
+       struct iovec *src_iovec = vec_pool;
+       struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+       struct rte_vhost_iov_iter *src_it = it_pool;
+       struct rte_vhost_iov_iter *dst_it = it_pool + 1;
+       uint16_t n_free_slot, slot_idx = 0;
+       uint16_t pkt_err = 0;
+       uint16_t segs_await = 0;
+       struct async_inflight_info *pkts_info = vq->async_pkts_info;
+       int n_pkts = 0;
+
+       /*
+        * The ordering between avail index and desc reads needs to be enforced.
+        */
+       avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
+
+       rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
+
+       for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
+               uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
+               uint16_t nr_vec = 0;
+
+               if (unlikely(reserve_avail_buf_split(dev, vq,
+                                               pkt_len, buf_vec, &num_buffers,
+                                               avail_head, &nr_vec) < 0)) {
+                       VHOST_LOG_DATA(DEBUG,
+                               "(%d) failed to get enough desc from vring\n",
+                               dev->vid);
+                       vq->shadow_used_idx -= num_buffers;
+                       break;
+               }
+
+               VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
+                       dev->vid, vq->last_avail_idx,
+                       vq->last_avail_idx + num_buffers);
+
+               if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx],
+                               buf_vec, nr_vec, num_buffers,
+                               src_iovec, dst_iovec, src_it, dst_it) < 0) {
+                       vq->shadow_used_idx -= num_buffers;
+                       break;
+               }
+
+               slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
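+               /*
+                * A non-zero src_it->count means part of this packet was mapped
+                * for the async engine; zero means it is copied entirely by the
+                * CPU (batched in do_data_copy_enqueue() below).
+                */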
+               if (src_it->count) {
+                       async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
+                       pkt_burst_idx++;
+                       pkts_info[slot_idx].descs = num_buffers;
+                       pkts_info[slot_idx].segs = src_it->nr_segs;
+                       /* count these segments before advancing the iterators */
+                       segs_await += src_it->nr_segs;
+                       src_iovec += src_it->nr_segs;
+                       dst_iovec += dst_it->nr_segs;
+                       src_it += 2;
+                       dst_it += 2;
+               } else {
+                       pkts_info[slot_idx].info = num_buffers;
+                       vq->async_pkts_inflight_n++;
+               }
+
+               vq->last_avail_idx += num_buffers;
+
+               /*
+                * Conditions that trigger a transfer to the async device:
+                * - the number of buffered packets reaches the batch threshold
+                * - this is the last packet of the burst enqueue
+                * - fewer unused async iovec slots are left than one packet may need
+                */
+               if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+                       (pkt_idx == count - 1 && pkt_burst_idx) ||
+                       (VHOST_MAX_ASYNC_VEC / 2 - segs_await <
+                       BUF_VECTOR_MAX)) {
+                       n_pkts = vq->async_ops.transfer_data(dev->vid,
+                                       queue_id, tdes, 0, pkt_burst_idx);
+                       src_iovec = vec_pool;
+                       dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
+                       src_it = it_pool;
+                       dst_it = it_pool + 1;
+                       segs_await = 0;
+                       vq->async_pkts_inflight_n += pkt_burst_idx;
+
+                       if (unlikely(n_pkts < (int)pkt_burst_idx)) {
+                               /*
+                                * Record the number of failed packets here; the
+                                * actual error handling is done when the
+                                * application polls for completions.
+                                */
+                               pkt_err = pkt_burst_idx - n_pkts;
+                               pkt_burst_idx = 0;
+                               break;
+                       }
+
+                       pkt_burst_idx = 0;
+               }
+       }
+
+       if (pkt_burst_idx) {
+               n_pkts = vq->async_ops.transfer_data(dev->vid,
+                               queue_id, tdes, 0, pkt_burst_idx);
+               vq->async_pkts_inflight_n += pkt_burst_idx;
+
+               if (unlikely(n_pkts < (int)pkt_burst_idx))
+                       pkt_err = pkt_burst_idx - n_pkts;
+       }
+
+       do_data_copy_enqueue(dev, vq);
+
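+       /*
+        * If the async engine accepted fewer transfers than submitted, walk
+        * backwards from the last prepared packet and release the descriptors
+        * reserved for the failed ones.
+        */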
+       while (unlikely(pkt_err && pkt_idx)) {
+               if (pkts_info[slot_idx].segs)
+                       pkt_err--;
+               vq->last_avail_idx -= pkts_info[slot_idx].descs;
+               vq->shadow_used_idx -= pkts_info[slot_idx].descs;
+               vq->async_pkts_inflight_n--;
+               slot_idx = (slot_idx - 1) & (vq->size - 1);
+               pkt_idx--;
+       }
+
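+       /*
+        * Store the enqueued mbufs in the pending ring (wrapping if needed) so
+        * they can be handed back to the application once their copies have
+        * completed.
+        */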
+       n_free_slot = vq->size - vq->async_pkts_idx;
+       if (n_free_slot > pkt_idx) {
+               rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+                       pkts, pkt_idx * sizeof(uintptr_t));
+               vq->async_pkts_idx += pkt_idx;
+       } else {
+               rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
+                       pkts, n_free_slot * sizeof(uintptr_t));
+               rte_memcpy(&vq->async_pkts_pending[0],
+                       &pkts[n_free_slot],
+                       (pkt_idx - n_free_slot) * sizeof(uintptr_t));
+               vq->async_pkts_idx = pkt_idx - n_free_slot;
+       }
+
+       if (likely(vq->shadow_used_idx))
+               async_flush_shadow_used_ring_split(dev, vq);
+
+       return pkt_idx;
+}
+
+uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+               struct rte_mbuf **pkts, uint16_t count)
+{
+       struct virtio_net *dev = get_device(vid);
+       struct vhost_virtqueue *vq;
+       uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
+       uint16_t start_idx, pkts_idx, vq_size;
+       uint16_t n_inflight;
+       struct async_inflight_info *pkts_info;
+
+       if (!dev)
+               return 0;
+
+       VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+               VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+                       dev->vid, __func__, queue_id);
+               return 0;
+       }
+
+       vq = dev->virtqueue[queue_id];
+
+       if (unlikely(!vq->async_registered)) {
+               VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+                       dev->vid, __func__, queue_id);
+               return 0;
+       }
+
+       rte_spinlock_lock(&vq->access_lock);
+
+       n_inflight = vq->async_pkts_inflight_n;
+       pkts_idx = vq->async_pkts_idx;
+       pkts_info = vq->async_pkts_info;
+       vq_size = vq->size;
+       start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
+               vq_size, vq->async_pkts_inflight_n);
+
+       if (count > vq->async_last_pkts_n)
+               n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+                       queue_id, 0, count - vq->async_last_pkts_n);
+       n_pkts_cpl += vq->async_last_pkts_n;
+
+       rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
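+       /*
+        * Complete packets in submission order: packets copied entirely by the
+        * CPU (segs == 0) are done immediately, async packets each consume one
+        * reported completion.
+        */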
+       while (likely((n_pkts_put < count) && n_inflight)) {
+               uint16_t info_idx = (start_idx + n_pkts_put) & (vq_size - 1);
+               if (n_pkts_cpl && pkts_info[info_idx].segs)
+                       n_pkts_cpl--;
+               else if (!n_pkts_cpl && pkts_info[info_idx].segs)
+                       break;
+               n_pkts_put++;
+               n_inflight--;
+               n_descs += pkts_info[info_idx].descs;
+       }
+
+       vq->async_last_pkts_n = n_pkts_cpl;
+
+       if (n_pkts_put) {
+               vq->async_pkts_inflight_n = n_inflight;
+               if (likely(vq->enabled && vq->access_ok)) {
+                       __atomic_add_fetch(&vq->used->idx,
+                                       n_descs, __ATOMIC_RELEASE);
+                       vhost_vring_call_split(dev, vq);
+               }
+
+               if (start_idx + n_pkts_put <= vq_size) {
+                       rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+                               n_pkts_put * sizeof(uintptr_t));
+               } else {
+                       rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
+                               (vq_size - start_idx) * sizeof(uintptr_t));
+                       rte_memcpy(&pkts[vq_size - start_idx],
+                               vq->async_pkts_pending,
+                               (n_pkts_put + start_idx - vq_size) *
+                               sizeof(uintptr_t));
+               }
+       }
+
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return n_pkts_put;
+}
+
+static __rte_always_inline uint32_t
+virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
+       struct rte_mbuf **pkts, uint32_t count)
+{
+       struct vhost_virtqueue *vq;
+       uint32_t nb_tx = 0;
+
+       VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+       if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+               VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+                       dev->vid, __func__, queue_id);
+               return 0;
+       }
+
+       vq = dev->virtqueue[queue_id];
+
+       rte_spinlock_lock(&vq->access_lock);
+
+       if (unlikely(vq->enabled == 0 || !vq->async_registered))
+               goto out_access_unlock;
+
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_lock(vq);
+
+       if (unlikely(vq->access_ok == 0))
+               if (unlikely(vring_translate(dev, vq) < 0))
+                       goto out;
+
+       count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
+       if (count == 0)
+               goto out;
+
+       /* TODO: packed queue not implemented */
+       if (vq_is_packed(dev))
+               nb_tx = 0;
+       else
+               nb_tx = virtio_dev_rx_async_submit_split(dev,
+                               vq, queue_id, pkts, count);
+
+out:
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+               vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+       rte_spinlock_unlock(&vq->access_lock);
+
+       return nb_tx;
+}
+
+uint16_t
+rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
+               struct rte_mbuf **pkts, uint16_t count)
+{
+       struct virtio_net *dev = get_device(vid);
+
+       if (!dev)
+               return 0;
+
+       if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+               VHOST_LOG_DATA(ERR,
+                       "(%d) %s: built-in vhost net backend is disabled.\n",
+                       dev->vid, __func__);
+               return 0;
+       }
+
+       return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
+}
+
 static inline bool
 virtio_net_with_host_offload(struct virtio_net *dev)
 {
@@ -1278,7 +1814,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        case RTE_ETHER_TYPE_IPV4:
                ipv4_hdr = l3_hdr;
                *l4_proto = ipv4_hdr->next_proto_id;
-               m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+               m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
                *l4_hdr = (char *)l3_hdr + m->l3_len;
                m->ol_flags |= PKT_TX_IPV4;
                break;
@@ -1378,7 +1914,7 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                  struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
 {
        uint32_t buf_avail, buf_offset;
-       uint64_t buf_addr, buf_iova, buf_len;
+       uint64_t buf_addr, buf_len;
        uint32_t mbuf_avail, mbuf_offset;
        uint32_t cpy_len;
        struct rte_mbuf *cur = m, *prev = m;
@@ -1390,7 +1926,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        int error = 0;
 
        buf_addr = buf_vec[vec_idx].buf_addr;
-       buf_iova = buf_vec[vec_idx].buf_iova;
        buf_len = buf_vec[vec_idx].buf_len;
 
        if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
@@ -1420,14 +1955,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                buf_offset = dev->vhost_hlen - buf_len;
                vec_idx++;
                buf_addr = buf_vec[vec_idx].buf_addr;
-               buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
                buf_avail  = buf_len - buf_offset;
        } else if (buf_len == dev->vhost_hlen) {
                if (unlikely(++vec_idx >= nr_vec))
                        goto out;
                buf_addr = buf_vec[vec_idx].buf_addr;
-               buf_iova = buf_vec[vec_idx].buf_iova;
                buf_len = buf_vec[vec_idx].buf_len;
 
                buf_offset = 0;
@@ -1444,48 +1977,23 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
        mbuf_offset = 0;
        mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
        while (1) {
-               uint64_t hpa;
-
                cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
-               /*
-                * A desc buf might across two host physical pages that are
-                * not continuous. In such case (gpa_to_hpa returns 0), data
-                * will be copied even though zero copy is enabled.
-                */
-               if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
-                                       buf_iova + buf_offset, cpy_len)))) {
-                       cur->data_len = cpy_len;
-                       cur->data_off = 0;
-                       cur->buf_addr =
-                               (void *)(uintptr_t)(buf_addr + buf_offset);
-                       cur->buf_iova = hpa;
-
-                       /*
-                        * In zero copy mode, one mbuf can only reference data
-                        * for one or partial of one desc buff.
-                        */
-                       mbuf_avail = cpy_len;
-               } else {
-                       if (likely(cpy_len > MAX_BATCH_LEN ||
-                                  vq->batch_copy_nb_elems >= vq->size ||
-                                  (hdr && cur == m))) {
-                               rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
-                                                                  mbuf_offset),
-                                          (void *)((uintptr_t)(buf_addr +
-                                                          buf_offset)),
-                                          cpy_len);
-                       } else {
-                               batch_copy[vq->batch_copy_nb_elems].dst =
-                                       rte_pktmbuf_mtod_offset(cur, void *,
-                                                               mbuf_offset);
-                               batch_copy[vq->batch_copy_nb_elems].src =
+               if (likely(cpy_len > MAX_BATCH_LEN ||
+                                       vq->batch_copy_nb_elems >= vq->size ||
+                                       (hdr && cur == m))) {
+                       rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+                                               mbuf_offset),
                                        (void *)((uintptr_t)(buf_addr +
-                                                               buf_offset));
-                               batch_copy[vq->batch_copy_nb_elems].len =
-                                       cpy_len;
-                               vq->batch_copy_nb_elems++;
-                       }
+                                                       buf_offset)), cpy_len);
+               } else {
+                       batch_copy[vq->batch_copy_nb_elems].dst =
+                               rte_pktmbuf_mtod_offset(cur, void *,
+                                               mbuf_offset);
+                       batch_copy[vq->batch_copy_nb_elems].src =
+                               (void *)((uintptr_t)(buf_addr + buf_offset));
+                       batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+                       vq->batch_copy_nb_elems++;
                }
 
                mbuf_avail  -= cpy_len;
@@ -1499,7 +2007,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                break;
 
                        buf_addr = buf_vec[vec_idx].buf_addr;
-                       buf_iova = buf_vec[vec_idx].buf_iova;
                        buf_len = buf_vec[vec_idx].buf_len;
 
                        buf_offset = 0;
@@ -1521,8 +2028,6 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                error = -1;
                                goto out;
                        }
-                       if (unlikely(dev->dequeue_zero_copy))
-                               rte_mbuf_refcnt_update(cur, 1);
 
                        prev->next = cur;
                        prev->data_len = mbuf_offset;
@@ -1546,37 +2051,6 @@ out:
        return error;
 }
 
-static __rte_always_inline struct zcopy_mbuf *
-get_zmbuf(struct vhost_virtqueue *vq)
-{
-       uint16_t i;
-       uint16_t last;
-       int tries = 0;
-
-       /* search [last_zmbuf_idx, zmbuf_size) */
-       i = vq->last_zmbuf_idx;
-       last = vq->zmbuf_size;
-
-again:
-       for (; i < last; i++) {
-               if (vq->zmbufs[i].in_use == 0) {
-                       vq->last_zmbuf_idx = i + 1;
-                       vq->zmbufs[i].in_use = 1;
-                       return &vq->zmbufs[i];
-               }
-       }
-
-       tries++;
-       if (tries == 1) {
-               /* search [0, last_zmbuf_idx) */
-               i = 0;
-               last = vq->last_zmbuf_idx;
-               goto again;
-       }
-
-       return NULL;
-}
-
 static void
 virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
 {
@@ -1592,16 +2066,8 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
        rte_iova_t iova;
        void *buf;
 
-       /* Try to use pkt buffer to store shinfo to reduce the amount of memory
-        * required, otherwise store shinfo in the new buffer.
-        */
-       if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
-               shinfo = rte_pktmbuf_mtod(pkt,
-                                         struct rte_mbuf_ext_shared_info *);
-       else {
-               total_len += sizeof(*shinfo) + sizeof(uintptr_t);
-               total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
-       }
+       total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+       total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
 
        if (unlikely(total_len > UINT16_MAX))
                return -ENOSPC;
@@ -1612,18 +2078,12 @@ virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
                return -ENOMEM;
 
        /* Initialize shinfo */
-       if (shinfo) {
-               shinfo->free_cb = virtio_dev_extbuf_free;
-               shinfo->fcb_opaque = buf;
-               rte_mbuf_ext_refcnt_set(shinfo, 1);
-       } else {
-               shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
-                                             virtio_dev_extbuf_free, buf);
-               if (unlikely(shinfo == NULL)) {
-                       rte_free(buf);
-                       VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
-                       return -1;
-               }
+       shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+                                               virtio_dev_extbuf_free, buf);
+       if (unlikely(shinfo == NULL)) {
+               rte_free(buf);
+               VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+               return -1;
        }
 
        iova = rte_malloc_virt2iova(buf);
@@ -1673,30 +2133,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
        uint16_t i;
        uint16_t free_entries;
-
-       if (unlikely(dev->dequeue_zero_copy)) {
-               struct zcopy_mbuf *zmbuf, *next;
-
-               for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
-                    zmbuf != NULL; zmbuf = next) {
-                       next = TAILQ_NEXT(zmbuf, next);
-
-                       if (mbuf_is_consumed(zmbuf->mbuf)) {
-                               update_shadow_used_ring_split(vq,
-                                               zmbuf->desc_idx, 0);
-                               TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
-                               restore_mbuf(zmbuf->mbuf);
-                               rte_pktmbuf_free(zmbuf->mbuf);
-                               put_zmbuf(zmbuf);
-                               vq->nr_zmbuf -= 1;
-                       }
-               }
-
-               if (likely(vq->shadow_used_idx)) {
-                       flush_shadow_used_ring_split(dev, vq);
-                       vhost_vring_call_split(dev, vq);
-               }
-       }
+       uint16_t dropped = 0;
+       static bool allocerr_warned;
 
        /*
         * The ordering between avail index and
@@ -1730,56 +2168,53 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                                VHOST_ACCESS_RO) < 0))
                        break;
 
-               if (likely(dev->dequeue_zero_copy == 0))
-                       update_shadow_used_ring_split(vq, head_idx, 0);
+               update_shadow_used_ring_split(vq, head_idx, 0);
 
                pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-               if (unlikely(pkts[i] == NULL))
+               if (unlikely(pkts[i] == NULL)) {
+                       /*
+                        * mbuf allocation fails for jumbo packets when external
+                        * buffer allocation is not allowed and a linear
+                        * buffer is required. Drop this packet.
+                        */
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed mbuf alloc of size %d from %s on %s.\n",
+                                       buf_len, mbuf_pool->name, dev->ifname);
+                               allocerr_warned = true;
+                       }
+                       dropped += 1;
+                       i++;
                        break;
+               }
 
                err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
                                mbuf_pool);
                if (unlikely(err)) {
                        rte_pktmbuf_free(pkts[i]);
-                       break;
-               }
-
-               if (unlikely(dev->dequeue_zero_copy)) {
-                       struct zcopy_mbuf *zmbuf;
-
-                       zmbuf = get_zmbuf(vq);
-                       if (!zmbuf) {
-                               rte_pktmbuf_free(pkts[i]);
-                               break;
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed to copy desc to mbuf on %s.\n",
+                                       dev->ifname);
+                               allocerr_warned = true;
                        }
-                       zmbuf->mbuf = pkts[i];
-                       zmbuf->desc_idx = head_idx;
-
-                       /*
-                        * Pin lock the mbuf; we will check later to see
-                        * whether the mbuf is freed (when we are the last
-                        * user) or not. If that's the case, we then could
-                        * update the used ring safely.
-                        */
-                       rte_mbuf_refcnt_update(pkts[i], 1);
-
-                       vq->nr_zmbuf += 1;
-                       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
+                       dropped += 1;
+                       i++;
+                       break;
                }
        }
+
        vq->last_avail_idx += i;
 
-       if (likely(dev->dequeue_zero_copy == 0)) {
-               do_data_copy_dequeue(vq);
-               if (unlikely(i < count))
-                       vq->shadow_used_idx = i;
-               if (likely(vq->shadow_used_idx)) {
-                       flush_shadow_used_ring_split(dev, vq);
-                       vhost_vring_call_split(dev, vq);
-               }
+       do_data_copy_dequeue(vq);
+       if (unlikely(i < count))
+               vq->shadow_used_idx = i;
+       if (likely(vq->shadow_used_idx)) {
+               flush_shadow_used_ring_split(dev, vq);
+               vhost_vring_call_split(dev, vq);
        }
 
-       return i;
+       return (i - dropped);
 }
 
 static __rte_always_inline int
@@ -1796,7 +2231,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
        struct virtio_net_hdr *hdr;
        uint64_t lens[PACKED_BATCH_SIZE];
        uint64_t buf_lens[PACKED_BATCH_SIZE];
-       uint32_t buf_offset = dev->vhost_hlen;
+       uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uint16_t flags, i;
 
        if (unlikely(avail_idx & PACKED_BATCH_MASK))
@@ -1812,7 +2247,7 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
                        return -1;
        }
 
-       rte_smp_rmb();
+       rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
                lens[i] = descs[avail_idx + i].len;
@@ -1873,7 +2308,7 @@ virtio_dev_tx_batch_packed(struct virtio_net *dev,
                           struct rte_mbuf **pkts)
 {
        uint16_t avail_idx = vq->last_avail_idx;
-       uint32_t buf_offset = dev->vhost_hlen;
+       uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
        uintptr_t desc_addrs[PACKED_BATCH_SIZE];
        uint16_t ids[PACKED_BATCH_SIZE];
        uint16_t i;
@@ -1913,6 +2348,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
        uint32_t buf_len;
        uint16_t nr_vec = 0;
        int err;
+       static bool allocerr_warned;
 
        if (unlikely(fill_vec_buf_packed(dev, vq,
                                         vq->last_avail_idx, desc_count,
@@ -1923,14 +2359,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 
        *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
        if (unlikely(*pkts == NULL)) {
-               VHOST_LOG_DATA(ERR,
-                       "Failed to allocate memory for mbuf.\n");
+               if (!allocerr_warned) {
+                       VHOST_LOG_DATA(ERR,
+                               "Failed mbuf alloc of size %d from %s on %s.\n",
+                               buf_len, mbuf_pool->name, dev->ifname);
+                       allocerr_warned = true;
+               }
                return -1;
        }
 
        err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
                                mbuf_pool);
        if (unlikely(err)) {
+               if (!allocerr_warned) {
+                       VHOST_LOG_DATA(ERR,
+                               "Failed to copy desc to mbuf on %s.\n",
+                               dev->ifname);
+                       allocerr_warned = true;
+               }
                rte_pktmbuf_free(*pkts);
                return -1;
        }
@@ -1945,184 +2391,31 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
                            struct rte_mbuf **pkts)
 {
 
-       uint16_t buf_id, desc_count;
-
-       if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-                                       &desc_count))
-               return -1;
-
-       if (virtio_net_is_inorder(dev))
-               vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
-                                                          desc_count);
-       else
-               vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
-
-       vq_inc_last_avail_packed(vq, desc_count);
-
-       return 0;
-}
-
-static __rte_always_inline int
-virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
-                                struct vhost_virtqueue *vq,
-                                struct rte_mempool *mbuf_pool,
-                                struct rte_mbuf **pkts)
-{
-       struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
-       uintptr_t desc_addrs[PACKED_BATCH_SIZE];
-       uint16_t ids[PACKED_BATCH_SIZE];
-       uint16_t i;
-
-       uint16_t avail_idx = vq->last_avail_idx;
-
-       if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
-                                            avail_idx, desc_addrs, ids))
-               return -1;
-
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
-               zmbufs[i] = get_zmbuf(vq);
-
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-               if (!zmbufs[i])
-                       goto free_pkt;
-       }
-
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
-               zmbufs[i]->mbuf = pkts[i];
-               zmbufs[i]->desc_idx = ids[i];
-               zmbufs[i]->desc_count = 1;
-       }
-
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
-               rte_mbuf_refcnt_update(pkts[i], 1);
-
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
-               TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
-
-       vq->nr_zmbuf += PACKED_BATCH_SIZE;
-       vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
-
-       return 0;
-
-free_pkt:
-       vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
-               rte_pktmbuf_free(pkts[i]);
-
-       return -1;
-}
-
-static __rte_always_inline int
-virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
-                                 struct vhost_virtqueue *vq,
-                                 struct rte_mempool *mbuf_pool,
-                                 struct rte_mbuf **pkts)
-{
-       uint16_t buf_id, desc_count;
-       struct zcopy_mbuf *zmbuf;
-
-       if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-                                       &desc_count))
-               return -1;
-
-       zmbuf = get_zmbuf(vq);
-       if (!zmbuf) {
-               rte_pktmbuf_free(*pkts);
-               return -1;
-       }
-       zmbuf->mbuf = *pkts;
-       zmbuf->desc_idx = buf_id;
-       zmbuf->desc_count = desc_count;
-
-       rte_mbuf_refcnt_update(*pkts, 1);
-
-       vq->nr_zmbuf += 1;
-       TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
-
-       vq_inc_last_avail_packed(vq, desc_count);
-       return 0;
-}
-
-static __rte_always_inline void
-free_zmbuf(struct vhost_virtqueue *vq)
-{
-       struct zcopy_mbuf *next = NULL;
-       struct zcopy_mbuf *zmbuf;
-
-       for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
-            zmbuf != NULL; zmbuf = next) {
-               next = TAILQ_NEXT(zmbuf, next);
-
-               uint16_t last_used_idx = vq->last_used_idx;
-
-               if (mbuf_is_consumed(zmbuf->mbuf)) {
-                       uint16_t flags;
-                       flags = vq->desc_packed[last_used_idx].flags;
-                       if (vq->used_wrap_counter) {
-                               flags |= VRING_DESC_F_USED;
-                               flags |= VRING_DESC_F_AVAIL;
-                       } else {
-                               flags &= ~VRING_DESC_F_USED;
-                               flags &= ~VRING_DESC_F_AVAIL;
-                       }
-
-                       vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
-                       vq->desc_packed[last_used_idx].len = 0;
+       uint16_t buf_id, desc_count = 0;
+       int ret;
 
-                       rte_smp_wmb();
-                       vq->desc_packed[last_used_idx].flags = flags;
+       ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+                                       &desc_count);
 
-                       vq_inc_last_used_packed(vq, zmbuf->desc_count);
+       if (likely(desc_count > 0)) {
+               if (virtio_net_is_inorder(dev))
+                       vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+                                                                  desc_count);
+               else
+                       vhost_shadow_dequeue_single_packed(vq, buf_id,
+                                       desc_count);
 
-                       TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
-                       restore_mbuf(zmbuf->mbuf);
-                       rte_pktmbuf_free(zmbuf->mbuf);
-                       put_zmbuf(zmbuf);
-                       vq->nr_zmbuf -= 1;
-               }
+               vq_inc_last_avail_packed(vq, desc_count);
        }
-}
-
-static __rte_noinline uint16_t
-virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
-                          struct vhost_virtqueue *vq,
-                          struct rte_mempool *mbuf_pool,
-                          struct rte_mbuf **pkts,
-                          uint32_t count)
-{
-       uint32_t pkt_idx = 0;
-       uint32_t remained = count;
 
-       free_zmbuf(vq);
-
-       do {
-               if (remained >= PACKED_BATCH_SIZE) {
-                       if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
-                               mbuf_pool, &pkts[pkt_idx])) {
-                               pkt_idx += PACKED_BATCH_SIZE;
-                               remained -= PACKED_BATCH_SIZE;
-                               continue;
-                       }
-               }
-
-               if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
-                                                     &pkts[pkt_idx]))
-                       break;
-               pkt_idx++;
-               remained--;
-
-       } while (remained);
-
-       if (pkt_idx)
-               vhost_vring_call_packed(dev, vq);
-
-       return pkt_idx;
+       return ret;
 }
 
 static __rte_noinline uint16_t
 virtio_dev_tx_packed(struct virtio_net *dev,
-                    struct vhost_virtqueue *vq,
+                    struct vhost_virtqueue *__rte_restrict vq,
                     struct rte_mempool *mbuf_pool,
-                    struct rte_mbuf **pkts,
+                    struct rte_mbuf **__rte_restrict pkts,
                     uint32_t count)
 {
        uint32_t pkt_idx = 0;
@@ -2233,14 +2526,9 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
                count -= 1;
        }
 
-       if (vq_is_packed(dev)) {
-               if (unlikely(dev->dequeue_zero_copy))
-                       count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
-                                                          pkts, count);
-               else
-                       count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
-                                                    count);
-       } else
+       if (vq_is_packed(dev))
+               count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
+       else
                count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
 
 out: