vhost: fix build on RHEL 7.6 for Power
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index b086ad9..2a9fa7c 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
 #ifdef RTE_LIBRTE_VHOST_POSTCOPY
 #include <linux/userfaultfd.h>
 #endif
+#ifdef F_ADD_SEALS /* if file sealing is supported, so is memfd */
+#include <linux/memfd.h>
+#define MEMFD_SUPPORTED
+#endif
 
 #include <rte_common.h>
 #include <rte_malloc.h>
@@ -49,6 +53,9 @@
 #define VIRTIO_MIN_MTU 68
 #define VIRTIO_MAX_MTU 65535
 
+#define INFLIGHT_ALIGNMENT     64
+#define INFLIGHT_VERSION       0x1
+
 static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_NONE] = "VHOST_USER_NONE",
        [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
@@ -78,6 +85,8 @@ static const char *vhost_message_str[VHOST_USER_MAX] = {
        [VHOST_USER_POSTCOPY_ADVISE]  = "VHOST_USER_POSTCOPY_ADVISE",
        [VHOST_USER_POSTCOPY_LISTEN]  = "VHOST_USER_POSTCOPY_LISTEN",
        [VHOST_USER_POSTCOPY_END]  = "VHOST_USER_POSTCOPY_END",
+       [VHOST_USER_GET_INFLIGHT_FD] = "VHOST_USER_GET_INFLIGHT_FD",
+       [VHOST_USER_SET_INFLIGHT_FD] = "VHOST_USER_SET_INFLIGHT_FD",
 };
 
 static int send_vhost_reply(int sockfd, struct VhostUserMsg *msg);
@@ -93,15 +102,47 @@ get_blk_size(int fd)
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+/*
+ * Reclaim all the outstanding zmbufs for a virtqueue.
+ */
+static void
+drain_zmbuf_list(struct vhost_virtqueue *vq)
+{
+       struct zcopy_mbuf *zmbuf, *next;
+
+       for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
+            zmbuf != NULL; zmbuf = next) {
+               next = TAILQ_NEXT(zmbuf, next);
+
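+               /*
+                * Busy-wait until the application has consumed the
+                * mbuf: its payload still lives in guest memory.
+                */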
+               while (!mbuf_is_consumed(zmbuf->mbuf))
+                       usleep(1000);
+
+               TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+               restore_mbuf(zmbuf->mbuf);
+               rte_pktmbuf_free(zmbuf->mbuf);
+               put_zmbuf(zmbuf);
+               vq->nr_zmbuf -= 1;
+       }
+}
+
 static void
 free_mem_region(struct virtio_net *dev)
 {
        uint32_t i;
        struct rte_vhost_mem_region *reg;
+       struct vhost_virtqueue *vq;
 
        if (!dev || !dev->mem)
                return;
 
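+       /*
+        * Zero-copy mbufs may still reference pages of the guest memory
+        * regions about to be unmapped; reclaim them all first.
+        */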
+       if (dev->dequeue_zero_copy) {
+               for (i = 0; i < dev->nr_vring; i++) {
+                       vq = dev->virtqueue[i];
+                       if (vq)
+                               drain_zmbuf_list(vq);
+               }
+       }
+
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
@@ -128,6 +169,22 @@ vhost_backend_cleanup(struct virtio_net *dev)
                dev->log_addr = 0;
        }
 
+       if (dev->inflight_info) {
+               if (dev->inflight_info->addr) {
+                       munmap(dev->inflight_info->addr,
+                              dev->inflight_info->size);
+                       dev->inflight_info->addr = NULL;
+               }
+
+               if (dev->inflight_info->fd > 0) {
+                       close(dev->inflight_info->fd);
+                       dev->inflight_info->fd = -1;
+               }
+
+               free(dev->inflight_info);
+               dev->inflight_info = NULL;
+       }
+
        if (dev->slave_req_fd >= 0) {
                close(dev->slave_req_fd);
                dev->slave_req_fd = -1;
@@ -252,6 +309,8 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
        } else {
                dev->vhost_hlen = sizeof(struct virtio_net_hdr);
        }
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "negotiated Virtio features: 0x%" PRIx64 "\n", dev->features);
        VHOST_LOG_DEBUG(VHOST_CONFIG,
                "(%d) mergeable RX buffers %s, virtio 1 %s\n",
                dev->vid,
@@ -274,6 +333,7 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
                        dev->virtqueue[dev->nr_vring] = NULL;
                        cleanup_vq(vq, 1);
+                       cleanup_vq_inflight(dev, vq);
                        free_vq(dev, vq);
                }
        }
@@ -376,6 +436,9 @@ numa_realloc(struct virtio_net *dev, int index)
        struct batch_copy_elem *new_batch_copy_elems;
        int ret;
 
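+       /*
+        * A device that is already running must not be reallocated to
+        * another NUMA node: the data path may still be using it.
+        */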
+       if (dev->flags & VIRTIO_DEV_RUNNING)
+               return dev;
+
        old_dev = dev;
        vq = old_vq = dev->virtqueue[index];
 
@@ -524,11 +587,13 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
                uint64_t vva;
+               uint64_t req_size = *size;
 
                vva = vhost_user_iotlb_cache_find(vq, ra,
                                        size, VHOST_ACCESS_RW);
-               if (!vva)
-                       vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
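+               /*
+                * The cache lookup may have shrunk *size to the length
+                * it could actually translate; in that case, request an
+                * IOTLB update for the first byte still missing.
+                */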
+               if (req_size != *size)
+                       vhost_user_iotlb_miss(dev, (ra + *size),
+                                             VHOST_ACCESS_RW);
 
                return vva;
        }
@@ -536,6 +601,39 @@ ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
        return qva_to_vva(dev, ra, size);
 }
 
+/*
+ * Converts vring log address to GPA.
+ * If IOMMU is enabled, the log address is IOVA.
+ * If IOMMU is not enabled, the log address is already GPA.
+ * Dirty page logging is indexed by guest physical address, so the
+ * translation is required before the address can be used for logging.
+ */
+static uint64_t
+translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
+               uint64_t log_addr)
+{
+       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
+               const uint64_t exp_size = sizeof(struct vring_used) +
+                       sizeof(struct vring_used_elem) * vq->size;
+               uint64_t hva, gpa;
+               uint64_t size = exp_size;
+
+               hva = vhost_iova_to_vva(dev, vq, log_addr,
+                                       &size, VHOST_ACCESS_RW);
+               if (size != exp_size)
+                       return 0;
+
+               gpa = hva_to_gpa(dev, hva, exp_size);
+               if (!gpa) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "VQ: Failed to find GPA for log_addr: 0x%" PRIx64 " hva: 0x%" PRIx64 "\n",
+                               log_addr, hva);
+                       return 0;
+               }
+               return gpa;
+       }
+
+       return log_addr;
+}
+
 static struct virtio_net *
 translate_ring_addresses(struct virtio_net *dev, int vq_index)
 {
@@ -585,6 +683,7 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
                        return dev;
                }
 
+               vq->access_ok = 1;
                return dev;
        }
 
@@ -642,7 +741,15 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
                vq->last_avail_idx = vq->used->idx;
        }
 
-       vq->log_guest_addr = addr->log_guest_addr;
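+       /*
+        * With an IOMMU, the translation can legitimately fail while the
+        * IOTLB does not yet hold the entry; translate_log_addr() then
+        * triggers a miss and the addresses are translated again later.
+        */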
+       vq->log_guest_addr =
+               translate_log_addr(dev, vq, addr->log_guest_addr);
+       if (vq->log_guest_addr == 0) {
+               RTE_LOG(DEBUG, VHOST_CONFIG,
+                       "(%d) failed to map log_guest_addr.\n",
+                       dev->vid);
+               return dev;
+       }
+       vq->access_ok = 1;
 
        VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
                        dev->vid, vq->desc);
@@ -667,6 +774,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        struct vhost_virtqueue *vq;
        struct vhost_vring_addr *addr = &msg->payload.addr;
+       bool access_ok;
 
        if (dev->mem == NULL)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -674,6 +782,8 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
        /* addr->index refers to the queue index. The txq 1, rxq is 0. */
        vq = dev->virtqueue[msg->payload.addr.index];
 
+       access_ok = vq->access_ok;
+
        /*
         * Rings addresses should not be interpreted as long as the ring is not
         * started and enabled
@@ -682,8 +792,9 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
        vring_invalidate(dev, vq);
 
-       if (vq->enabled && (dev->features &
-                               (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
+       if ((vq->enabled && (dev->features &
+                               (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) ||
+                       access_ok) {
                dev = translate_ring_addresses(dev, msg->payload.addr.index);
                if (!dev)
                        return RTE_VHOST_MSG_RESULT_ERR;
@@ -1052,10 +1163,11 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                                goto err_mmap;
                        }
                        RTE_LOG(INFO, VHOST_CONFIG,
-                               "\t userfaultfd registered for range : %llx - %llx\n",
-                               reg_struct.range.start,
-                               reg_struct.range.start +
-                               reg_struct.range.len - 1);
+                               "\t userfaultfd registered for range : "
+                               "%" PRIx64 " - %" PRIx64 "\n",
+                               (uint64_t)reg_struct.range.start,
+                               (uint64_t)reg_struct.range.start +
+                               (uint64_t)reg_struct.range.len - 1);
 #else
                        goto err_mmap;
 #endif
@@ -1133,6 +1245,234 @@ virtio_is_ready(struct virtio_net *dev)
        return 1;
 }
 
+static void *
+inflight_mem_alloc(const char *name, size_t size, int *fd)
+{
+       void *ptr;
+       int mfd = -1;
+       char fname[20] = "/tmp/memfd-XXXXXX";
+
+       *fd = -1;
+#ifdef MEMFD_SUPPORTED
+       mfd = memfd_create(name, MFD_CLOEXEC);
+#else
+       RTE_SET_USED(name);
+#endif
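+       /*
+        * Fall back to an unlinked temporary file when memfd_create()
+        * is not available (e.g. on older kernels or C libraries).
+        */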
+       if (mfd == -1) {
+               mfd = mkstemp(fname);
+               if (mfd == -1) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to get inflight buffer fd\n");
+                       return NULL;
+               }
+
+               unlink(fname);
+       }
+
+       if (ftruncate(mfd, size) == -1) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "failed to alloc inflight buffer\n");
+               close(mfd);
+               return NULL;
+       }
+
+       ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, mfd, 0);
+       if (ptr == MAP_FAILED) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "failed to mmap inflight buffer\n");
+               close(mfd);
+               return NULL;
+       }
+
+       *fd = mfd;
+       return ptr;
+}
+
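+/*
+ * Each virtqueue gets one inflight descriptor per ring entry plus the
+ * fixed bookkeeping fields of the inflight header, rounded up to the
+ * inflight alignment so per-queue regions do not share cache lines.
+ */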
+static uint32_t
+get_pervq_shm_size_split(uint16_t queue_size)
+{
+       return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_split) *
+                                 queue_size + sizeof(uint64_t) +
+                                 sizeof(uint16_t) * 4, INFLIGHT_ALIGNMENT);
+}
+
+static uint32_t
+get_pervq_shm_size_packed(uint16_t queue_size)
+{
+       return RTE_ALIGN_MUL_CEIL(sizeof(struct rte_vhost_inflight_desc_packed)
+                                 * queue_size + sizeof(uint64_t) +
+                                 sizeof(uint16_t) * 6 + sizeof(uint8_t) * 9,
+                                 INFLIGHT_ALIGNMENT);
+}
+
+static int
+vhost_user_get_inflight_fd(struct virtio_net **pdev,
+                          VhostUserMsg *msg,
+                          int main_fd __rte_unused)
+{
+       struct rte_vhost_inflight_info_packed *inflight_packed;
+       uint64_t pervq_inflight_size, mmap_size;
+       uint16_t num_queues, queue_size;
+       struct virtio_net *dev = *pdev;
+       int fd, i, j;
+       void *addr;
+
+       if (msg->size != sizeof(msg->payload.inflight)) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "invalid get_inflight_fd message size: %d\n",
+                       msg->size);
+               return RTE_VHOST_MSG_RESULT_ERR;
+       }
+
+       if (dev->inflight_info == NULL) {
+               dev->inflight_info = calloc(1,
+                                           sizeof(struct inflight_mem_info));
+               if (!dev->inflight_info) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to alloc dev inflight area\n");
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+       }
+
+       num_queues = msg->payload.inflight.num_queues;
+       queue_size = msg->payload.inflight.queue_size;
+
+       RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd num_queues: %u\n",
+               num_queues);
+       RTE_LOG(INFO, VHOST_CONFIG, "get_inflight_fd queue_size: %u\n",
+               queue_size);
+
+       if (vq_is_packed(dev))
+               pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
+       else
+               pervq_inflight_size = get_pervq_shm_size_split(queue_size);
+
+       mmap_size = num_queues * pervq_inflight_size;
+       addr = inflight_mem_alloc("vhost-inflight", mmap_size, &fd);
+       if (!addr) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "failed to alloc vhost inflight area\n");
+               msg->payload.inflight.mmap_size = 0;
+               return RTE_VHOST_MSG_RESULT_ERR;
+       }
+       memset(addr, 0, mmap_size);
+
+       dev->inflight_info->addr = addr;
+       dev->inflight_info->size = msg->payload.inflight.mmap_size = mmap_size;
+       dev->inflight_info->fd = msg->fds[0] = fd;
+       msg->payload.inflight.mmap_offset = 0;
+       msg->fd_num = 1;
+
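+       /*
+        * For packed rings, chain each queue's inflight descriptors
+        * into an initial free list and start both wrap counters at 1,
+        * matching the initial state of a packed virtqueue.
+        */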
+       if (vq_is_packed(dev)) {
+               for (i = 0; i < num_queues; i++) {
+                       inflight_packed =
+                               (struct rte_vhost_inflight_info_packed *)addr;
+                       inflight_packed->used_wrap_counter = 1;
+                       inflight_packed->old_used_wrap_counter = 1;
+                       for (j = 0; j < queue_size; j++)
+                               inflight_packed->desc[j].next = j + 1;
+                       addr = (void *)((char *)addr + pervq_inflight_size);
+               }
+       }
+
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "send inflight mmap_size: %"PRIu64"\n",
+               msg->payload.inflight.mmap_size);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "send inflight mmap_offset: %"PRIu64"\n",
+               msg->payload.inflight.mmap_offset);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "send inflight fd: %d\n", msg->fds[0]);
+
+       return RTE_VHOST_MSG_RESULT_REPLY;
+}
+
+static int
+vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
+                          int main_fd __rte_unused)
+{
+       uint64_t mmap_size, mmap_offset;
+       uint16_t num_queues, queue_size;
+       struct virtio_net *dev = *pdev;
+       uint32_t pervq_inflight_size;
+       struct vhost_virtqueue *vq;
+       void *addr;
+       int fd, i;
+
+       fd = msg->fds[0];
+       if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "invalid set_inflight_fd message size: %d, fd: %d\n",
+                       msg->size, fd);
+               return RTE_VHOST_MSG_RESULT_ERR;
+       }
+
+       mmap_size = msg->payload.inflight.mmap_size;
+       mmap_offset = msg->payload.inflight.mmap_offset;
+       num_queues = msg->payload.inflight.num_queues;
+       queue_size = msg->payload.inflight.queue_size;
+
+       if (vq_is_packed(dev))
+               pervq_inflight_size = get_pervq_shm_size_packed(queue_size);
+       else
+               pervq_inflight_size = get_pervq_shm_size_split(queue_size);
+
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd mmap_size: %"PRIu64"\n", mmap_size);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd mmap_offset: %"PRIu64"\n", mmap_offset);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd num_queues: %u\n", num_queues);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd queue_size: %u\n", queue_size);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd fd: %d\n", fd);
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "set_inflight_fd pervq_inflight_size: %u\n",
+               pervq_inflight_size);
+
+       if (!dev->inflight_info) {
+               dev->inflight_info = calloc(1,
+                                           sizeof(struct inflight_mem_info));
+               if (dev->inflight_info == NULL) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to alloc dev inflight area\n");
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+       }
+
+       if (dev->inflight_info->addr)
+               munmap(dev->inflight_info->addr, dev->inflight_info->size);
+
+       addr = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+                   fd, mmap_offset);
+       if (addr == MAP_FAILED) {
+               RTE_LOG(ERR, VHOST_CONFIG, "failed to mmap shared memory\n");
+               return RTE_VHOST_MSG_RESULT_ERR;
+       }
+
+       if (dev->inflight_info->fd)
+               close(dev->inflight_info->fd);
+
+       dev->inflight_info->fd = fd;
+       dev->inflight_info->addr = addr;
+       dev->inflight_info->size = mmap_size;
+
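+       /*
+        * Slice the shared region into per-queue chunks and point each
+        * virtqueue at its own inflight tracking area.
+        */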
+       for (i = 0; i < num_queues; i++) {
+               vq = dev->virtqueue[i];
+               if (vq_is_packed(dev)) {
+                       vq->inflight_packed = addr;
+                       vq->inflight_packed->desc_num = queue_size;
+               } else {
+                       vq->inflight_split = addr;
+                       vq->inflight_split->desc_num = queue_size;
+               }
+               addr = (void *)((char *)addr + pervq_inflight_size);
+       }
+
+       return RTE_VHOST_MSG_RESULT_OK;
+}
+
 static int
 vhost_user_set_vring_call(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
@@ -1169,6 +1509,191 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
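+/*
+ * Order resubmit descriptors by decreasing submission counter, so that
+ * entry 0 of a sorted list holds the highest counter seen so far and
+ * vq->global_counter can resume just above it.
+ */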
+static int
+resubmit_desc_compare(const void *a, const void *b)
+{
+       const struct rte_vhost_resubmit_desc *desc0 = a;
+       const struct rte_vhost_resubmit_desc *desc1 = b;
+
+       if (desc1->counter > desc0->counter)
+               return 1;
+
+       return -1;
+}
+
+static int
+vhost_check_queue_inflights_split(struct virtio_net *dev,
+                                 struct vhost_virtqueue *vq)
+{
+       uint16_t i;
+       uint16_t resubmit_num = 0, last_io, num;
+       struct vring_used *used = vq->used;
+       struct rte_vhost_resubmit_info *resubmit;
+       struct rte_vhost_inflight_info_split *inflight_split;
+
+       if (!(dev->protocol_features &
+           (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+               return RTE_VHOST_MSG_RESULT_OK;
+
+       if (!vq->inflight_split)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
+       if (!vq->inflight_split->version) {
+               vq->inflight_split->version = INFLIGHT_VERSION;
+               return RTE_VHOST_MSG_RESULT_OK;
+       }
+
+       if (vq->resubmit_inflight)
+               return RTE_VHOST_MSG_RESULT_OK;
+
+       inflight_split = vq->inflight_split;
+       vq->global_counter = 0;
+       last_io = inflight_split->last_inflight_io;
+
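+       /*
+        * If the used index recorded in the inflight area lags behind
+        * the ring's used index, the last inflight I/O actually
+        * completed before the crash; mark it done and resynchronize.
+        */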
+       if (inflight_split->used_idx != used->idx) {
+               inflight_split->desc[last_io].inflight = 0;
+               rte_smp_mb();
+               inflight_split->used_idx = used->idx;
+       }
+
+       for (i = 0; i < inflight_split->desc_num; i++) {
+               if (inflight_split->desc[i].inflight == 1)
+                       resubmit_num++;
+       }
+
+       vq->last_avail_idx += resubmit_num;
+
+       if (resubmit_num) {
+               resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+               if (!resubmit) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to allocate memory for resubmit info.\n");
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+
+               resubmit->resubmit_list = calloc(resubmit_num,
+                       sizeof(struct rte_vhost_resubmit_desc));
+               if (!resubmit->resubmit_list) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to allocate memory for resubmit desc.\n");
+                       free(resubmit);
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+
+               num = 0;
+               for (i = 0; i < inflight_split->desc_num; i++) {
+                       if (inflight_split->desc[i].inflight == 1) {
+                               resubmit->resubmit_list[num].index = i;
+                               resubmit->resubmit_list[num].counter =
+                                       inflight_split->desc[i].counter;
+                               num++;
+                       }
+               }
+               resubmit->resubmit_num = num;
+
+               if (resubmit->resubmit_num > 1)
+                       qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+                             sizeof(struct rte_vhost_resubmit_desc),
+                             resubmit_desc_compare);
+
+               vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+               vq->resubmit_inflight = resubmit;
+       }
+
+       return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_check_queue_inflights_packed(struct virtio_net *dev,
+                                  struct vhost_virtqueue *vq)
+{
+       uint16_t i;
+       uint16_t resubmit_num = 0, old_used_idx, num;
+       struct rte_vhost_resubmit_info *resubmit;
+       struct rte_vhost_inflight_info_packed *inflight_packed;
+
+       if (!(dev->protocol_features &
+           (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+               return RTE_VHOST_MSG_RESULT_OK;
+
+       if (!vq->inflight_packed)
+               return RTE_VHOST_MSG_RESULT_ERR;
+
+       if (!vq->inflight_packed->version) {
+               vq->inflight_packed->version = INFLIGHT_VERSION;
+               return RTE_VHOST_MSG_RESULT_OK;
+       }
+
+       if (vq->resubmit_inflight)
+               return RTE_VHOST_MSG_RESULT_OK;
+
+       inflight_packed = vq->inflight_packed;
+       vq->global_counter = 0;
+       old_used_idx = inflight_packed->old_used_idx;
+
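+       /*
+        * Decide whether the interrupted used-ring update completed: if
+        * the entry at old_used_idx is no longer inflight it did, so the
+        * shadow ("old") state is advanced; otherwise the live state is
+        * rolled back to the shadow copy.
+        */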
+       if (inflight_packed->used_idx != old_used_idx) {
+               if (inflight_packed->desc[old_used_idx].inflight == 0) {
+                       inflight_packed->old_used_idx =
+                               inflight_packed->used_idx;
+                       inflight_packed->old_used_wrap_counter =
+                               inflight_packed->used_wrap_counter;
+                       inflight_packed->old_free_head =
+                               inflight_packed->free_head;
+               } else {
+                       inflight_packed->used_idx =
+                               inflight_packed->old_used_idx;
+                       inflight_packed->used_wrap_counter =
+                               inflight_packed->old_used_wrap_counter;
+                       inflight_packed->free_head =
+                               inflight_packed->old_free_head;
+               }
+       }
+
+       for (i = 0; i < inflight_packed->desc_num; i++) {
+               if (inflight_packed->desc[i].inflight == 1)
+                       resubmit_num++;
+       }
+
+       if (resubmit_num) {
+               resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+               if (resubmit == NULL) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to allocate memory for resubmit info.\n");
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+
+               resubmit->resubmit_list = calloc(resubmit_num,
+                       sizeof(struct rte_vhost_resubmit_desc));
+               if (resubmit->resubmit_list == NULL) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to allocate memory for resubmit desc.\n");
+                       free(resubmit);
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+
+               num = 0;
+               for (i = 0; i < inflight_packed->desc_num; i++) {
+                       if (inflight_packed->desc[i].inflight == 1) {
+                               resubmit->resubmit_list[num].index = i;
+                               resubmit->resubmit_list[num].counter =
+                                       inflight_packed->desc[i].counter;
+                               num++;
+                       }
+               }
+               resubmit->resubmit_num = num;
+
+               if (resubmit->resubmit_num > 1)
+                       qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+                             sizeof(struct rte_vhost_resubmit_desc),
+                             resubmit_desc_compare);
+
+               vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+               vq->resubmit_inflight = resubmit;
+       }
+
+       return RTE_VHOST_MSG_RESULT_OK;
+}
+
 static int
 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
@@ -1199,28 +1724,38 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
         * the ring starts already enabled. Otherwise, it is enabled via
         * the SET_VRING_ENABLE message.
         */
-       if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)))
+       if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                vq->enabled = 1;
+               if (dev->notify_ops->vring_state_changed)
+                       dev->notify_ops->vring_state_changed(
+                               dev->vid, file.index, 1);
+       }
 
        if (vq->kickfd >= 0)
                close(vq->kickfd);
        vq->kickfd = file.fd;
 
+       if (vq_is_packed(dev)) {
+               if (vhost_check_queue_inflights_packed(dev, vq)) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to check inflights for vq: %d\n", file.index);
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+       } else {
+               if (vhost_check_queue_inflights_split(dev, vq)) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to check inflights for vq: %d\n", file.index);
+                       return RTE_VHOST_MSG_RESULT_ERR;
+               }
+       }
+
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
 static void
 free_zmbufs(struct vhost_virtqueue *vq)
 {
-       struct zcopy_mbuf *zmbuf, *next;
-
-       for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
-            zmbuf != NULL; zmbuf = next) {
-               next = TAILQ_NEXT(zmbuf, next);
-
-               rte_pktmbuf_free(zmbuf->mbuf);
-               TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
-       }
+       drain_zmbuf_list(vq);
 
        rte_free(vq->zmbufs);
 }
@@ -1274,6 +1809,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 
        vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
 
+       vq->signalled_used_valid = false;
+
        if (dev->dequeue_zero_copy)
                free_zmbufs(vq);
        if (vq_is_packed(dev)) {
@@ -1290,6 +1827,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        msg->size = sizeof(msg->payload.state);
        msg->fd_num = 0;
 
+       vring_invalidate(dev, vq);
+
        return RTE_VHOST_MSG_RESULT_REPLY;
 }
 
@@ -1321,6 +1860,10 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
                dev->notify_ops->vring_state_changed(dev->vid,
                                index, enable);
 
+       /* On disable, rings have to be stopped being processed. */
+       if (!enable && dev->dequeue_zero_copy)
+               drain_zmbuf_list(dev->virtqueue[index]);
+
        dev->virtqueue[index]->enabled = enable;
 
        return RTE_VHOST_MSG_RESULT_OK;
@@ -1372,6 +1915,9 @@ vhost_user_set_protocol_features(struct virtio_net **pdev,
        }
 
        dev->protocol_features = protocol_features;
+       RTE_LOG(INFO, VHOST_CONFIG,
+               "negotiated Vhost-user protocol features: 0x%" PRIx64 "\n",
+               dev->protocol_features);
 
        return RTE_VHOST_MSG_RESULT_OK;
 }
@@ -1525,60 +2071,75 @@ vhost_user_set_req_fd(struct virtio_net **pdev, struct VhostUserMsg *msg,
                return RTE_VHOST_MSG_RESULT_ERR;
        }
 
+       if (dev->slave_req_fd >= 0)
+               close(dev->slave_req_fd);
+
        dev->slave_req_fd = fd;
 
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
 static int
-is_vring_iotlb_update(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_split(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
 {
        struct vhost_vring_addr *ra;
-       uint64_t start, end;
+       uint64_t start, end, len;
 
        start = imsg->iova;
        end = start + imsg->size;
 
        ra = &vq->ring_addrs;
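+       /*
+        * The IOTLB message concerns a ring as soon as its range
+        * [start, end) overlaps any byte of that ring, not only when it
+        * contains the ring's base address.
+        */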
-       if (ra->desc_user_addr >= start && ra->desc_user_addr < end)
+       len = sizeof(struct vring_desc) * vq->size;
+       if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
                return 1;
-       if (ra->avail_user_addr >= start && ra->avail_user_addr < end)
+
+       len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
+       if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
                return 1;
-       if (ra->used_user_addr >= start && ra->used_user_addr < end)
+
+       len = sizeof(struct vring_used) +
+              sizeof(struct vring_used_elem) * vq->size;
+       if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
                return 1;
 
        return 0;
 }
 
 static int
-is_vring_iotlb_invalidate(struct vhost_virtqueue *vq,
-                               struct vhost_iotlb_msg *imsg)
+is_vring_iotlb_packed(struct vhost_virtqueue *vq, struct vhost_iotlb_msg *imsg)
 {
-       uint64_t istart, iend, vstart, vend;
+       struct vhost_vring_addr *ra;
+       uint64_t start, end, len;
 
-       istart = imsg->iova;
-       iend = istart + imsg->size - 1;
+       start = imsg->iova;
+       end = start + imsg->size;
 
-       vstart = (uintptr_t)vq->desc;
-       vend = vstart + sizeof(struct vring_desc) * vq->size - 1;
-       if (vstart <= iend && istart <= vend)
+       ra = &vq->ring_addrs;
+       len = sizeof(struct vring_packed_desc) * vq->size;
+       if (ra->desc_user_addr < end && (ra->desc_user_addr + len) > start)
                return 1;
 
-       vstart = (uintptr_t)vq->avail;
-       vend = vstart + sizeof(struct vring_avail);
-       vend += sizeof(uint16_t) * vq->size - 1;
-       if (vstart <= iend && istart <= vend)
+       len = sizeof(struct vring_packed_desc_event);
+       if (ra->avail_user_addr < end && (ra->avail_user_addr + len) > start)
                return 1;
 
-       vstart = (uintptr_t)vq->used;
-       vend = vstart + sizeof(struct vring_used);
-       vend += sizeof(struct vring_used_elem) * vq->size - 1;
-       if (vstart <= iend && istart <= vend)
+       len = sizeof(struct vring_packed_desc_event);
+       if (ra->used_user_addr < end && (ra->used_user_addr + len) > start)
                return 1;
 
        return 0;
 }
 
+static int
+is_vring_iotlb(struct virtio_net *dev,
+              struct vhost_virtqueue *vq,
+              struct vhost_iotlb_msg *imsg)
+{
+       if (vq_is_packed(dev))
+               return is_vring_iotlb_packed(vq, imsg);
+       else
+               return is_vring_iotlb_split(vq, imsg);
+}
+
 static int
 vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        int main_fd __rte_unused)
@@ -1601,7 +2162,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
                                        len, imsg->perm);
 
-                       if (is_vring_iotlb_update(vq, imsg))
+                       if (is_vring_iotlb(dev, vq, imsg))
                                *pdev = dev = translate_ring_addresses(dev, i);
                }
                break;
@@ -1612,7 +2173,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg,
                        vhost_user_iotlb_cache_remove(vq, imsg->iova,
                                        imsg->size);
 
-                       if (is_vring_iotlb_invalidate(vq, imsg))
+                       if (is_vring_iotlb(dev, vq, imsg))
                                vring_invalidate(dev, vq);
                }
                break;
@@ -1728,9 +2289,10 @@ static vhost_message_handler_t vhost_message_handlers[VHOST_USER_MAX] = {
        [VHOST_USER_POSTCOPY_ADVISE] = vhost_user_set_postcopy_advise,
        [VHOST_USER_POSTCOPY_LISTEN] = vhost_user_set_postcopy_listen,
        [VHOST_USER_POSTCOPY_END] = vhost_user_postcopy_end,
+       [VHOST_USER_GET_INFLIGHT_FD] = vhost_user_get_inflight_fd,
+       [VHOST_USER_SET_INFLIGHT_FD] = vhost_user_set_inflight_fd,
 };
 
-
 /* return bytes# of read on success or negative val on failure. */
 static int
 read_vhost_message(int sockfd, struct VhostUserMsg *msg)
@@ -1882,7 +2444,7 @@ vhost_user_msg_handler(int vid, int fd)
        int did = -1;
        int ret;
        int unlock_required = 0;
-       uint32_t skip_master = 0;
+       bool handled;
        int request;
 
        dev = get_device(vid);
@@ -1900,27 +2462,30 @@ vhost_user_msg_handler(int vid, int fd)
        }
 
        ret = read_vhost_message(fd, &msg);
-       if (ret <= 0 || msg.request.master >= VHOST_USER_MAX) {
+       if (ret <= 0) {
                if (ret < 0)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "vhost read message failed\n");
-               else if (ret == 0)
+               else
                        RTE_LOG(INFO, VHOST_CONFIG,
                                "vhost peer closed\n");
-               else
-                       RTE_LOG(ERR, VHOST_CONFIG,
-                               "vhost read incorrect message\n");
 
                return -1;
        }
 
        ret = 0;
-       if (msg.request.master != VHOST_USER_IOTLB_MSG)
-               RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
-                       vhost_message_str[msg.request.master]);
-       else
-               RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
-                       vhost_message_str[msg.request.master]);
+       request = msg.request.master;
+       if (request > VHOST_USER_NONE && request < VHOST_USER_MAX &&
+                       vhost_message_str[request]) {
+               if (request != VHOST_USER_IOTLB_MSG)
+                       RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
+                               vhost_message_str[request]);
+               else
+                       RTE_LOG(DEBUG, VHOST_CONFIG, "read message %s\n",
+                               vhost_message_str[request]);
+       } else {
+               RTE_LOG(DEBUG, VHOST_CONFIG, "External request %d\n", request);
+       }
 
        ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
        if (ret < 0) {
@@ -1936,7 +2501,7 @@ vhost_user_msg_handler(int vid, int fd)
         * inactive, so it is safe. Otherwise taking the access_lock
         * would cause a dead lock.
         */
-       switch (msg.request.master) {
+       switch (request) {
        case VHOST_USER_SET_FEATURES:
        case VHOST_USER_SET_PROTOCOL_FEATURES:
        case VHOST_USER_SET_OWNER:
@@ -1961,19 +2526,24 @@ vhost_user_msg_handler(int vid, int fd)
 
        }
 
+       handled = false;
        if (dev->extern_ops.pre_msg_handle) {
                ret = (*dev->extern_ops.pre_msg_handle)(dev->vid,
-                               (void *)&msg, &skip_master);
-               if (ret == RTE_VHOST_MSG_RESULT_ERR)
-                       goto skip_to_reply;
-               else if (ret == RTE_VHOST_MSG_RESULT_REPLY)
+                               (void *)&msg);
+               switch (ret) {
+               case RTE_VHOST_MSG_RESULT_REPLY:
                        send_vhost_reply(fd, &msg);
-
-               if (skip_master)
+                       /* Fall-through */
+               case RTE_VHOST_MSG_RESULT_ERR:
+               case RTE_VHOST_MSG_RESULT_OK:
+                       handled = true;
                        goto skip_to_post_handle;
+               case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+               default:
+                       break;
+               }
        }
 
-       request = msg.request.master;
        if (request > VHOST_USER_NONE && request < VHOST_USER_MAX) {
                if (!vhost_message_handlers[request])
                        goto skip_to_post_handle;
@@ -1984,40 +2554,54 @@ vhost_user_msg_handler(int vid, int fd)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "Processing %s failed.\n",
                                vhost_message_str[request]);
+                       handled = true;
                        break;
                case RTE_VHOST_MSG_RESULT_OK:
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "Processing %s succeeded.\n",
                                vhost_message_str[request]);
+                       handled = true;
                        break;
                case RTE_VHOST_MSG_RESULT_REPLY:
                        RTE_LOG(DEBUG, VHOST_CONFIG,
                                "Processing %s succeeded and needs reply.\n",
                                vhost_message_str[request]);
                        send_vhost_reply(fd, &msg);
+                       handled = true;
+                       break;
+               default:
                        break;
                }
-       } else {
-               RTE_LOG(ERR, VHOST_CONFIG,
-                       "Requested invalid message type %d.\n", request);
-               ret = RTE_VHOST_MSG_RESULT_ERR;
        }
 
 skip_to_post_handle:
        if (ret != RTE_VHOST_MSG_RESULT_ERR &&
                        dev->extern_ops.post_msg_handle) {
-               ret = (*dev->extern_ops.post_msg_handle)(
-                               dev->vid, (void *)&msg);
-               if (ret == RTE_VHOST_MSG_RESULT_ERR)
-                       goto skip_to_reply;
-               else if (ret == RTE_VHOST_MSG_RESULT_REPLY)
+               ret = (*dev->extern_ops.post_msg_handle)(dev->vid,
+                               (void *)&msg);
+               switch (ret) {
+               case RTE_VHOST_MSG_RESULT_REPLY:
                        send_vhost_reply(fd, &msg);
+                       /* Fall-through */
+               case RTE_VHOST_MSG_RESULT_ERR:
+               case RTE_VHOST_MSG_RESULT_OK:
+                       handled = true;
+                       break;
+               case RTE_VHOST_MSG_RESULT_NOT_HANDLED:
+               default:
+                       break;
+               }
        }
 
-skip_to_reply:
        if (unlock_required)
                vhost_user_unlock_all_queue_pairs(dev);
 
+       /* If message was not handled at this stage, treat it as an error */
+       if (!handled) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "vhost message (req: %d) was not handled.\n", request);
+               ret = RTE_VHOST_MSG_RESULT_ERR;
+       }
+
        /*
         * If the request required a reply that was already sent,
         * this optional reply-ack won't be sent as the