diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index cdd46a0..a781346 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -45,6 +45,8 @@
 #include <rte_common.h>
 #include <rte_malloc.h>
 #include <rte_log.h>
+#include <rte_vfio.h>
+#include <rte_errno.h>
 
 #include "iotlb.h"
 #include "vhost.h"
@@ -141,6 +143,59 @@ get_blk_size(int fd)
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+static int
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
+{
+       uint64_t host_iova;
+       int ret = 0;
+
+       host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
+       if (do_map) {
+               /* Add mapped region into the default container of DPDK. */
+               ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+                                                region->host_user_addr,
+                                                host_iova,
+                                                region->size);
+               if (ret) {
+                       /*
+                        * The DMA device may be bound to a kernel driver, in
+                        * which case the IOMMU does not need to be programmed
+                        * manually. However, if no device is bound to vfio/uio
+                        * in DPDK while the vfio kernel module is loaded, this
+                        * API is still called and returns ENODEV/ENOTSUP.
+                        *
+                        * DPDK vfio only returns ENODEV/ENOTSUP in very
+                        * similar situations (vfio either unsupported, or
+                        * supported but no devices found). Either way, no
+                        * mappings could be performed, so treat it as a normal
+                        * case in the async path.
+                        */
+                       if (rte_errno == ENODEV || rte_errno == ENOTSUP)
+                               return 0;
+
+                       VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+                       /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+                       return 0;
+               }
+
+       } else {
+               /* Remove mapped region from the default container of DPDK. */
+               ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+                                                  region->host_user_addr,
+                                                  host_iova,
+                                                  region->size);
+               if (ret) {
+                       /* Like DMA map, ignore the kernel driver case when unmapping. */
+                       if (rte_errno == EINVAL)
+                               return 0;
+
+                       VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 static void
 free_mem_region(struct virtio_net *dev)
 {
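A minimal sketch (not part of the patch) of the pattern async_dma_map() relies on: translate a host virtual address to its IOVA with rte_mem_virt2iova() and register it in DPDK's default VFIO container, tolerating the ENODEV/ENOTSUP case where vfio is loaded but no DPDK device actually uses it. The function name and the simplified error handling are illustrative only; the buffer is assumed virtually contiguous and page-aligned.

#include <errno.h>
#include <stdint.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_vfio.h>

/* Map a virtually contiguous, page-aligned buffer for device DMA. */
static int
dma_map_buffer(void *addr, uint64_t len)
{
	uint64_t iova = rte_mem_virt2iova(addr);
	int ret;

	if (!rte_vfio_is_enabled("vfio"))
		return 0; /* no vfio container to program */

	ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
					 (uint64_t)(uintptr_t)addr, iova, len);
	if (ret && (rte_errno == ENODEV || rte_errno == ENOTSUP))
		return 0; /* vfio present but unused: nothing to map */

	return ret;
}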
@@ -153,6 +208,9 @@ free_mem_region(struct virtio_net *dev)
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
+                       if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+                               async_dma_map(reg, false);
+
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
@@ -188,7 +246,7 @@ vhost_backend_cleanup(struct virtio_net *dev)
                        dev->inflight_info->fd = -1;
                }
 
-               free(dev->inflight_info);
+               rte_free(dev->inflight_info);
                dev->inflight_info = NULL;
        }
 
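This one-line change matters because, later in this patch, dev->inflight_info moves from calloc() to rte_zmalloc_socket(): memory from the rte_malloc family must be released with rte_free(), never libc free(). A tiny illustrative sketch of the pairing, with a made-up function name:

#include <rte_malloc.h>
#include <rte_memory.h>   /* SOCKET_ID_ANY */

static void
allocator_pairing_example(void)
{
	/* Allocated from the DPDK heap, possibly on a specific NUMA node... */
	void *p = rte_zmalloc_socket("example", 64, 0, SOCKET_ID_ANY);

	/* ...so it must go back to the DPDK heap. Releasing it with libc
	 * free() instead would hand the pointer to the wrong allocator. */
	rte_free(p);
}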
@@ -433,10 +491,10 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
        if (vq_is_packed(dev)) {
                if (vq->shadow_used_packed)
                        rte_free(vq->shadow_used_packed);
-               vq->shadow_used_packed = rte_malloc(NULL,
+               vq->shadow_used_packed = rte_malloc_socket(NULL,
                                vq->size *
                                sizeof(struct vring_used_elem_packed),
-                               RTE_CACHE_LINE_SIZE);
+                               RTE_CACHE_LINE_SIZE, vq->numa_node);
                if (!vq->shadow_used_packed) {
                        VHOST_LOG_CONFIG(ERR,
                                        "failed to allocate memory for shadow used ring.\n");
@@ -447,9 +505,9 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
                if (vq->shadow_used_split)
                        rte_free(vq->shadow_used_split);
 
-               vq->shadow_used_split = rte_malloc(NULL,
+               vq->shadow_used_split = rte_malloc_socket(NULL,
                                vq->size * sizeof(struct vring_used_elem),
-                               RTE_CACHE_LINE_SIZE);
+                               RTE_CACHE_LINE_SIZE, vq->numa_node);
 
                if (!vq->shadow_used_split) {
                        VHOST_LOG_CONFIG(ERR,
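In this and the previous hunk (and for the batch-copy array just below), per-virtqueue metadata switches from rte_malloc() to rte_malloc_socket() keyed on vq->numa_node, so the shadow rings land on the node that actually backs the queue. A hedged sketch of that allocation pattern follows; the helper name and the explicit fallback are illustrative only (the patch itself simply fails the message on allocation error), and vq->numa_node starts as SOCKET_ID_ANY until numa_realloc() pins it down.

#include <rte_common.h>   /* RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_memory.h>   /* SOCKET_ID_ANY */

/* Allocate cache-line-aligned metadata on a preferred NUMA node,
 * falling back to any node rather than failing outright. */
static void *
alloc_vq_metadata(size_t size, int numa_node)
{
	void *p = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE, numa_node);

	if (p == NULL && numa_node != SOCKET_ID_ANY)
		p = rte_malloc_socket(NULL, size, RTE_CACHE_LINE_SIZE,
				SOCKET_ID_ANY);
	return p;
}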
@@ -460,126 +518,174 @@ vhost_user_set_vring_num(struct virtio_net **pdev,
 
        if (vq->batch_copy_elems)
                rte_free(vq->batch_copy_elems);
-       vq->batch_copy_elems = rte_malloc(NULL,
+       vq->batch_copy_elems = rte_malloc_socket(NULL,
                                vq->size * sizeof(struct batch_copy_elem),
-                               RTE_CACHE_LINE_SIZE);
+                               RTE_CACHE_LINE_SIZE, vq->numa_node);
        if (!vq->batch_copy_elems) {
                VHOST_LOG_CONFIG(ERR,
                        "failed to allocate memory for batching copy.\n");
                return RTE_VHOST_MSG_RESULT_ERR;
        }
 
-       if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
-               if (vhost_user_iotlb_init(dev, msg->payload.state.index))
-                       return RTE_VHOST_MSG_RESULT_ERR;
-       }
        return RTE_VHOST_MSG_RESULT_OK;
 }
 
 /*
- * Reallocate virtio_dev and vhost_virtqueue data structure to make them on the
- * same numa node as the memory of vring descriptor.
+ * Reallocate virtio_dev, vhost_virtqueue and related data structures to
+ * place them on the same NUMA node as the memory of the vring descriptors.
  */
 #ifdef RTE_LIBRTE_VHOST_NUMA
 static struct virtio_net*
 numa_realloc(struct virtio_net *dev, int index)
 {
-       int oldnode, newnode;
+       int node, dev_node;
        struct virtio_net *old_dev;
-       struct vhost_virtqueue *old_vq, *vq;
-       struct vring_used_elem *new_shadow_used_split;
-       struct vring_used_elem_packed *new_shadow_used_packed;
-       struct batch_copy_elem *new_batch_copy_elems;
+       struct vhost_virtqueue *vq;
+       struct batch_copy_elem *bce;
+       struct guest_page *gp;
+       struct rte_vhost_memory *mem;
+       size_t mem_size;
        int ret;
 
-       if (dev->flags & VIRTIO_DEV_RUNNING)
-               return dev;
-
        old_dev = dev;
-       vq = old_vq = dev->virtqueue[index];
+       vq = dev->virtqueue[index];
 
-       ret = get_mempolicy(&newnode, NULL, 0, old_vq->desc,
-                           MPOL_F_NODE | MPOL_F_ADDR);
+       /*
+        * If the VQ is ready, it is too late to reallocate: the reallocation
+        * certainly already happened on VHOST_USER_SET_VRING_ADDR anyway.
+        */
+       if (vq->ready)
+               return dev;
 
-       /* check if we need to reallocate vq */
-       ret |= get_mempolicy(&oldnode, NULL, 0, old_vq,
-                            MPOL_F_NODE | MPOL_F_ADDR);
+       ret = get_mempolicy(&node, NULL, 0, vq->desc, MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
-               VHOST_LOG_CONFIG(ERR,
-                       "Unable to get vq numa information.\n");
+               VHOST_LOG_CONFIG(ERR, "Unable to get virtqueue %d numa information.\n", index);
                return dev;
        }
-       if (oldnode != newnode) {
-               VHOST_LOG_CONFIG(INFO,
-                       "reallocate vq from %d to %d node\n", oldnode, newnode);
-               vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
-               if (!vq)
+
+       if (node == vq->numa_node)
+               goto out_dev_realloc;
+
+       vq = rte_realloc_socket(vq, sizeof(*vq), 0, node);
+       if (!vq) {
+               VHOST_LOG_CONFIG(ERR, "Failed to realloc virtqueue %d on node %d\n",
+                               index, node);
+               return dev;
+       }
+
+       if (vq != dev->virtqueue[index]) {
+               VHOST_LOG_CONFIG(INFO, "reallocated virtqueue on node %d\n", node);
+               dev->virtqueue[index] = vq;
+               vhost_user_iotlb_init(dev, index);
+       }
+
+       if (vq_is_packed(dev)) {
+               struct vring_used_elem_packed *sup;
+
+               sup = rte_realloc_socket(vq->shadow_used_packed, vq->size * sizeof(*sup),
+                               RTE_CACHE_LINE_SIZE, node);
+               if (!sup) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow packed on node %d\n", node);
+                       return dev;
+               }
+               vq->shadow_used_packed = sup;
+       } else {
+               struct vring_used_elem *sus;
+
+               sus = rte_realloc_socket(vq->shadow_used_split, vq->size * sizeof(*sus),
+                               RTE_CACHE_LINE_SIZE, node);
+               if (!sus) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to realloc shadow split on node %d\n", node);
                        return dev;
+               }
+               vq->shadow_used_split = sus;
+       }
 
-               memcpy(vq, old_vq, sizeof(*vq));
+       bce = rte_realloc_socket(vq->batch_copy_elems, vq->size * sizeof(*bce),
+                       RTE_CACHE_LINE_SIZE, node);
+       if (!bce) {
+               VHOST_LOG_CONFIG(ERR, "Failed to realloc batch copy elem on node %d\n", node);
+               return dev;
+       }
+       vq->batch_copy_elems = bce;
 
-               if (vq_is_packed(dev)) {
-                       new_shadow_used_packed = rte_malloc_socket(NULL,
-                                       vq->size *
-                                       sizeof(struct vring_used_elem_packed),
-                                       RTE_CACHE_LINE_SIZE,
-                                       newnode);
-                       if (new_shadow_used_packed) {
-                               rte_free(vq->shadow_used_packed);
-                               vq->shadow_used_packed = new_shadow_used_packed;
-                       }
-               } else {
-                       new_shadow_used_split = rte_malloc_socket(NULL,
-                                       vq->size *
-                                       sizeof(struct vring_used_elem),
-                                       RTE_CACHE_LINE_SIZE,
-                                       newnode);
-                       if (new_shadow_used_split) {
-                               rte_free(vq->shadow_used_split);
-                               vq->shadow_used_split = new_shadow_used_split;
-                       }
+       if (vq->log_cache) {
+               struct log_cache_entry *lc;
+
+               lc = rte_realloc_socket(vq->log_cache, sizeof(*lc) * VHOST_LOG_CACHE_NR, 0, node);
+               if (!lc) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to realloc log cache on node %d\n", node);
+                       return dev;
                }
+               vq->log_cache = lc;
+       }
+
+       if (vq->resubmit_inflight) {
+               struct rte_vhost_resubmit_info *ri;
 
-               new_batch_copy_elems = rte_malloc_socket(NULL,
-                       vq->size * sizeof(struct batch_copy_elem),
-                       RTE_CACHE_LINE_SIZE,
-                       newnode);
-               if (new_batch_copy_elems) {
-                       rte_free(vq->batch_copy_elems);
-                       vq->batch_copy_elems = new_batch_copy_elems;
+               ri = rte_realloc_socket(vq->resubmit_inflight, sizeof(*ri), 0, node);
+               if (!ri) {
+                       VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit inflight on node %d\n",
+                                       node);
+                       return dev;
                }
+               vq->resubmit_inflight = ri;
+
+               if (ri->resubmit_list) {
+                       struct rte_vhost_resubmit_desc *rd;
 
-               rte_free(old_vq);
+                       rd = rte_realloc_socket(ri->resubmit_list, sizeof(*rd) * ri->resubmit_num,
+                                       0, node);
+                       if (!rd) {
+                               VHOST_LOG_CONFIG(ERR, "Failed to realloc resubmit list on node %d\n",
+                                               node);
+                               return dev;
+                       }
+                       ri->resubmit_list = rd;
+               }
        }
 
-       /* check if we need to reallocate dev */
-       ret = get_mempolicy(&oldnode, NULL, 0, old_dev,
-                           MPOL_F_NODE | MPOL_F_ADDR);
+       vq->numa_node = node;
+
+out_dev_realloc:
+
+       if (dev->flags & VIRTIO_DEV_RUNNING)
+               return dev;
+
+       ret = get_mempolicy(&dev_node, NULL, 0, dev, MPOL_F_NODE | MPOL_F_ADDR);
        if (ret) {
-               VHOST_LOG_CONFIG(ERR,
-                       "Unable to get dev numa information.\n");
-               goto out;
+               VHOST_LOG_CONFIG(ERR, "Unable to get Virtio dev %d numa information.\n", dev->vid);
+               return dev;
        }
-       if (oldnode != newnode) {
-               VHOST_LOG_CONFIG(INFO,
-                       "reallocate dev from %d to %d node\n",
-                       oldnode, newnode);
-               dev = rte_malloc_socket(NULL, sizeof(*dev), 0, newnode);
-               if (!dev) {
-                       dev = old_dev;
-                       goto out;
-               }
 
-               memcpy(dev, old_dev, sizeof(*dev));
-               rte_free(old_dev);
+       if (dev_node == node)
+               return dev;
+
+       dev = rte_realloc_socket(old_dev, sizeof(*dev), 0, node);
+       if (!dev) {
+               VHOST_LOG_CONFIG(ERR, "Failed to realloc dev on node %d\n", node);
+               return old_dev;
        }
 
-out:
-       dev->virtqueue[index] = vq;
+       VHOST_LOG_CONFIG(INFO, "reallocated device on node %d\n", node);
        vhost_devices[dev->vid] = dev;
 
-       if (old_vq != vq && (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-               vhost_user_iotlb_init(dev, index);
+       mem_size = sizeof(struct rte_vhost_memory) +
+               sizeof(struct rte_vhost_mem_region) * dev->mem->nregions;
+       mem = rte_realloc_socket(dev->mem, mem_size, 0, node);
+       if (!mem) {
+               VHOST_LOG_CONFIG(ERR, "Failed to realloc mem table on node %d\n", node);
+               return dev;
+       }
+       dev->mem = mem;
+
+       gp = rte_realloc_socket(dev->guest_pages, dev->max_guest_pages * sizeof(*gp),
+                       RTE_CACHE_LINE_SIZE, node);
+       if (!gp) {
+               VHOST_LOG_CONFIG(ERR, "Failed to realloc guest pages on node %d\n", node);
+               return dev;
+       }
+       dev->guest_pages = gp;
 
        return dev;
 }
@@ -1109,6 +1215,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
        uint64_t mmap_size;
        uint64_t alignment;
        int populate;
+       int ret;
 
        /* Check for memory_size + mmap_offset overflow */
        if (mmap_offset >= -region->size) {
@@ -1162,13 +1269,21 @@ vhost_user_mmap_region(struct virtio_net *dev,
        region->mmap_size = mmap_size;
        region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
 
-       if (dev->async_copy)
+       if (dev->async_copy) {
                if (add_guest_pages(dev, region, alignment) < 0) {
-                       VHOST_LOG_CONFIG(ERR,
-                                       "adding guest pages to region failed.\n");
+                       VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
                        return -1;
                }
 
+               if (rte_vfio_is_enabled("vfio")) {
+                       ret = async_dma_map(region, true);
+                       if (ret) {
+                               VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
+                               return -1;
+                       }
+               }
+       }
+
        VHOST_LOG_CONFIG(INFO,
                        "guest memory region size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
@@ -1197,9 +1312,10 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
        struct virtio_net *dev = *pdev;
        struct VhostUserMemory *memory = &msg->payload.memory;
        struct rte_vhost_mem_region *reg;
-
+       int numa_node = SOCKET_ID_ANY;
        uint64_t mmap_offset;
        uint32_t i;
+       bool async_notify = false;
 
        if (validate_msg_fds(msg, memory->nregions) != 0)
                return RTE_VHOST_MSG_RESULT_ERR;
@@ -1227,6 +1343,16 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                                vdpa_dev->ops->dev_close(dev->vid);
                        dev->flags &= ~VIRTIO_DEV_VDPA_CONFIGURED;
                }
+
+               /* notify the vhost application to stop DMA transfers */
+               if (dev->async_copy && dev->notify_ops->vring_state_changed) {
+                       for (i = 0; i < dev->nr_vring; i++) {
+                               dev->notify_ops->vring_state_changed(dev->vid,
+                                               i, 0);
+                       }
+                       async_notify = true;
+               }
+
                free_mem_region(dev);
                rte_free(dev->mem);
                dev->mem = NULL;
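Because the old memory table is about to be unmapped (and its VFIO mappings torn down in free_mem_region()), an application doing asynchronous copies has to stop issuing DMA against it first. The patch signals this by calling vring_state_changed(vid, i, 0) for every ring here, then re-enables the rings once the new table is installed (see the async_notify loop near the end of vhost_user_set_mem_table()). A hedged sketch of what the application side of that callback might look like; the two app_* hooks are hypothetical stand-ins for the real data path.

#include <stdint.h>

/* Hypothetical application hooks standing in for the real data path. */
extern void app_drain_and_stop_dma(int vid, uint16_t queue_id);
extern void app_resume_dma(int vid, uint16_t queue_id);

/* Matches the vring_state_changed member of the vhost device ops:
 * int (*vring_state_changed)(int vid, uint16_t queue_id, int enable). */
static int
app_vring_state_changed(int vid, uint16_t queue_id, int enable)
{
	if (!enable)
		/* The memory table is changing: finish in-flight copies and
		 * stop submitting new DMA for this ring. */
		app_drain_and_stop_dma(vid, queue_id);
	else
		app_resume_dma(vid, queue_id);

	return 0;
}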
@@ -1237,13 +1363,21 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                for (i = 0; i < dev->nr_vring; i++)
                        vhost_user_iotlb_flush_all(dev->virtqueue[i]);
 
+       /*
+        * If VQ 0 has already been allocated, try to allocate on the same
+        * NUMA node. It can be reallocated later in numa_realloc().
+        */
+       if (dev->nr_vring > 0)
+               numa_node = dev->virtqueue[0]->numa_node;
+
        dev->nr_guest_pages = 0;
        if (dev->guest_pages == NULL) {
                dev->max_guest_pages = 8;
-               dev->guest_pages = rte_zmalloc(NULL,
+               dev->guest_pages = rte_zmalloc_socket(NULL,
                                        dev->max_guest_pages *
                                        sizeof(struct guest_page),
-                                       RTE_CACHE_LINE_SIZE);
+                                       RTE_CACHE_LINE_SIZE,
+                                       numa_node);
                if (dev->guest_pages == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "(%d) failed to allocate memory "
@@ -1253,8 +1387,8 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
                }
        }
 
-       dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
-               sizeof(struct rte_vhost_mem_region) * memory->nregions, 0);
+       dev->mem = rte_zmalloc_socket("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+               sizeof(struct rte_vhost_mem_region) * memory->nregions, 0, numa_node);
        if (dev->mem == NULL) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) failed to allocate memory for dev->mem\n",
@@ -1315,12 +1449,18 @@ vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *msg,
 
        dump_guest_pages(dev);
 
+       if (async_notify) {
+               for (i = 0; i < dev->nr_vring; i++)
+                       dev->notify_ops->vring_state_changed(dev->vid, i, 1);
+       }
+
        return RTE_VHOST_MSG_RESULT_OK;
 
 free_mem_table:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
+
 free_guest_pages:
        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;
@@ -1460,6 +1600,7 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
        uint16_t num_queues, queue_size;
        struct virtio_net *dev = *pdev;
        int fd, i, j;
+       int numa_node = SOCKET_ID_ANY;
        void *addr;
 
        if (msg->size != sizeof(msg->payload.inflight)) {
@@ -1469,9 +1610,16 @@ vhost_user_get_inflight_fd(struct virtio_net **pdev,
                return RTE_VHOST_MSG_RESULT_ERR;
        }
 
+       /*
+        * If VQ 0 has already been allocated, try to allocate on the same
+        * NUMA node. It can be reallocated later in numa_realloc().
+        */
+       if (dev->nr_vring > 0)
+               numa_node = dev->virtqueue[0]->numa_node;
+
        if (dev->inflight_info == NULL) {
-               dev->inflight_info = calloc(1,
-                                           sizeof(struct inflight_mem_info));
+               dev->inflight_info = rte_zmalloc_socket("inflight_info",
+                               sizeof(struct inflight_mem_info), 0, numa_node);
                if (!dev->inflight_info) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to alloc dev inflight area\n");
@@ -1554,6 +1702,7 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
        struct vhost_virtqueue *vq;
        void *addr;
        int fd, i;
+       int numa_node = SOCKET_ID_ANY;
 
        fd = msg->fds[0];
        if (msg->size != sizeof(msg->payload.inflight) || fd < 0) {
@@ -1587,9 +1736,16 @@ vhost_user_set_inflight_fd(struct virtio_net **pdev, VhostUserMsg *msg,
                "set_inflight_fd pervq_inflight_size: %d\n",
                pervq_inflight_size);
 
+       /*
+        * If VQ 0 has already been allocated, try to allocate on the same
+        * NUMA node. It can be reallocated later in numa_realloc().
+        */
+       if (dev->nr_vring > 0)
+               numa_node = dev->virtqueue[0]->numa_node;
+
        if (!dev->inflight_info) {
-               dev->inflight_info = calloc(1,
-                                           sizeof(struct inflight_mem_info));
+               dev->inflight_info = rte_zmalloc_socket("inflight_info",
+                               sizeof(struct inflight_mem_info), 0, numa_node);
                if (dev->inflight_info == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to alloc dev inflight area\n");
@@ -1748,19 +1904,21 @@ vhost_check_queue_inflights_split(struct virtio_net *dev,
        vq->last_avail_idx += resubmit_num;
 
        if (resubmit_num) {
-               resubmit  = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+               resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
+                               0, vq->numa_node);
                if (!resubmit) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to allocate memory for resubmit info.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
 
-               resubmit->resubmit_list = calloc(resubmit_num,
-                       sizeof(struct rte_vhost_resubmit_desc));
+               resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
+                               resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
+                               0, vq->numa_node);
                if (!resubmit->resubmit_list) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to allocate memory for inflight desc.\n");
-                       free(resubmit);
+                       rte_free(resubmit);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
 
@@ -1842,19 +2000,21 @@ vhost_check_queue_inflights_packed(struct virtio_net *dev,
        }
 
        if (resubmit_num) {
-               resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+               resubmit = rte_zmalloc_socket("resubmit", sizeof(struct rte_vhost_resubmit_info),
+                               0, vq->numa_node);
                if (resubmit == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to allocate memory for resubmit info.\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
 
-               resubmit->resubmit_list = calloc(resubmit_num,
-                       sizeof(struct rte_vhost_resubmit_desc));
+               resubmit->resubmit_list = rte_zmalloc_socket("resubmit_list",
+                               resubmit_num * sizeof(struct rte_vhost_resubmit_desc),
+                               0, vq->numa_node);
                if (resubmit->resubmit_list == NULL) {
                        VHOST_LOG_CONFIG(ERR,
                                "failed to allocate memory for resubmit desc.\n");
-                       free(resubmit);
+                       rte_free(resubmit);
                        return RTE_VHOST_MSG_RESULT_ERR;
                }
 
@@ -1922,9 +2082,6 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
         */
        if (!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
                vq->enabled = true;
-               if (dev->notify_ops->vring_state_changed)
-                       dev->notify_ops->vring_state_changed(
-                               dev->vid, file.index, 1);
        }
 
        if (vq->ready) {
@@ -2013,13 +2170,6 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        } else {
                rte_free(vq->shadow_used_split);
                vq->shadow_used_split = NULL;
-
-               if (vq->async_pkts_info)
-                       rte_free(vq->async_pkts_info);
-               if (vq->async_descs_split)
-                       rte_free(vq->async_descs_split);
-               vq->async_pkts_info = NULL;
-               vq->async_descs_split = NULL;
        }
 
        rte_free(vq->batch_copy_elems);
@@ -2031,6 +2181,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
        msg->size = sizeof(msg->payload.state);
        msg->fd_num = 0;
 
+       vhost_user_iotlb_flush_all(vq);
+
        vring_invalidate(dev, vq);
 
        return RTE_VHOST_MSG_RESULT_REPLY;
@@ -2056,8 +2208,8 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);
 
-       if (enable && dev->virtqueue[index]->async_registered) {
-               if (dev->virtqueue[index]->async_pkts_inflight_n) {
+       if (enable && dev->virtqueue[index]->async) {
+               if (dev->virtqueue[index]->async->pkts_inflight_n) {
                        VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
                        "async inflight packets must be completed first\n");
                        return RTE_VHOST_MSG_RESULT_ERR;
@@ -2187,9 +2339,9 @@ vhost_user_set_log_base(struct virtio_net **pdev, struct VhostUserMsg *msg,
                rte_free(vq->log_cache);
                vq->log_cache = NULL;
                vq->log_cache_nb_elem = 0;
-               vq->log_cache = rte_zmalloc("vq log cache",
+               vq->log_cache = rte_malloc_socket("vq log cache",
                                sizeof(struct log_cache_entry) * VHOST_LOG_CACHE_NR,
-                               0);
+                               0, vq->numa_node);
                /*
                 * If log cache alloc fail, don't fail migration, but no
                 * caching will be done, which will impact performance
@@ -2245,7 +2397,7 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
                return RTE_VHOST_MSG_RESULT_ERR;
 
        VHOST_LOG_CONFIG(DEBUG,
-               ":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
+               ":: mac: " RTE_ETHER_ADDR_PRT_FMT "\n",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        memcpy(dev->mac.addr_bytes, mac, 6);
 
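RTE_ETHER_ADDR_PRT_FMT comes from rte_ether.h and expands to a "%02X:%02X:..."-style format string, so the hand-written MAC format can go away. A minimal usage sketch outside of vhost, with an invented helper name:

#include <stdio.h>
#include <rte_ether.h>

static void
print_mac(const struct rte_ether_addr *ea)
{
	/* RTE_ETHER_ADDR_PRT_FMT expects six byte-sized arguments. */
	printf("mac: " RTE_ETHER_ADDR_PRT_FMT "\n",
	       ea->addr_bytes[0], ea->addr_bytes[1], ea->addr_bytes[2],
	       ea->addr_bytes[3], ea->addr_bytes[4], ea->addr_bytes[5]);
}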
@@ -2714,6 +2866,7 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
                break;
        case VHOST_USER_SET_VRING_NUM:
        case VHOST_USER_SET_VRING_BASE:
+       case VHOST_USER_GET_VRING_BASE:
        case VHOST_USER_SET_VRING_ENABLE:
                vring_idx = msg->payload.state.index;
                break;
@@ -2929,9 +3082,6 @@ skip_to_post_handle:
                }
        }
 
-       if (unlock_required)
-               vhost_user_unlock_all_queue_pairs(dev);
-
        /* If message was not handled at this stage, treat it as an error */
        if (!handled) {
                VHOST_LOG_CONFIG(ERR,
@@ -2966,6 +3116,8 @@ skip_to_post_handle:
                }
        }
 
+       if (unlock_required)
+               vhost_user_unlock_all_queue_pairs(dev);
 
        if (!virtio_is_ready(dev))
                goto out;