vhost: add log when setting vring base
diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 5a894ca..f99692b 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -45,6 +45,8 @@
 #include <rte_common.h>
 #include <rte_malloc.h>
 #include <rte_log.h>
+#include <rte_vfio.h>
+#include <rte_errno.h>
 
 #include "iotlb.h"
 #include "vhost.h"
@@ -141,6 +143,59 @@ get_blk_size(int fd)
        return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+static int
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
+{
+       uint64_t host_iova;
+       int ret = 0;
+
+       host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
+       if (do_map) {
+               /* Add mapped region into the default container of DPDK. */
+               ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+                                                region->host_user_addr,
+                                                host_iova,
+                                                region->size);
+               if (ret) {
+                       /*
+                        * The DMA device may be bound to a kernel driver, in which
+                        * case we don't need to program the IOMMU manually. However,
+                        * if no device is bound to vfio/uio in DPDK, and the vfio
+                        * kernel module is loaded, the API will still be called and
+                        * will fail, setting rte_errno to ENODEV/ENOTSUP.
+                        *
+                        * DPDK vfio returns ENODEV/ENOTSUP only in very similar
+                        * situations (vfio either unsupported, or supported but
+                        * no devices found). Either way, no mapping could be
+                        * performed, so we treat it as a normal case in the
+                        * async path.
+                        */
+                       if (rte_errno == ENODEV || rte_errno == ENOTSUP)
+                               return 0;
+
+                       VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+                       /* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+                       return 0;
+               }
+
+       } else {
+               /* Remove mapped region from the default container of DPDK. */
+               ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+                                                  region->host_user_addr,
+                                                  host_iova,
+                                                  region->size);
+               if (ret) {
+                       /* As with DMA map, ignore the kernel driver case when unmapping. */
+                       if (rte_errno == EINVAL)
+                               return 0;
+
+                       VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
 static void
 free_mem_region(struct virtio_net *dev)
 {
@@ -153,6 +208,9 @@ free_mem_region(struct virtio_net *dev)
        for (i = 0; i < dev->mem->nregions; i++) {
                reg = &dev->mem->regions[i];
                if (reg->host_user_addr) {
+                       if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+                               async_dma_map(reg, false);
+
                        munmap(reg->mmap_addr, reg->mmap_size);
                        close(reg->fd);
                }
@@ -915,6 +973,11 @@ vhost_user_set_vring_base(struct virtio_net **pdev,
                vq->last_avail_idx = msg->payload.state.num;
        }
 
+       VHOST_LOG_CONFIG(INFO,
+               "(%s) vring base idx:%u last_used_idx:%u last_avail_idx:%u.\n",
+               dev->ifname, msg->payload.state.index, vq->last_used_idx,
+               vq->last_avail_idx);
+
        return RTE_VHOST_MSG_RESULT_OK;
 }
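The new INFO-level message fires on every VHOST_USER_SET_VRING_BASE request, making
ring state visible during guest reconnection and migration. Going by the format
string above, the output looks roughly like the line below; the socket name and
index values are illustrative, and the exact log prefix depends on how the
VHOST_LOG_CONFIG macro is defined:

    VHOST_CONFIG: (/tmp/vhost.sock) vring base idx:0 last_used_idx:0 last_avail_idx:0.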
 
@@ -1057,7 +1120,7 @@ vhost_user_postcopy_region_register(struct virtio_net *dev,
        struct uffdio_register reg_struct;
 
        /*
-        * Let's register all the mmap'ed area to ensure
+        * Let's register all the mmapped area to ensure
         * alignment on page boundary.
         */
        reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
@@ -1119,7 +1182,7 @@ vhost_user_postcopy_register(struct virtio_net *dev, int main_fd,
        msg->fd_num = 0;
        send_vhost_reply(main_fd, msg);
 
-       /* Wait for qemu to acknolwedge it's got the addresses
+       /* Wait for qemu to acknowledge it got the addresses;
         * we've got to wait before we're allowed to generate faults.
         */
        if (read_vhost_message(main_fd, &ack_msg) <= 0) {
@@ -1157,6 +1220,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
        uint64_t mmap_size;
        uint64_t alignment;
        int populate;
+       int ret;
 
        /* Check for memory_size + mmap_offset overflow */
        if (mmap_offset >= -region->size) {
@@ -1210,13 +1274,21 @@ vhost_user_mmap_region(struct virtio_net *dev,
        region->mmap_size = mmap_size;
        region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
 
-       if (dev->async_copy)
+       if (dev->async_copy) {
                if (add_guest_pages(dev, region, alignment) < 0) {
-                       VHOST_LOG_CONFIG(ERR,
-                                       "adding guest pages to region failed.\n");
+                       VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
                        return -1;
                }
 
+               if (rte_vfio_is_enabled("vfio")) {
+                       ret = async_dma_map(region, true);
+                       if (ret) {
+                               VHOST_LOG_CONFIG(ERR, "Configuring IOMMU for DMA engine failed\n");
+                               return -1;
+                       }
+               }
+       }
+
        VHOST_LOG_CONFIG(INFO,
                        "guest memory region size: 0x%" PRIx64 "\n"
                        "\t guest physical addr: 0x%" PRIx64 "\n"
@@ -1393,6 +1465,7 @@ free_mem_table:
        free_mem_region(dev);
        rte_free(dev->mem);
        dev->mem = NULL;
+
 free_guest_pages:
        rte_free(dev->guest_pages);
        dev->guest_pages = NULL;
@@ -2140,8 +2213,8 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
                "set queue enable: %d to qp idx: %d\n",
                enable, index);
 
-       if (enable && dev->virtqueue[index]->async_registered) {
-               if (dev->virtqueue[index]->async_pkts_inflight_n) {
+       if (enable && dev->virtqueue[index]->async) {
+               if (dev->virtqueue[index]->async->pkts_inflight_n) {
                        VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
                        "async inflight packets must be completed first\n");
                        return RTE_VHOST_MSG_RESULT_ERR;