diff --git a/lib/vhost/vhost_user.c b/lib/vhost/vhost_user.c
index 433f412fa8..a781346c4d 100644
--- a/lib/vhost/vhost_user.c
+++ b/lib/vhost/vhost_user.c
@@ -45,6 +45,8 @@
 #include <rte_common.h>
 #include <rte_malloc.h>
 #include <rte_log.h>
+#include <rte_vfio.h>
+#include <rte_errno.h>
 
 #include "iotlb.h"
 #include "vhost.h"
@@ -141,6 +143,59 @@ get_blk_size(int fd)
 	return ret == -1 ? (uint64_t)-1 : (uint64_t)stat.st_blksize;
 }
 
+static int
+async_dma_map(struct rte_vhost_mem_region *region, bool do_map)
+{
+	uint64_t host_iova;
+	int ret = 0;
+
+	host_iova = rte_mem_virt2iova((void *)(uintptr_t)region->host_user_addr);
+	if (do_map) {
+		/* Add mapped region into the default container of DPDK. */
+		ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
+						 region->host_user_addr,
+						 host_iova,
+						 region->size);
+		if (ret) {
+			/*
+			 * The DMA device may be bound to a kernel driver; in
+			 * that case we don't need to program the IOMMU manually.
+			 * However, if no device is bound with vfio/uio in DPDK
+			 * and the vfio kernel module is loaded, the API will
+			 * still be called and return with ENODEV/ENOTSUP.
+			 *
+			 * DPDK vfio only returns ENODEV/ENOTSUP in very similar
+			 * situations (vfio either unsupported, or supported
+			 * but no devices found). Either way, no mappings could be
+			 * performed. We treat it as a normal case in the async path.
+			 */
+			if (rte_errno == ENODEV || rte_errno == ENOTSUP)
+				return 0;
+
+			VHOST_LOG_CONFIG(ERR, "DMA engine map failed\n");
+			/* DMA mapping errors won't stop VHOST_USER_SET_MEM_TABLE. */
+			return 0;
+		}
+
+	} else {
+		/* Remove mapped region from the default container of DPDK. */
+		ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
+						   region->host_user_addr,
+						   host_iova,
+						   region->size);
+		if (ret) {
+			/* Like DMA map, ignore the kernel driver case when unmapping. */
+			if (rte_errno == EINVAL)
+				return 0;
+
+			VHOST_LOG_CONFIG(ERR, "DMA engine unmap failed\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
 static void
 free_mem_region(struct virtio_net *dev)
 {
@@ -153,6 +208,9 @@ free_mem_region(struct virtio_net *dev)
 	for (i = 0; i < dev->mem->nregions; i++) {
 		reg = &dev->mem->regions[i];
 		if (reg->host_user_addr) {
+			if (dev->async_copy && rte_vfio_is_enabled("vfio"))
+				async_dma_map(reg, false);
+
 			munmap(reg->mmap_addr, reg->mmap_size);
 			close(reg->fd);
 		}
@@ -1157,6 +1215,7 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	uint64_t mmap_size;
 	uint64_t alignment;
 	int populate;
+	int ret;
 
 	/* Check for memory_size + mmap_offset overflow */
 	if (mmap_offset >= -region->size) {
@@ -1210,13 +1269,21 @@ vhost_user_mmap_region(struct virtio_net *dev,
 	region->mmap_size = mmap_size;
 	region->host_user_addr = (uint64_t)(uintptr_t)mmap_addr + mmap_offset;
 
-	if (dev->async_copy)
+	if (dev->async_copy) {
 		if (add_guest_pages(dev, region, alignment) < 0) {
-			VHOST_LOG_CONFIG(ERR,
-				"adding guest pages to region failed.\n");
+			VHOST_LOG_CONFIG(ERR, "adding guest pages to region failed.\n");
 			return -1;
 		}
 
+		if (rte_vfio_is_enabled("vfio")) {
+			ret = async_dma_map(region, true);
+			if (ret) {
+				VHOST_LOG_CONFIG(ERR, "Configure IOMMU for DMA engine failed\n");
+				return -1;
+			}
+		}
+	}
+
 	VHOST_LOG_CONFIG(INFO,
 		"guest memory region size: 0x%" PRIx64 "\n"
 		"\t guest physical addr: 0x%" PRIx64 "\n"
@@ -1393,6 +1460,7 @@ free_mem_table:
 	free_mem_region(dev);
 	rte_free(dev->mem);
 	dev->mem = NULL;
+
 free_guest_pages:
 	rte_free(dev->guest_pages);
 	dev->guest_pages = NULL;
@@ -2113,6 +2181,8 @@ vhost_user_get_vring_base(struct virtio_net **pdev,
 	msg->size = sizeof(msg->payload.state);
 	msg->fd_num = 0;
 
+	vhost_user_iotlb_flush_all(vq);
+
 	vring_invalidate(dev, vq);
 
 	return RTE_VHOST_MSG_RESULT_REPLY;
@@ -2138,8 +2208,8 @@ vhost_user_set_vring_enable(struct virtio_net **pdev,
 		"set queue enable: %d to qp idx: %d\n",
 		enable, index);
 
-	if (enable && dev->virtqueue[index]->async_registered) {
-		if (dev->virtqueue[index]->async_pkts_inflight_n) {
+	if (enable && dev->virtqueue[index]->async) {
+		if (dev->virtqueue[index]->async->pkts_inflight_n) {
 			VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
 				"async inflight packets must be completed first\n");
 			return RTE_VHOST_MSG_RESULT_ERR;
@@ -2327,7 +2397,7 @@ vhost_user_send_rarp(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		return RTE_VHOST_MSG_RESULT_ERR;
 
 	VHOST_LOG_CONFIG(DEBUG,
-		":: mac: %02x:%02x:%02x:%02x:%02x:%02x\n",
+		":: mac: " RTE_ETHER_ADDR_PRT_FMT "\n",
 		mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
 	memcpy(dev->mac.addr_bytes, mac, 6);
 
@@ -2796,6 +2866,7 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev,
 		break;
 	case VHOST_USER_SET_VRING_NUM:
 	case VHOST_USER_SET_VRING_BASE:
+	case VHOST_USER_GET_VRING_BASE:
 	case VHOST_USER_SET_VRING_ENABLE:
 		vring_idx = msg->payload.state.index;
 		break;
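
For reference, the VFIO pattern introduced by async_dma_map() above can be
restated as a self-contained sketch against DPDK's public rte_vfio API. This
is an editor's illustration, not part of the patch: example_dma_map() and its
flat (address, size) parameters are invented for clarity, while the container
map/unmap calls and the ENODEV/ENOTSUP convention mirror the hunk above.

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <rte_errno.h>
	#include <rte_memory.h>
	#include <rte_vfio.h>

	static int
	example_dma_map(uint64_t host_user_addr, uint64_t size, bool do_map)
	{
		/* Translate the host virtual address to an IOVA, as the patch does. */
		uint64_t host_iova = rte_mem_virt2iova((void *)(uintptr_t)host_user_addr);
		int ret;

		if (do_map)
			ret = rte_vfio_container_dma_map(RTE_VFIO_DEFAULT_CONTAINER_FD,
					host_user_addr, host_iova, size);
		else
			ret = rte_vfio_container_dma_unmap(RTE_VFIO_DEFAULT_CONTAINER_FD,
					host_user_addr, host_iova, size);

		/*
		 * With no vfio-bound device (or vfio unsupported), DPDK's vfio
		 * layer fails with rte_errno set to ENODEV/ENOTSUP; like the
		 * patch, treat that as a no-op rather than an error.
		 */
		if (ret && (rte_errno == ENODEV || rte_errno == ENOTSUP))
			return 0;

		return ret;
	}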
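
The dev->async_copy flag that gates these mappings is set when the
application registers its vhost-user socket with async copy enabled. A
minimal registration sketch, assuming an application-chosen socket path
(register_async_vhost() is an invented helper; the two API calls and the
RTE_VHOST_USER_ASYNC_COPY flag come from rte_vhost.h):

	#include <rte_vhost.h>

	static int
	register_async_vhost(const char *path)
	{
		/* Async copy makes vhost populate guest memory at mmap time
		 * and, with this patch, DMA-map it through VFIO as well. */
		int ret = rte_vhost_driver_register(path, RTE_VHOST_USER_ASYNC_COPY);
		if (ret != 0)
			return ret;
		return rte_vhost_driver_start(path);
	}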
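
Likewise, the vhost_user_send_rarp() hunk replaces a hand-rolled
"%02x:..." format string with the RTE_ETHER_ADDR_PRT_FMT macro. A usage
sketch (print_mac() is invented; the macros are the real ones from
rte_ether.h, with RTE_ETHER_ADDR_BYTES as the companion macro that expands
a struct rte_ether_addr into the six byte arguments):

	#include <stdio.h>
	#include <rte_ether.h>

	static void
	print_mac(const struct rte_ether_addr *mac)
	{
		/* RTE_ETHER_ADDR_PRT_FMT is a printf-style format for a MAC
		 * address; RTE_ETHER_ADDR_BYTES supplies its six arguments. */
		printf("mac: " RTE_ETHER_ADDR_PRT_FMT "\n", RTE_ETHER_ADDR_BYTES(mac));
	}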