From: Maxime Coquelin
Date: Fri, 8 Jan 2021 09:41:49 +0000 (+0100)
Subject: net/virtio: fix memory init with vDPA backend
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=a121f175722f7b717cb7576c5baff1c20dd3920e;p=dpdk.git

net/virtio: fix memory init with vDPA backend

This patch fixes an overhead seen with the mlx5-vdpa kernel driver,
where all the memory tables get updated for every page in the mapped
area. For example, with 2MB hugepages, a single IOTLB_UPDATE for a
1GB region causes 512 memory updates on the mlx5-vdpa side.

With batching mode, the mlx5 driver triggers only a single memory
update for all the IOTLB updates that happen between the batch-begin
and batch-end commands.

Fixes: 6b901437056e ("net/virtio: introduce vhost-vDPA backend")
Cc: stable@dpdk.org

Signed-off-by: Maxime Coquelin
Reviewed-by: Chenbo Xia
---

diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h
index c1dcc50b58..be286173b0 100644
--- a/drivers/net/virtio/virtio_user/vhost.h
+++ b/drivers/net/virtio/virtio_user/vhost.h
@@ -90,6 +90,10 @@ enum vhost_user_request {
 #define VHOST_BACKEND_F_IOTLB_MSG_V2 1
 #endif
 
+#ifndef VHOST_BACKEND_F_IOTLB_BATCH
+#define VHOST_BACKEND_F_IOTLB_BATCH 2
+#endif
+
 extern const char * const vhost_msg_strings[VHOST_USER_MAX];
 
 struct vhost_memory_region {
diff --git a/drivers/net/virtio/virtio_user/vhost_vdpa.c b/drivers/net/virtio/virtio_user/vhost_vdpa.c
index 83c60ea660..004802b9eb 100644
--- a/drivers/net/virtio/virtio_user/vhost_vdpa.c
+++ b/drivers/net/virtio/virtio_user/vhost_vdpa.c
@@ -70,6 +70,8 @@ struct vhost_iotlb_msg {
 #define VHOST_IOTLB_UPDATE         2
 #define VHOST_IOTLB_INVALIDATE     3
 #define VHOST_IOTLB_ACCESS_FAIL    4
+#define VHOST_IOTLB_BATCH_BEGIN    5
+#define VHOST_IOTLB_BATCH_END      6
 	uint8_t type;
 };
 
@@ -84,6 +86,56 @@ struct vhost_msg {
 	};
 };
 
+static int
+vhost_vdpa_iotlb_batch_begin(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
+		return 0;
+
+	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
+		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
+		return -1;
+	}
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch begin (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
+static int
+vhost_vdpa_iotlb_batch_end(struct virtio_user_dev *dev)
+{
+	struct vhost_msg msg = {};
+
+	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_BATCH)))
+		return 0;
+
+	if (!(dev->protocol_features & (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2))) {
+		PMD_DRV_LOG(ERR, "IOTLB_MSG_V2 not supported by the backend.");
+		return -1;
+	}
+
+	msg.type = VHOST_IOTLB_MSG_V2;
+	msg.iotlb.type = VHOST_IOTLB_BATCH_END;
+
+	if (write(dev->vhostfd, &msg, sizeof(msg)) != sizeof(msg)) {
+		PMD_DRV_LOG(ERR, "Failed to send IOTLB batch end (%s)",
+				strerror(errno));
+		return -1;
+	}
+
+	return 0;
+}
+
 static int
 vhost_vdpa_dma_map(struct virtio_user_dev *dev, void *addr,
 				  uint64_t iova, size_t len)
@@ -142,6 +194,39 @@ vhost_vdpa_dma_unmap(struct virtio_user_dev *dev, __rte_unused void *addr,
 	return 0;
 }
 
+static int
+vhost_vdpa_dma_map_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_map(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
+
+static int
+vhost_vdpa_dma_unmap_batch(struct virtio_user_dev *dev, void *addr,
+				  uint64_t iova, size_t len)
+{
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
+	ret = vhost_vdpa_dma_unmap(dev, addr, iova, len);
+
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
+}
 
 static int
 vhost_vdpa_map_contig(const struct rte_memseg_list *msl,
@@ -179,21 +264,32 @@ vhost_vdpa_map(const struct rte_memseg_list *msl, const struct rte_memseg *ms,
 static int
 vhost_vdpa_dma_map_all(struct virtio_user_dev *dev)
 {
+	int ret;
+
+	if (vhost_vdpa_iotlb_batch_begin(dev) < 0)
+		return -1;
+
 	vhost_vdpa_dma_unmap(dev, NULL, 0, SIZE_MAX);
 
 	if (rte_eal_iova_mode() == RTE_IOVA_VA) {
 		/* with IOVA as VA mode, we can get away with mapping contiguous
 		 * chunks rather than going page-by-page.
 		 */
-		int ret = rte_memseg_contig_walk_thread_unsafe(
+		ret = rte_memseg_contig_walk_thread_unsafe(
 				vhost_vdpa_map_contig, dev);
 		if (ret)
-			return ret;
+			goto batch_end;
 		/* we have to continue the walk because we've skipped the
 		 * external segments during the config walk.
 		 */
 	}
-	return rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+	ret = rte_memseg_walk_thread_unsafe(vhost_vdpa_map, dev);
+
+batch_end:
+	if (vhost_vdpa_iotlb_batch_end(dev) < 0)
+		return -1;
+
+	return ret;
 }
 
 /* with below features, vhost vdpa does not need to do the checksum and TSO,
@@ -313,6 +409,6 @@ struct virtio_user_backend_ops virtio_ops_vdpa = {
 	.setup = vhost_vdpa_setup,
 	.send_request = vhost_vdpa_ioctl,
 	.enable_qp = vhost_vdpa_enable_queue_pair,
-	.dma_map = vhost_vdpa_dma_map,
-	.dma_unmap = vhost_vdpa_dma_unmap,
+	.dma_map = vhost_vdpa_dma_map_batch,
+	.dma_unmap = vhost_vdpa_dma_unmap_batch,
 };
diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c
index 39c5dfc9e4..202431ca22 100644
--- a/drivers/net/virtio/virtio_user/virtio_user_dev.c
+++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c
@@ -446,7 +446,8 @@ virtio_user_dev_setup(struct virtio_user_dev *dev)
 	 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
 
 #define VHOST_VDPA_SUPPORTED_PROTOCOL_FEATURES		\
-	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2	|	\
+	 1ULL << VHOST_BACKEND_F_IOTLB_BATCH)
 
 int
 virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues,
		     int cq, int queue_size, const char *mac, char **ifname,