diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c
index 17a54acf1e..e6ff045c95 100644
--- a/drivers/common/mlx5/mlx5_common.c
+++ b/drivers/common/mlx5/mlx5_common.c
@@ -13,6 +13,7 @@
 
 #include "mlx5_common.h"
 #include "mlx5_common_os.h"
+#include "mlx5_common_mp.h"
 #include "mlx5_common_log.h"
 #include "mlx5_common_defs.h"
 #include "mlx5_common_private.h"
@@ -258,12 +259,6 @@ is_valid_class_combination(uint32_t user_classes)
 	return 0;
 }
 
-static bool
-device_class_enabled(const struct mlx5_common_device *device, uint32_t class)
-{
-	return (device->classes_loaded & class) > 0;
-}
-
 static bool
 mlx5_bus_match(const struct mlx5_class_driver *drv,
 	       const struct rte_device *dev)
@@ -308,6 +303,187 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size)
 #endif
 }
 
+/**
+ * Register the mempool for the protection domain.
+ *
+ * @param cdev
+ *   Pointer to the mlx5 common device.
+ * @param mp
+ *   Mempool being registered.
+ *
+ * @return
+ *   0 on success, (-1) on failure and rte_errno is set.
+ */
+static int
+mlx5_dev_mempool_register(struct mlx5_common_device *cdev,
+			  struct rte_mempool *mp)
+{
+	struct mlx5_mp_id mp_id;
+
+	mlx5_mp_id_init(&mp_id, 0);
+	return mlx5_mr_mempool_register(&cdev->mr_scache, cdev->pd, mp, &mp_id);
+}
+
+/**
+ * Unregister the mempool from the protection domain.
+ *
+ * @param cdev
+ *   Pointer to the mlx5 common device.
+ * @param mp
+ *   Mempool being unregistered.
+ */
+void
+mlx5_dev_mempool_unregister(struct mlx5_common_device *cdev,
+			    struct rte_mempool *mp)
+{
+	struct mlx5_mp_id mp_id;
+
+	mlx5_mp_id_init(&mp_id, 0);
+	if (mlx5_mr_mempool_unregister(&cdev->mr_scache, mp, &mp_id) < 0)
+		DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
+			mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to register mempools for the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_register_cb(struct rte_mempool *mp, void *arg)
+{
+	struct mlx5_common_device *cdev = arg;
+	int ret;
+
+	ret = mlx5_dev_mempool_register(cdev, mp);
+	if (ret < 0 && rte_errno != EEXIST)
+		DRV_LOG(ERR,
+			"Failed to register existing mempool %s for PD %p: %s",
+			mp->name, cdev->pd, rte_strerror(rte_errno));
+}
+
+/**
+ * rte_mempool_walk() callback to unregister mempools
+ * from the protection domain.
+ *
+ * @param mp
+ *   The mempool being walked.
+ * @param arg
+ *   Pointer to the device shared context.
+ */
+static void
+mlx5_dev_mempool_unregister_cb(struct rte_mempool *mp, void *arg)
+{
+	mlx5_dev_mempool_unregister((struct mlx5_common_device *)arg, mp);
+}
+
+/**
+ * Mempool life cycle callback for mlx5 common devices.
+ *
+ * @param event
+ *   Mempool life cycle event.
+ * @param mp
+ *   Associated mempool.
+ * @param arg
+ *   Pointer to a device shared context.
+ */
+static void
+mlx5_dev_mempool_event_cb(enum rte_mempool_event event, struct rte_mempool *mp,
+			  void *arg)
+{
+	struct mlx5_common_device *cdev = arg;
+
+	switch (event) {
+	case RTE_MEMPOOL_EVENT_READY:
+		if (mlx5_dev_mempool_register(cdev, mp) < 0)
+			DRV_LOG(ERR,
+				"Failed to register new mempool %s for PD %p: %s",
+				mp->name, cdev->pd, rte_strerror(rte_errno));
+		break;
+	case RTE_MEMPOOL_EVENT_DESTROY:
+		mlx5_dev_mempool_unregister(cdev, mp);
+		break;
+	}
+}
+
+int
+mlx5_dev_mempool_subscribe(struct mlx5_common_device *cdev)
+{
+	int ret = 0;
+
+	if (!cdev->config.mr_mempool_reg_en)
+		return 0;
+	rte_rwlock_write_lock(&cdev->mr_scache.mprwlock);
+	if (cdev->mr_scache.mp_cb_registered)
+		goto exit;
+	/* Callback for this device may be already registered. */
+	ret = rte_mempool_event_callback_register(mlx5_dev_mempool_event_cb,
+						  cdev);
+	if (ret != 0 && rte_errno != EEXIST)
+		goto exit;
+	/* Register mempools only once for this device. */
+	if (ret == 0)
+		rte_mempool_walk(mlx5_dev_mempool_register_cb, cdev);
+	ret = 0;
+	cdev->mr_scache.mp_cb_registered = 1;
+exit:
+	rte_rwlock_write_unlock(&cdev->mr_scache.mprwlock);
+	return ret;
+}
+
+static void
+mlx5_dev_mempool_unsubscribe(struct mlx5_common_device *cdev)
+{
+	int ret;
+
+	if (!cdev->mr_scache.mp_cb_registered ||
+	    !cdev->config.mr_mempool_reg_en)
+		return;
+	/* Stop watching for mempool events and unregister all mempools. */
+	ret = rte_mempool_event_callback_unregister(mlx5_dev_mempool_event_cb,
+						    cdev);
+	if (ret == 0)
+		rte_mempool_walk(mlx5_dev_mempool_unregister_cb, cdev);
+}
+
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ *   Memory event type.
+ * @param addr
+ *   Address of memory.
+ * @param len
+ *   Size of memory.
+ */
+static void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+		     size_t len, void *arg __rte_unused)
+{
+	struct mlx5_common_device *cdev;
+
+	/* Must be called from the primary process. */
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	switch (event_type) {
+	case RTE_MEM_EVENT_FREE:
+		pthread_mutex_lock(&devices_list_lock);
+		/* Iterate all the existing mlx5 devices. */
+		TAILQ_FOREACH(cdev, &devices_list, next)
+			mlx5_free_mr_by_addr(&cdev->mr_scache,
+					     mlx5_os_get_ctx_device_name
+					     (cdev->ctx),
+					     addr, len);
+		pthread_mutex_unlock(&devices_list_lock);
+		break;
+	case RTE_MEM_EVENT_ALLOC:
+	default:
+		break;
+	}
+}
+
 /**
  * Uninitialize all HW global of device context.
  *
@@ -376,8 +552,14 @@ mlx5_common_dev_release(struct mlx5_common_device *cdev)
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_REMOVE(&devices_list, cdev, next);
 	pthread_mutex_unlock(&devices_list_lock);
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		if (TAILQ_EMPTY(&devices_list))
+			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
+							  NULL);
+		mlx5_dev_mempool_unsubscribe(cdev);
+		mlx5_mr_release_cache(&cdev->mr_scache);
 		mlx5_dev_hw_global_release(cdev);
+	}
 	rte_free(cdev);
 }
 
@@ -412,6 +594,18 @@ mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes)
 		rte_free(cdev);
 		return NULL;
 	}
+	/* Initialize global MR cache resources and update its functions. */
+	ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
+	if (ret) {
+		DRV_LOG(ERR, "Failed to initialize global MR share cache.");
+		mlx5_dev_hw_global_release(cdev);
+		rte_free(cdev);
+		return NULL;
+	}
+	/* Register callback function for global shared MR cache management. */
+	if (TAILQ_EMPTY(&devices_list))
+		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+						mlx5_mr_mem_event_cb, NULL);
 exit:
 	pthread_mutex_lock(&devices_list_lock);
 	TAILQ_INSERT_HEAD(&devices_list, cdev, next);
@@ -545,62 +739,106 @@ mlx5_common_dev_remove(struct rte_device *eal_dev)
 	return ret;
 }
 
+/**
+ * Callback to DMA map external memory to a device.
+ *
+ * @param rte_dev
+ *   Pointer to the generic device.
+ * @param addr
+ *   Starting virtual address of memory to be mapped.
+ * @param iova
+ *   Starting IOVA address of memory to be mapped.
+ * @param len
+ *   Length of memory segment being mapped.
+ *
+ * @return
+ *   0 on success, negative value on error.
+ */
 int
-mlx5_common_dev_dma_map(struct rte_device *dev, void *addr, uint64_t iova,
-			size_t len)
+mlx5_common_dev_dma_map(struct rte_device *rte_dev, void *addr,
+			uint64_t iova __rte_unused, size_t len)
 {
-	struct mlx5_class_driver *driver = NULL;
-	struct mlx5_class_driver *temp;
-	struct mlx5_common_device *mdev;
-	int ret = -EINVAL;
+	struct mlx5_common_device *dev;
+	struct mlx5_mr *mr;
 
-	mdev = to_mlx5_device(dev);
-	if (!mdev)
-		return -ENODEV;
-	TAILQ_FOREACH(driver, &drivers_list, next) {
-		if (!device_class_enabled(mdev, driver->drv_class) ||
-		    driver->dma_map == NULL)
-			continue;
-		ret = driver->dma_map(dev, addr, iova, len);
-		if (ret)
-			goto map_err;
+	dev = to_mlx5_device(rte_dev);
+	if (!dev) {
+		DRV_LOG(WARNING,
+			"Unable to find matching mlx5 device to device %s",
+			rte_dev->name);
+		rte_errno = ENODEV;
+		return -1;
 	}
-	return ret;
-map_err:
-	TAILQ_FOREACH(temp, &drivers_list, next) {
-		if (temp == driver)
-			break;
-		if (device_class_enabled(mdev, temp->drv_class) &&
-		    temp->dma_map && temp->dma_unmap)
-			temp->dma_unmap(dev, addr, iova, len);
+	mr = mlx5_create_mr_ext(dev->pd, (uintptr_t)addr, len,
+				SOCKET_ID_ANY, dev->mr_scache.reg_mr_cb);
+	if (!mr) {
+		DRV_LOG(WARNING, "Device %s unable to DMA map", rte_dev->name);
+		rte_errno = EINVAL;
+		return -1;
 	}
-	return ret;
+	rte_rwlock_write_lock(&dev->mr_scache.rwlock);
+	LIST_INSERT_HEAD(&dev->mr_scache.mr_list, mr, mr);
+	/* Insert to the global cache table. */
+	mlx5_mr_insert_cache(&dev->mr_scache, mr);
+	rte_rwlock_write_unlock(&dev->mr_scache.rwlock);
+	return 0;
 }
 
+/**
+ * Callback to DMA unmap external memory to a device.
+ *
+ * @param rte_dev
+ *   Pointer to the generic device.
+ * @param addr
+ *   Starting virtual address of memory to be unmapped.
+ * @param iova
+ *   Starting IOVA address of memory to be unmapped.
+ * @param len
+ *   Length of memory segment being unmapped.
+ *
+ * @return
+ *   0 on success, negative value on error.
+ */
 int
-mlx5_common_dev_dma_unmap(struct rte_device *dev, void *addr, uint64_t iova,
-			  size_t len)
+mlx5_common_dev_dma_unmap(struct rte_device *rte_dev, void *addr,
+			  uint64_t iova __rte_unused, size_t len __rte_unused)
 {
-	struct mlx5_class_driver *driver;
-	struct mlx5_common_device *mdev;
-	int local_ret = -EINVAL;
-	int ret = 0;
+	struct mlx5_common_device *dev;
+	struct mr_cache_entry entry;
+	struct mlx5_mr *mr;
 
-	mdev = to_mlx5_device(dev);
-	if (!mdev)
-		return -ENODEV;
-	/* There is no unmap error recovery in current implementation. */
-	TAILQ_FOREACH_REVERSE(driver, &drivers_list, mlx5_drivers, next) {
-		if (!device_class_enabled(mdev, driver->drv_class) ||
-		    driver->dma_unmap == NULL)
-			continue;
-		local_ret = driver->dma_unmap(dev, addr, iova, len);
-		if (local_ret && (ret == 0))
-			ret = local_ret;
+	dev = to_mlx5_device(rte_dev);
+	if (!dev) {
+		DRV_LOG(WARNING,
+			"Unable to find matching mlx5 device to device %s.",
+			rte_dev->name);
+		rte_errno = ENODEV;
+		return -1;
 	}
-	if (local_ret)
-		ret = local_ret;
-	return ret;
+	rte_rwlock_read_lock(&dev->mr_scache.rwlock);
+	mr = mlx5_mr_lookup_list(&dev->mr_scache, &entry, (uintptr_t)addr);
+	if (!mr) {
+		rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
+		DRV_LOG(WARNING,
+			"Address 0x%" PRIxPTR " wasn't registered to device %s",
+			(uintptr_t)addr, rte_dev->name);
+		rte_errno = EINVAL;
+		return -1;
+	}
+	LIST_REMOVE(mr, mr);
+	DRV_LOG(DEBUG, "MR(%p) is removed from list.", (void *)mr);
+	mlx5_mr_free(mr, dev->mr_scache.dereg_mr_cb);
+	mlx5_mr_rebuild_cache(&dev->mr_scache);
+	/*
+	 * No explicit wmb is needed after updating dev_gen due to
+	 * store-release ordering in unlock that provides the
+	 * implicit barrier at the software visible level.
+	 */
+	++dev->mr_scache.dev_gen;
+	DRV_LOG(DEBUG, "Broadcasting local cache flush, gen=%d.",
+		dev->mr_scache.dev_gen);
+	rte_rwlock_read_unlock(&dev->mr_scache.rwlock);
+	return 0;
 }
 
 void
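
The patch above replaces the per-class-driver dma_map/dma_unmap dispatch with a single MR registration on the common device. The following is a minimal usage sketch, not part of the patch: map_external_buf and its port_id/addr/iova/len parameters are hypothetical, and the buffer is assumed to be externally allocated and already registered with rte_extmem_register(). It shows how an application reaches mlx5_common_dev_dma_map()/mlx5_common_dev_dma_unmap() through the generic rte_dev_dma_map() API.

#include <rte_dev.h>
#include <rte_ethdev.h>

static int
map_external_buf(uint16_t port_id, void *addr, uint64_t iova, size_t len)
{
	struct rte_eth_dev_info info;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &info);
	if (ret != 0)
		return ret;
	/*
	 * For an mlx5 port this dispatches to mlx5_common_dev_dma_map():
	 * one MR covering [addr, addr + len) is created on the common
	 * device and inserted into the shared MR cache.
	 */
	ret = rte_dev_dma_map(info.device, addr, iova, len);
	if (ret != 0)
		return ret;
	/* ... the datapath may now use the buffer ... */
	/*
	 * Unmap looks the MR up in the cache, frees it, rebuilds the
	 * cache, and bumps dev_gen so per-queue caches are flushed.
	 */
	return rte_dev_dma_unmap(info.device, addr, iova, len);
}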