From 9f1d636f3ef08fd69b40cfe6eb954b0c9d1820d2 Mon Sep 17 00:00:00 2001 From: Michael Baum Date: Tue, 19 Oct 2021 23:56:00 +0300 Subject: [PATCH] common/mlx5: share MR management Add global shared MR cache as a field of common device structure. Move MR management to use this global cache for all drivers. Signed-off-by: Michael Baum Acked-by: Matan Azrad --- drivers/common/mlx5/mlx5_common.c | 54 ++++++++++++++++- drivers/common/mlx5/mlx5_common.h | 4 +- drivers/common/mlx5/mlx5_common_mr.c | 7 +-- drivers/common/mlx5/mlx5_common_mr.h | 4 -- drivers/common/mlx5/version.map | 4 -- drivers/compress/mlx5/mlx5_compress.c | 57 +----------------- drivers/crypto/mlx5/mlx5_crypto.c | 56 +---------------- drivers/crypto/mlx5/mlx5_crypto.h | 1 - drivers/net/mlx5/linux/mlx5_mp_os.c | 2 +- drivers/net/mlx5/linux/mlx5_os.c | 5 -- drivers/net/mlx5/mlx5.c | 36 ++--------- drivers/net/mlx5/mlx5.h | 3 - drivers/net/mlx5/mlx5_flow_aso.c | 28 ++++----- drivers/net/mlx5/mlx5_mr.c | 76 +++++++----------------- drivers/net/mlx5/mlx5_mr.h | 26 -------- drivers/net/mlx5/mlx5_rx.c | 1 - drivers/net/mlx5/mlx5_rx.h | 6 +- drivers/net/mlx5/mlx5_rxq.c | 4 +- drivers/net/mlx5/mlx5_rxtx.c | 1 - drivers/net/mlx5/mlx5_rxtx.h | 1 - drivers/net/mlx5/mlx5_rxtx_vec.h | 1 - drivers/net/mlx5/mlx5_trigger.c | 3 +- drivers/net/mlx5/mlx5_tx.c | 1 - drivers/net/mlx5/mlx5_tx.h | 1 - drivers/net/mlx5/mlx5_txq.c | 2 +- drivers/net/mlx5/windows/mlx5_os.c | 14 ----- drivers/regex/mlx5/mlx5_regex.c | 63 -------------------- drivers/regex/mlx5/mlx5_regex.h | 3 - drivers/regex/mlx5/mlx5_regex_control.c | 2 +- drivers/regex/mlx5/mlx5_regex_fastpath.c | 2 +- 30 files changed, 110 insertions(+), 358 deletions(-) delete mode 100644 drivers/net/mlx5/mlx5_mr.h diff --git a/drivers/common/mlx5/mlx5_common.c b/drivers/common/mlx5/mlx5_common.c index 17a54acf1e..d6acf87493 100644 --- a/drivers/common/mlx5/mlx5_common.c +++ b/drivers/common/mlx5/mlx5_common.c @@ -308,6 +308,41 @@ mlx5_dev_to_pci_str(const struct rte_device *dev, char *addr, size_t size) #endif } +/** + * Callback for memory event. + * + * @param event_type + * Memory event type. + * @param addr + * Address of memory. + * @param len + * Size of memory. + */ +static void +mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, + size_t len, void *arg __rte_unused) +{ + struct mlx5_common_device *cdev; + + /* Must be called from the primary process. */ + MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); + switch (event_type) { + case RTE_MEM_EVENT_FREE: + pthread_mutex_lock(&devices_list_lock); + /* Iterate all the existing mlx5 devices. */ + TAILQ_FOREACH(cdev, &devices_list, next) + mlx5_free_mr_by_addr(&cdev->mr_scache, + mlx5_os_get_ctx_device_name + (cdev->ctx), + addr, len); + pthread_mutex_unlock(&devices_list_lock); + break; + case RTE_MEM_EVENT_ALLOC: + default: + break; + } +} + /** * Uninitialize all HW global of device context. 
* @@ -376,8 +411,13 @@ mlx5_common_dev_release(struct mlx5_common_device *cdev) pthread_mutex_lock(&devices_list_lock); TAILQ_REMOVE(&devices_list, cdev, next); pthread_mutex_unlock(&devices_list_lock); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { + if (TAILQ_EMPTY(&devices_list)) + rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", + NULL); + mlx5_mr_release_cache(&cdev->mr_scache); mlx5_dev_hw_global_release(cdev); + } rte_free(cdev); } @@ -412,6 +452,18 @@ mlx5_common_dev_create(struct rte_device *eal_dev, uint32_t classes) rte_free(cdev); return NULL; } + /* Initialize global MR cache resources and update its functions. */ + ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node); + if (ret) { + DRV_LOG(ERR, "Failed to initialize global MR share cache."); + mlx5_dev_hw_global_release(cdev); + rte_free(cdev); + return NULL; + } + /* Register callback function for global shared MR cache management. */ + if (TAILQ_EMPTY(&devices_list)) + rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", + mlx5_mr_mem_event_cb, NULL); exit: pthread_mutex_lock(&devices_list_lock); TAILQ_INSERT_HEAD(&devices_list, cdev, next); diff --git a/drivers/common/mlx5/mlx5_common.h b/drivers/common/mlx5/mlx5_common.h index 8df4f32aa2..1a6b8c0f52 100644 --- a/drivers/common/mlx5/mlx5_common.h +++ b/drivers/common/mlx5/mlx5_common.h @@ -350,6 +350,7 @@ struct mlx5_common_device { void *ctx; /* Verbs/DV/DevX context. */ void *pd; /* Protection Domain. */ uint32_t pdn; /* Protection Domain Number. */ + struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */ struct mlx5_common_dev_config config; /* Device configuration. */ }; @@ -453,8 +454,7 @@ mlx5_dev_is_pci(const struct rte_device *dev); __rte_internal uint32_t mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id, - struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf, - struct mlx5_mr_share_cache *share_cache); + struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf); /* mlx5_common_os.c */ diff --git a/drivers/common/mlx5/mlx5_common_mr.c b/drivers/common/mlx5/mlx5_common_mr.c index 73b902b652..6b455ccda3 100644 --- a/drivers/common/mlx5/mlx5_common_mr.c +++ b/drivers/common/mlx5/mlx5_common_mr.c @@ -1848,16 +1848,13 @@ mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache, * Pointer to per-queue MR control structure. * @param mbuf * Pointer to mbuf. - * @param share_cache - * Pointer to a global shared MR cache. * * @return * Searched LKey on success, UINT32_MAX on no match. */ uint32_t mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id, - struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf, - struct mlx5_mr_share_cache *share_cache) + struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf) { uint32_t lkey; uintptr_t addr = (uintptr_t)mbuf->buf_addr; @@ -1871,6 +1868,6 @@ mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id, if (likely(lkey != UINT32_MAX)) return lkey; /* Take slower bottom-half on miss. 
*/ - return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, share_cache, mr_ctrl, + return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl, addr, cdev->config.mr_ext_memseg_en); } diff --git a/drivers/common/mlx5/mlx5_common_mr.h b/drivers/common/mlx5/mlx5_common_mr.h index 36689dfb54..0bc3519fd9 100644 --- a/drivers/common/mlx5/mlx5_common_mr.h +++ b/drivers/common/mlx5/mlx5_common_mr.h @@ -140,9 +140,7 @@ __rte_internal uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache, struct mlx5_mr_ctrl *mr_ctrl, struct rte_mempool *mp, uintptr_t addr); -__rte_internal void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache); -__rte_internal int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket); __rte_internal void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused); @@ -150,7 +148,6 @@ __rte_internal void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache); __rte_internal void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl); -__rte_internal void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache, const char *ibdev_name, const void *addr, size_t len); __rte_internal @@ -183,7 +180,6 @@ __rte_internal void mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr); -__rte_internal void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb); diff --git a/drivers/common/mlx5/version.map b/drivers/common/mlx5/version.map index 6ae1d891dd..db37f0dfed 100644 --- a/drivers/common/mlx5/version.map +++ b/drivers/common/mlx5/version.map @@ -109,7 +109,6 @@ INTERNAL { mlx5_mr_addr2mr_bh; mlx5_mr_btree_dump; mlx5_mr_btree_free; - mlx5_mr_create_cache; mlx5_mr_create_primary; mlx5_mr_ctrl_init; mlx5_mr_dump_cache; @@ -119,9 +118,7 @@ INTERNAL { mlx5_mr_lookup_cache; mlx5_mr_lookup_list; mlx5_mr_mb2mr; - mlx5_free_mr_by_addr; mlx5_mr_rebuild_cache; - mlx5_mr_release_cache; mlx5_nl_allmulti; # WINDOWS_NO_EXPORT mlx5_nl_ifindex; # WINDOWS_NO_EXPORT @@ -139,7 +136,6 @@ INTERNAL { mlx5_os_umem_dereg; mlx5_os_umem_reg; - mlx5_os_set_reg_mr_cb; mlx5_realloc; diff --git a/drivers/compress/mlx5/mlx5_compress.c b/drivers/compress/mlx5/mlx5_compress.c index a5cec27894..f68800ff5d 100644 --- a/drivers/compress/mlx5/mlx5_compress.c +++ b/drivers/compress/mlx5/mlx5_compress.c @@ -43,7 +43,6 @@ struct mlx5_compress_priv { struct rte_compressdev_config dev_config; LIST_HEAD(xform_list, mlx5_compress_xform) xform_list; rte_spinlock_t xform_sl; - struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */ volatile uint64_t *uar_addr; /* HCA caps*/ uint32_t mmo_decomp_sq:1; @@ -206,7 +205,7 @@ mlx5_compress_qp_setup(struct rte_compressdev *dev, uint16_t qp_id, return -rte_errno; } dev->data->queue_pairs[qp_id] = qp; - if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen, + if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen, priv->dev_config.socket_id)) { DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.", (uint32_t)qp_id); @@ -444,8 +443,7 @@ mlx5_compress_dseg_set(struct mlx5_compress_qp *qp, uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset); dseg->bcount = rte_cpu_to_be_32(len); - dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf, - &qp->priv->mr_scache); + dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf); dseg->pbuf = rte_cpu_to_be_64(addr); return dseg->lkey; } @@ -679,41 +677,6 @@ mlx5_compress_uar_prepare(struct mlx5_compress_priv *priv) return 0; } -/** - * Callback for memory event. 
- * - * @param event_type - * Memory event type. - * @param addr - * Address of memory. - * @param len - * Size of memory. - */ -static void -mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg __rte_unused) -{ - struct mlx5_compress_priv *priv; - - /* Must be called from the primary process. */ - MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - switch (event_type) { - case RTE_MEM_EVENT_FREE: - pthread_mutex_lock(&priv_list_lock); - /* Iterate all the existing mlx5 devices. */ - TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next) - mlx5_free_mr_by_addr(&priv->mr_scache, - mlx5_os_get_ctx_device_name - (priv->cdev->ctx), - addr, len); - pthread_mutex_unlock(&priv_list_lock); - break; - case RTE_MEM_EVENT_ALLOC: - default: - break; - } -} - static int mlx5_compress_dev_probe(struct mlx5_common_device *cdev) { @@ -765,18 +728,6 @@ mlx5_compress_dev_probe(struct mlx5_common_device *cdev) rte_compressdev_pmd_destroy(priv->compressdev); return -1; } - if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) { - DRV_LOG(ERR, "Failed to allocate shared cache MR memory."); - mlx5_compress_uar_release(priv); - rte_compressdev_pmd_destroy(priv->compressdev); - rte_errno = ENOMEM; - return -rte_errno; - } - /* Register callback function for global shared MR cache management. */ - if (TAILQ_EMPTY(&mlx5_compress_priv_list)) - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_compress_mr_mem_event_cb, - NULL); pthread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); @@ -796,10 +747,6 @@ mlx5_compress_dev_remove(struct mlx5_common_device *cdev) TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); if (priv) { - if (TAILQ_EMPTY(&mlx5_compress_priv_list)) - rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", - NULL); - mlx5_mr_release_cache(&priv->mr_scache); mlx5_compress_uar_release(priv); rte_compressdev_pmd_destroy(priv->compressdev); } diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c index 1105d3fcd5..d857331225 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.c +++ b/drivers/crypto/mlx5/mlx5_crypto.c @@ -316,8 +316,7 @@ mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp, *remain -= data_len; klm->bcount = rte_cpu_to_be_32(data_len); klm->pbuf = rte_cpu_to_be_64(addr); - klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf, - &priv->mr_scache); + klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf); return klm->lkey; } @@ -643,7 +642,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, DRV_LOG(ERR, "Failed to create QP."); goto error; } - if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen, + if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen, priv->dev_config.socket_id) != 0) { DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.", (uint32_t)qp_id); @@ -844,41 +843,6 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs, return 0; } -/** - * Callback for memory event. - * - * @param event_type - * Memory event type. - * @param addr - * Address of memory. - * @param len - * Size of memory. - */ -static void -mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg __rte_unused) -{ - struct mlx5_crypto_priv *priv; - - /* Must be called from the primary process. 
*/ - MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - switch (event_type) { - case RTE_MEM_EVENT_FREE: - pthread_mutex_lock(&priv_list_lock); - /* Iterate all the existing mlx5 devices. */ - TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next) - mlx5_free_mr_by_addr(&priv->mr_scache, - mlx5_os_get_ctx_device_name - (priv->cdev->ctx), - addr, len); - pthread_mutex_unlock(&priv_list_lock); - break; - case RTE_MEM_EVENT_ALLOC: - default: - break; - } -} - static int mlx5_crypto_dev_probe(struct mlx5_common_device *cdev) { @@ -940,13 +904,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev) rte_cryptodev_pmd_destroy(priv->crypto_dev); return -1; } - if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) { - DRV_LOG(ERR, "Failed to allocate shared cache MR memory."); - mlx5_crypto_uar_release(priv); - rte_cryptodev_pmd_destroy(priv->crypto_dev); - rte_errno = ENOMEM; - return -rte_errno; - } priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag); priv->max_segs_num = devarg_prms.max_segs_num; priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) + @@ -960,11 +917,6 @@ mlx5_crypto_dev_probe(struct mlx5_common_device *cdev) priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size; priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB; priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg); - /* Register callback function for global shared MR cache management. */ - if (TAILQ_EMPTY(&mlx5_crypto_priv_list)) - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_crypto_mr_mem_event_cb, - NULL); pthread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); @@ -984,10 +936,6 @@ mlx5_crypto_dev_remove(struct mlx5_common_device *cdev) TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); if (priv) { - if (TAILQ_EMPTY(&mlx5_crypto_priv_list)) - rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", - NULL); - mlx5_mr_release_cache(&priv->mr_scache); mlx5_crypto_uar_release(priv); rte_cryptodev_pmd_destroy(priv->crypto_dev); claim_zero(mlx5_devx_cmd_destroy(priv->login_obj)); diff --git a/drivers/crypto/mlx5/mlx5_crypto.h b/drivers/crypto/mlx5/mlx5_crypto.h index 030f369423..69cef81d77 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.h +++ b/drivers/crypto/mlx5/mlx5_crypto.h @@ -26,7 +26,6 @@ struct mlx5_crypto_priv { uint32_t max_segs_num; /* Maximum supported data segs. */ struct mlx5_hlist *dek_hlist; /* Dek hash list. */ struct rte_cryptodev_config dev_config; - struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. 
*/ struct mlx5_devx_obj *login_obj; uint64_t keytag; uint16_t wqe_set_size; diff --git a/drivers/net/mlx5/linux/mlx5_mp_os.c b/drivers/net/mlx5/linux/mlx5_mp_os.c index 286a7caf36..c3b6495d9e 100644 --- a/drivers/net/mlx5/linux/mlx5_mp_os.c +++ b/drivers/net/mlx5/linux/mlx5_mp_os.c @@ -91,7 +91,7 @@ mlx5_mp_os_primary_handle(const struct rte_mp_msg *mp_msg, const void *peer) case MLX5_MP_REQ_CREATE_MR: mp_init_msg(&priv->mp_id, &mp_res, param->type); lkey = mlx5_mr_create_primary(cdev->pd, - &priv->sh->share_cache, + &priv->sh->cdev->mr_scache, &entry, param->args.addr, cdev->config.mr_ext_memseg_en); if (lkey == UINT32_MAX) diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c index 7bed7da7bf..54e0ba9f3a 100644 --- a/drivers/net/mlx5/linux/mlx5_os.c +++ b/drivers/net/mlx5/linux/mlx5_os.c @@ -44,7 +44,6 @@ #include "mlx5_rx.h" #include "mlx5_tx.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" #include "mlx5_flow.h" #include "rte_pmd_mlx5.h" #include "mlx5_verbs.h" @@ -623,10 +622,6 @@ mlx5_init_once(void) case RTE_PROC_PRIMARY: if (sd->init_done) break; - LIST_INIT(&sd->mem_event_cb_list); - rte_rwlock_init(&sd->mem_event_rwlock); - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_mr_mem_event_cb, NULL); ret = mlx5_mp_init_primary(MLX5_MP_NAME, mlx5_mp_os_primary_handle); if (ret) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 977b4fb13a..0ebda64f3e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -36,7 +36,6 @@ #include "mlx5_rx.h" #include "mlx5_tx.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" #include "mlx5_flow.h" #include "mlx5_flow_os.h" #include "rte_pmd_mlx5.h" @@ -1142,7 +1141,7 @@ mlx5_dev_ctx_shared_mempool_unregister(struct mlx5_dev_ctx_shared *sh, struct mlx5_mp_id mp_id; mlx5_mp_id_init(&mp_id, 0); - if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0) + if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0) DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s", mp->name, sh->cdev->pd, rte_strerror(rte_errno)); } @@ -1164,7 +1163,7 @@ mlx5_dev_ctx_shared_mempool_register_cb(struct rte_mempool *mp, void *arg) int ret; mlx5_mp_id_init(&mp_id, 0); - ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp, + ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp, &mp_id); if (ret < 0 && rte_errno != EEXIST) DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s", @@ -1207,8 +1206,8 @@ mlx5_dev_ctx_shared_mempool_event_cb(enum rte_mempool_event event, switch (event) { case RTE_MEMPOOL_EVENT_READY: mlx5_mp_id_init(&mp_id, 0); - if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp, - &mp_id) < 0) + if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, + mp, &mp_id) < 0) DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s", mp->name, sh->cdev->pd, rte_strerror(rte_errno)); @@ -1372,20 +1371,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++) rte_spinlock_init(&sh->uar_lock[i]); #endif - /* - * Once the device is added to the list of memory event - * callback, its global MR cache table cannot be expanded - * on the fly because of deadlock. If it overflows, lookup - * should be done by searching MR list linearly, which is slow. - * - * At this point the device is not added to the memory - * event list yet, context is just being created. 
- */ - err = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node); - if (err) { - err = rte_errno; - goto error; - } mlx5_os_dev_shared_handler_install(sh); if (LIST_EMPTY(&mlx5_dev_ctx_list)) { err = mlx5_flow_os_init_workspace_once(); @@ -1395,11 +1380,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, mlx5_flow_aging_init(sh); mlx5_flow_counters_mng_init(sh); mlx5_flow_ipool_create(sh, config); - /* Add device to memory callback list. */ - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list, - sh, mem_event_cb); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); /* Add context to the global device list. */ LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next); rte_spinlock_init(&sh->geneve_tlv_opt_sl); @@ -1410,8 +1390,6 @@ error: pthread_mutex_destroy(&sh->txpp.mutex); pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex); MLX5_ASSERT(sh); - if (sh->share_cache.cache.table) - mlx5_mr_btree_free(&sh->share_cache.cache); if (sh->tis) claim_zero(mlx5_devx_cmd_destroy(sh->tis)); if (sh->td) @@ -1467,12 +1445,6 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) if (ret == 0) rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb, sh); - /* Remove from memory callback device list. */ - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - LIST_REMOVE(sh, mem_event_cb); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); - /* Release created Memory Regions. */ - mlx5_mr_release_cache(&sh->share_cache); /* Remove context from the global device list. */ LIST_REMOVE(sh, next); /* Release flow workspaces objects on the last device. */ diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 119abc3649..90be83d4cc 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -1135,9 +1135,6 @@ struct mlx5_dev_ctx_shared { char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */ struct mlx5_dev_attr device_attr; /* Device properties. */ int numa_node; /* Numa node of backing physical device. */ - LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb; - /**< Called by memory event callback. */ - struct mlx5_mr_share_cache share_cache; /* Packet pacing related structure. */ struct mlx5_dev_txpp txpp; /* Shared DV/DR flow data section. */ diff --git a/drivers/net/mlx5/mlx5_flow_aso.c b/drivers/net/mlx5/mlx5_flow_aso.c index 8f3d2ffc2c..1fc1000b01 100644 --- a/drivers/net/mlx5/mlx5_flow_aso.c +++ b/drivers/net/mlx5/mlx5_flow_aso.c @@ -60,17 +60,17 @@ mlx5_aso_cq_create(void *ctx, struct mlx5_aso_cq *cq, uint16_t log_desc_n, /** * Free MR resources. * - * @param[in] sh - * Pointer to shared device context. + * @param[in] cdev + * Pointer to the mlx5 common device. * @param[in] mr * MR to free. */ static void -mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr) +mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr) { void *addr = mr->addr; - sh->share_cache.dereg_mr_cb(mr); + cdev->mr_scache.dereg_mr_cb(mr); mlx5_free(addr); memset(mr, 0, sizeof(*mr)); } @@ -78,8 +78,8 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr) /** * Register Memory Region. * - * @param[in] sh - * Pointer to shared device context. + * @param[in] cdev + * Pointer to the mlx5 common device. * @param[in] length * Size of MR buffer. * @param[in/out] mr @@ -91,7 +91,7 @@ mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr) * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length, +mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length, struct mlx5_pmd_mr *mr, int socket) { @@ -103,7 +103,7 @@ mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length, DRV_LOG(ERR, "Failed to create ASO bits mem for MR."); return -1; } - ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr); + ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr); if (ret) { DRV_LOG(ERR, "Failed to create direct Mkey."); mlx5_free(mr->addr); @@ -313,14 +313,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh, switch (aso_opc_mod) { case ASO_OPC_MOD_FLOW_HIT: - if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) * + if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) * sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0)) return -1; if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0, sh->tx_uar, cdev->pdn, MLX5_ASO_QUEUE_LOG_DESC, cdev->config.hca_attr.sq_ts_format)) { - mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr); + mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr); return -1; } mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq); @@ -335,14 +335,14 @@ mlx5_aso_queue_init(struct mlx5_dev_ctx_shared *sh, break; case ASO_OPC_MOD_CONNECTION_TRACKING: /* 64B per object for query. */ - if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n, + if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n, &sh->ct_mng->aso_sq.mr, 0)) return -1; if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0, sh->tx_uar, cdev->pdn, MLX5_ASO_QUEUE_LOG_DESC, cdev->config.hca_attr.sq_ts_format)) { - mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr); + mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr); return -1; } mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq); @@ -370,14 +370,14 @@ mlx5_aso_queue_uninit(struct mlx5_dev_ctx_shared *sh, switch (aso_opc_mod) { case ASO_OPC_MOD_FLOW_HIT: - mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr); + mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr); sq = &sh->aso_age_mng->aso_sq; break; case ASO_OPC_MOD_POLICER: sq = &sh->mtrmng->pools_mng.sq; break; case ASO_OPC_MOD_CONNECTION_TRACKING: - mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr); + mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr); sq = &sh->ct_mng->aso_sq; break; default: diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c index 74bdecf36a..51cac0606c 100644 --- a/drivers/net/mlx5/mlx5_mr.c +++ b/drivers/net/mlx5/mlx5_mr.c @@ -12,46 +12,10 @@ #include #include "mlx5.h" -#include "mlx5_mr.h" #include "mlx5_rxtx.h" #include "mlx5_rx.h" #include "mlx5_tx.h" -/** - * Callback for memory event. This can be called from both primary and secondary - * process. - * - * @param event_type - * Memory event type. - * @param addr - * Address of memory. - * @param len - * Size of memory. - */ -void -mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg __rte_unused) -{ - struct mlx5_dev_ctx_shared *sh; - struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list; - - /* Must be called from the primary process. */ - MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - switch (event_type) { - case RTE_MEM_EVENT_FREE: - rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock); - /* Iterate all the existing mlx5 devices. 
*/ - LIST_FOREACH(sh, dev_list, mem_event_cb) - mlx5_free_mr_by_addr(&sh->share_cache, - sh->ibdev_name, addr, len); - rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock); - break; - case RTE_MEM_EVENT_ALLOC: - default: - break; - } -} - /** * Bottom-half of LKey search on Tx. * @@ -72,7 +36,7 @@ mlx5_tx_addr2mr_bh(struct mlx5_txq_data *txq, uintptr_t addr) struct mlx5_priv *priv = txq_ctrl->priv; return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id, - &priv->sh->share_cache, mr_ctrl, addr, + &priv->sh->cdev->mr_scache, mr_ctrl, addr, priv->sh->cdev->config.mr_ext_memseg_en); } @@ -110,7 +74,7 @@ mlx5_tx_mb2mr_bh(struct mlx5_txq_data *txq, struct rte_mbuf *mb) mp = buf->mp; } if (mp != NULL) { - lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache, + lkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache, mr_ctrl, mp, addr); /* * Lookup can only fail on invalid input, e.g. "addr" @@ -169,7 +133,7 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr, struct rte_eth_dev *dev; struct mlx5_mr *mr; struct mlx5_priv *priv; - struct mlx5_dev_ctx_shared *sh; + struct mlx5_common_device *cdev; dev = dev_to_eth_dev(rte_dev); if (!dev) { @@ -179,20 +143,20 @@ mlx5_net_dma_map(struct rte_device *rte_dev, void *addr, return -1; } priv = dev->data->dev_private; - sh = priv->sh; - mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len, - SOCKET_ID_ANY, sh->share_cache.reg_mr_cb); + cdev = priv->sh->cdev; + mr = mlx5_create_mr_ext(cdev->pd, (uintptr_t)addr, len, + SOCKET_ID_ANY, cdev->mr_scache.reg_mr_cb); if (!mr) { DRV_LOG(WARNING, "port %u unable to dma map", dev->data->port_id); rte_errno = EINVAL; return -1; } - rte_rwlock_write_lock(&sh->share_cache.rwlock); - LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr); + rte_rwlock_write_lock(&cdev->mr_scache.rwlock); + LIST_INSERT_HEAD(&cdev->mr_scache.mr_list, mr, mr); /* Insert to the global cache table. */ - mlx5_mr_insert_cache(&sh->share_cache, mr); - rte_rwlock_write_unlock(&sh->share_cache.rwlock); + mlx5_mr_insert_cache(&cdev->mr_scache, mr); + rte_rwlock_write_unlock(&cdev->mr_scache.rwlock); return 0; } @@ -217,7 +181,7 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr, { struct rte_eth_dev *dev; struct mlx5_priv *priv; - struct mlx5_dev_ctx_shared *sh; + struct mlx5_common_device *cdev; struct mlx5_mr *mr; struct mr_cache_entry entry; @@ -229,11 +193,11 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr, return -1; } priv = dev->data->dev_private; - sh = priv->sh; - rte_rwlock_write_lock(&sh->share_cache.rwlock); - mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr); + cdev = priv->sh->cdev; + rte_rwlock_write_lock(&cdev->mr_scache.rwlock); + mr = mlx5_mr_lookup_list(&cdev->mr_scache, &entry, (uintptr_t)addr); if (!mr) { - rte_rwlock_write_unlock(&sh->share_cache.rwlock); + rte_rwlock_write_unlock(&cdev->mr_scache.rwlock); DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s", (uintptr_t)addr, rte_dev->name); rte_errno = EINVAL; @@ -242,16 +206,16 @@ mlx5_net_dma_unmap(struct rte_device *rte_dev, void *addr, LIST_REMOVE(mr, mr); DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id, (void *)mr); - mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb); - mlx5_mr_rebuild_cache(&sh->share_cache); + mlx5_mr_free(mr, cdev->mr_scache.dereg_mr_cb); + mlx5_mr_rebuild_cache(&cdev->mr_scache); /* * No explicit wmb is needed after updating dev_gen due to * store-release ordering in unlock that provides the * implicit barrier at the software visible level. 
*/ - ++sh->share_cache.dev_gen; + ++cdev->mr_scache.dev_gen; DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d", - sh->share_cache.dev_gen); - rte_rwlock_write_unlock(&sh->share_cache.rwlock); + cdev->mr_scache.dev_gen); + rte_rwlock_write_unlock(&cdev->mr_scache.rwlock); return 0; } diff --git a/drivers/net/mlx5/mlx5_mr.h b/drivers/net/mlx5/mlx5_mr.h deleted file mode 100644 index c984e777b5..0000000000 --- a/drivers/net/mlx5/mlx5_mr.h +++ /dev/null @@ -1,26 +0,0 @@ -/* SPDX-License-Identifier: BSD-3-Clause - * Copyright 2018 6WIND S.A. - * Copyright 2018 Mellanox Technologies, Ltd - */ - -#ifndef RTE_PMD_MLX5_MR_H_ -#define RTE_PMD_MLX5_MR_H_ - -#include -#include -#include - -#include -#include -#include -#include - -#include - -/* First entry must be NULL for comparison. */ -#define mlx5_mr_btree_len(bt) ((bt)->len - 1) - -void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg); - -#endif /* RTE_PMD_MLX5_MR_H_ */ diff --git a/drivers/net/mlx5/mlx5_rx.c b/drivers/net/mlx5/mlx5_rx.c index 1a9eb35acc..c5fc5f044d 100644 --- a/drivers/net/mlx5/mlx5_rx.c +++ b/drivers/net/mlx5/mlx5_rx.c @@ -22,7 +22,6 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5.h" -#include "mlx5_mr.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" #include "mlx5_rx.h" diff --git a/drivers/net/mlx5/mlx5_rx.h b/drivers/net/mlx5/mlx5_rx.h index a90cb497d1..bf655307d5 100644 --- a/drivers/net/mlx5/mlx5_rx.h +++ b/drivers/net/mlx5/mlx5_rx.h @@ -18,11 +18,13 @@ #include "mlx5.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" /* Support tunnel matching. */ #define MLX5_FLOW_TUNNEL 10 +/* First entry must be NULL for comparison. */ +#define mlx5_mr_btree_len(bt) ((bt)->len - 1) + struct mlx5_rxq_stats { #ifdef MLX5_PMD_SOFT_COUNTERS uint64_t ipackets; /**< Total of successfully received packets. */ @@ -309,7 +311,7 @@ mlx5_rx_addr2mr(struct mlx5_rxq_data *rxq, uintptr_t addr) */ rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp; - return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache, + return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache, mr_ctrl, mp, addr); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index a6a140078e..ae634b934a 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1241,7 +1241,7 @@ mlx5_mprq_alloc_mp(struct rte_eth_dev *dev) rte_errno = ENOMEM; return -rte_errno; } - ret = mlx5_mr_mempool_register(&priv->sh->share_cache, + ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache, priv->sh->cdev->pd, mp, &priv->mp_id); if (ret < 0 && rte_errno != EEXIST) { ret = rte_errno; @@ -1450,7 +1450,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, } tmpl->type = MLX5_RXQ_TYPE_STANDARD; if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl, - &priv->sh->share_cache.dev_gen, socket)) { + &priv->sh->cdev->mr_scache.dev_gen, socket)) { /* rte_errno is already set. 
*/ goto error; } diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 7b984eff35..ed1f2d2c8c 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -22,7 +22,6 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5.h" -#include "mlx5_mr.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" #include "mlx5_rx.h" diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index ad1144e218..b400295e7d 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -24,7 +24,6 @@ #include "mlx5_utils.h" #include "mlx5.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" struct mlx5_priv; diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.h b/drivers/net/mlx5/mlx5_rxtx_vec.h index 93b4f517bb..1aec72817e 100644 --- a/drivers/net/mlx5/mlx5_rxtx_vec.h +++ b/drivers/net/mlx5/mlx5_rxtx_vec.h @@ -12,7 +12,6 @@ #include #include "mlx5_autoconf.h" -#include "mlx5_mr.h" /* HW checksum offload capabilities of vectorized Tx. */ #define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \ diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c index cf4fbd3c9f..54c2893437 100644 --- a/drivers/net/mlx5/mlx5_trigger.c +++ b/drivers/net/mlx5/mlx5_trigger.c @@ -14,7 +14,6 @@ #include #include "mlx5.h" -#include "mlx5_mr.h" #include "mlx5_rx.h" #include "mlx5_tx.h" #include "mlx5_utils.h" @@ -148,7 +147,7 @@ mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl) } for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) { mp = rxq_ctrl->rxq.rxseg[s].mp; - ret = mlx5_mr_mempool_register(&priv->sh->share_cache, + ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache, priv->sh->cdev->pd, mp, &priv->mp_id); if (ret < 0 && rte_errno != EEXIST) diff --git a/drivers/net/mlx5/mlx5_tx.c b/drivers/net/mlx5/mlx5_tx.c index df671379e4..2cc9ae6772 100644 --- a/drivers/net/mlx5/mlx5_tx.c +++ b/drivers/net/mlx5/mlx5_tx.c @@ -22,7 +22,6 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" #include "mlx5.h" -#include "mlx5_mr.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" #include "mlx5_tx.h" diff --git a/drivers/net/mlx5/mlx5_tx.h b/drivers/net/mlx5/mlx5_tx.h index cdbcf659df..bab9008d9b 100644 --- a/drivers/net/mlx5/mlx5_tx.h +++ b/drivers/net/mlx5/mlx5_tx.h @@ -18,7 +18,6 @@ #include "mlx5.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" /* TX burst subroutines return codes. */ enum mlx5_txcmp_code { diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 00dfb42e66..8fef3b0993 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -1135,7 +1135,7 @@ mlx5_txq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, return NULL; } if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl, - &priv->sh->share_cache.dev_gen, socket)) { + &priv->sh->cdev->mr_scache.dev_gen, socket)) { /* rte_errno is already set. 
*/ goto error; } diff --git a/drivers/net/mlx5/windows/mlx5_os.c b/drivers/net/mlx5/windows/mlx5_os.c index 8442dbd445..685babe0ae 100644 --- a/drivers/net/mlx5/windows/mlx5_os.c +++ b/drivers/net/mlx5/windows/mlx5_os.c @@ -26,7 +26,6 @@ #include "mlx5_rx.h" #include "mlx5_tx.h" #include "mlx5_autoconf.h" -#include "mlx5_mr.h" #include "mlx5_flow.h" #include "mlx5_devx.h" @@ -122,21 +121,8 @@ error: static int mlx5_init_once(void) { - struct mlx5_shared_data *sd; - if (mlx5_init_shared_data()) return -rte_errno; - sd = mlx5_shared_data; - rte_spinlock_lock(&sd->lock); - MLX5_ASSERT(sd); - if (!sd->init_done) { - LIST_INIT(&sd->mem_event_cb_list); - rte_rwlock_init(&sd->mem_event_rwlock); - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_mr_mem_event_cb, NULL); - sd->init_done = true; - } - rte_spinlock_unlock(&sd->lock); return 0; } diff --git a/drivers/regex/mlx5/mlx5_regex.c b/drivers/regex/mlx5/mlx5_regex.c index b39181ebb5..7f900b67ee 100644 --- a/drivers/regex/mlx5/mlx5_regex.c +++ b/drivers/regex/mlx5/mlx5_regex.c @@ -25,10 +25,6 @@ int mlx5_regex_logtype; -TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list = - TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list); -static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER; - const struct rte_regexdev_ops mlx5_regexdev_ops = { .dev_info_get = mlx5_regex_info_get, .dev_configure = mlx5_regex_configure, @@ -86,41 +82,6 @@ mlx5_regex_get_name(char *name, struct rte_device *dev) sprintf(name, "mlx5_regex_%s", dev->name); } -/** - * Callback for memory event. - * - * @param event_type - * Memory event type. - * @param addr - * Address of memory. - * @param len - * Size of memory. - */ -static void -mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg __rte_unused) -{ - struct mlx5_regex_priv *priv; - - /* Must be called from the primary process. */ - MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - switch (event_type) { - case RTE_MEM_EVENT_FREE: - pthread_mutex_lock(&mem_event_list_lock); - /* Iterate all the existing mlx5 devices. */ - TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb) - mlx5_free_mr_by_addr(&priv->mr_scache, - mlx5_os_get_ctx_device_name - (priv->cdev->ctx), - addr, len); - pthread_mutex_unlock(&mem_event_list_lock); - break; - case RTE_MEM_EVENT_ALLOC: - default: - break; - } -} - static int mlx5_regex_dev_probe(struct mlx5_common_device *cdev) { @@ -194,21 +155,6 @@ mlx5_regex_dev_probe(struct mlx5_common_device *cdev) priv->regexdev->device = cdev->dev; priv->regexdev->data->dev_private = priv; priv->regexdev->state = RTE_REGEXDEV_READY; - ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()); - if (ret) { - DRV_LOG(ERR, "MR init tree failed."); - rte_errno = ENOMEM; - goto error; - } - /* Register callback function for global shared MR cache management. */ - if (TAILQ_EMPTY(&mlx5_mem_event_list)) - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_regex_mr_mem_event_cb, - NULL); - /* Add device to memory callback list. */ - pthread_mutex_lock(&mem_event_list_lock); - TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb); - pthread_mutex_unlock(&mem_event_list_lock); DRV_LOG(INFO, "RegEx GGA is %s.", priv->has_umr ? "supported" : "unsupported"); return 0; @@ -237,15 +183,6 @@ mlx5_regex_dev_remove(struct mlx5_common_device *cdev) return 0; priv = dev->data->dev_private; if (priv) { - /* Remove from memory callback device list. 
*/ - pthread_mutex_lock(&mem_event_list_lock); - TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb); - pthread_mutex_unlock(&mem_event_list_lock); - if (TAILQ_EMPTY(&mlx5_mem_event_list)) - rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", - NULL); - if (priv->mr_scache.cache.table) - mlx5_mr_release_cache(&priv->mr_scache); if (priv->uar) mlx5_glue->devx_free_uar(priv->uar); if (priv->regexdev) diff --git a/drivers/regex/mlx5/mlx5_regex.h b/drivers/regex/mlx5/mlx5_regex.h index be81931b3a..eb59cc38a6 100644 --- a/drivers/regex/mlx5/mlx5_regex.h +++ b/drivers/regex/mlx5/mlx5_regex.h @@ -68,9 +68,6 @@ struct mlx5_regex_priv { MLX5_RXP_EM_COUNT]; uint32_t nb_engines; /* Number of RegEx engines. */ struct mlx5dv_devx_uar *uar; /* UAR object. */ - TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb; - /**< Called by memory event callback. */ - struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */ uint8_t is_bf2; /* The device is BF2 device. */ uint8_t has_umr; /* The device supports UMR. */ uint32_t mmo_regex_qp_cap:1; diff --git a/drivers/regex/mlx5/mlx5_regex_control.c b/drivers/regex/mlx5/mlx5_regex_control.c index 6735e51976..50c966a022 100644 --- a/drivers/regex/mlx5/mlx5_regex_control.c +++ b/drivers/regex/mlx5/mlx5_regex_control.c @@ -242,7 +242,7 @@ mlx5_regex_qp_setup(struct rte_regexdev *dev, uint16_t qp_ind, nb_sq_config++; } - ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen, + ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen, rte_socket_id()); if (ret) { DRV_LOG(ERR, "Error setting up mr btree"); diff --git a/drivers/regex/mlx5/mlx5_regex_fastpath.c b/drivers/regex/mlx5/mlx5_regex_fastpath.c index 8817e2e074..adb5343a46 100644 --- a/drivers/regex/mlx5/mlx5_regex_fastpath.c +++ b/drivers/regex/mlx5/mlx5_regex_fastpath.c @@ -126,7 +126,7 @@ static inline uint32_t mlx5_regex_mb2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf) { - return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf, &priv->mr_scache); + return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf); } static inline void -- 2.20.1
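
For reference, the per-driver usage pattern after this change looks roughly as sketched below. This is only an illustration, not code from the patch: pmd_qp, pmd_qp_mr_init() and pmd_qp_mb2mr() are hypothetical names standing in for any class driver's queue, while the cdev->mr_scache field, mlx5_mr_ctrl_init() and the shortened mlx5_mr_mb2mr() signature are the ones introduced or reworked above (compare the compress, crypto and regex hunks, which pass 0 as the mp_id argument on the data path).

#include <rte_mbuf.h>
#include <mlx5_common.h>	/* struct mlx5_common_device, mlx5_mr_mb2mr(). */
#include <mlx5_common_mr.h>	/* struct mlx5_mr_ctrl, mlx5_mr_ctrl_init(). */

/* Hypothetical per-queue object of an mlx5 class driver. */
struct pmd_qp {
	struct mlx5_mr_ctrl mr_ctrl;		/* Per-queue MR cache. */
	struct mlx5_common_device *cdev;	/* Common device owning mr_scache. */
};

/* Queue setup: bind the local cache to the shared generation counter. */
static int
pmd_qp_mr_init(struct pmd_qp *qp, struct mlx5_common_device *cdev, int socket)
{
	qp->cdev = cdev;
	return mlx5_mr_ctrl_init(&qp->mr_ctrl, &cdev->mr_scache.dev_gen, socket);
}

/* Data path: the global cache is reached through cdev, no extra argument. */
static inline uint32_t
pmd_qp_mb2mr(struct pmd_qp *qp, struct rte_mbuf *mbuf)
{
	return mlx5_mr_mb2mr(qp->cdev, 0, &qp->mr_ctrl, mbuf);
}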