Add a global shared MR cache as a field of the common device structure, and move MR management in all mlx5 drivers (net, compress, crypto, regex) to use it. The memory event callback is now registered once in the common code, so the per-driver callbacks and private caches are removed and the net PMD's mlx5_mr.h header is dropped.
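For illustration only (not part of the patch), a driver data-path helper after this
change looks roughly like the sketch below. mlx5_mr_mb2mr(), struct mlx5_mr_ctrl and
struct mlx5_common_device are the real names touched by this patch (declared in
mlx5_common.h / mlx5_common_mr.h); "my_drv_qp" and "my_drv_mb2mr" are hypothetical
driver-side names used only for the example:

    /* Hypothetical driver queue: no private mlx5_mr_share_cache field anymore. */
    struct my_drv_qp {
            struct mlx5_common_device *cdev; /* Shared MR cache is cdev->mr_scache. */
            struct mlx5_mr_ctrl mr_ctrl;     /* Per-queue MR cache, seeded from
                                              * cdev->mr_scache.dev_gen at QP setup. */
    };

    static inline uint32_t
    my_drv_mb2mr(struct my_drv_qp *qp, struct rte_mbuf *mbuf)
    {
            /* The old signature took an extra struct mlx5_mr_share_cache *argument;
             * the bottom-half now resolves it from qp->cdev internally. */
            return mlx5_mr_mb2mr(qp->cdev, 0, &qp->mr_ctrl, mbuf);
    }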
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
#endif
}
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ * Memory event type.
+ * @param addr
+ * Address of memory.
+ * @param len
+ * Size of memory.
+ */
+static void
+mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+ size_t len, void *arg __rte_unused)
+{
+ struct mlx5_common_device *cdev;
+
+ /* Must be called from the primary process. */
+ MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+ switch (event_type) {
+ case RTE_MEM_EVENT_FREE:
+ pthread_mutex_lock(&devices_list_lock);
+ /* Iterate all the existing mlx5 devices. */
+ TAILQ_FOREACH(cdev, &devices_list, next)
+ mlx5_free_mr_by_addr(&cdev->mr_scache,
+ mlx5_os_get_ctx_device_name
+ (cdev->ctx),
+ addr, len);
+ pthread_mutex_unlock(&devices_list_lock);
+ break;
+ case RTE_MEM_EVENT_ALLOC:
+ default:
+ break;
+ }
+}
+
/**
* Uninitialize all HW global of device context.
*
pthread_mutex_lock(&devices_list_lock);
TAILQ_REMOVE(&devices_list, cdev, next);
pthread_mutex_unlock(&devices_list_lock);
- if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (TAILQ_EMPTY(&devices_list))
+ rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
+ NULL);
+ mlx5_mr_release_cache(&cdev->mr_scache);
mlx5_dev_hw_global_release(cdev);
+ }
rte_free(cdev);
}
rte_free(cdev);
return NULL;
}
+	/* Initialize global MR cache resources and set its reg/dereg callbacks. */
+ ret = mlx5_mr_create_cache(&cdev->mr_scache, eal_dev->numa_node);
+ if (ret) {
+ DRV_LOG(ERR, "Failed to initialize global MR share cache.");
+ mlx5_dev_hw_global_release(cdev);
+ rte_free(cdev);
+ return NULL;
+ }
+ /* Register callback function for global shared MR cache management. */
+ if (TAILQ_EMPTY(&devices_list))
+ rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+ mlx5_mr_mem_event_cb, NULL);
exit:
pthread_mutex_lock(&devices_list_lock);
TAILQ_INSERT_HEAD(&devices_list, cdev, next);
void *ctx; /* Verbs/DV/DevX context. */
void *pd; /* Protection Domain. */
uint32_t pdn; /* Protection Domain Number. */
+ struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
struct mlx5_common_dev_config config; /* Device configuration. */
};
__rte_internal
uint32_t
mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
- struct mlx5_mr_share_cache *share_cache);
+ struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf);
/* mlx5_common_os.c */
* Pointer to per-queue MR control structure.
* @param mbuf
* Pointer to mbuf.
- * @param share_cache
- * Pointer to a global shared MR cache.
*
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
uint32_t
mlx5_mr_mb2mr(struct mlx5_common_device *cdev, struct mlx5_mp_id *mp_id,
- struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf,
- struct mlx5_mr_share_cache *share_cache)
+ struct mlx5_mr_ctrl *mr_ctrl, struct rte_mbuf *mbuf)
{
uint32_t lkey;
uintptr_t addr = (uintptr_t)mbuf->buf_addr;
if (likely(lkey != UINT32_MAX))
return lkey;
/* Take slower bottom-half on miss. */
- return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, share_cache, mr_ctrl,
+ return mlx5_mr_addr2mr_bh(cdev->pd, mp_id, &cdev->mr_scache, mr_ctrl,
addr, cdev->config.mr_ext_memseg_en);
}
uint32_t mlx5_mr_mempool2mr_bh(struct mlx5_mr_share_cache *share_cache,
struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mempool *mp, uintptr_t addr);
-__rte_internal
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
-__rte_internal
int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
__rte_internal
void mlx5_mr_flush_local_cache(struct mlx5_mr_ctrl *mr_ctrl);
-__rte_internal
void mlx5_free_mr_by_addr(struct mlx5_mr_share_cache *share_cache,
const char *ibdev_name, const void *addr, size_t len);
__rte_internal
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
-__rte_internal
void
mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
mlx5_mr_addr2mr_bh;
mlx5_mr_btree_dump;
mlx5_mr_btree_free;
- mlx5_mr_create_cache;
mlx5_mr_create_primary;
mlx5_mr_ctrl_init;
mlx5_mr_dump_cache;
mlx5_mr_lookup_cache;
mlx5_mr_lookup_list;
mlx5_mr_mb2mr;
- mlx5_free_mr_by_addr;
mlx5_mr_rebuild_cache;
- mlx5_mr_release_cache;
mlx5_nl_allmulti; # WINDOWS_NO_EXPORT
mlx5_nl_ifindex; # WINDOWS_NO_EXPORT
mlx5_os_umem_dereg;
mlx5_os_umem_reg;
- mlx5_os_set_reg_mr_cb;
mlx5_realloc;
struct rte_compressdev_config dev_config;
LIST_HEAD(xform_list, mlx5_compress_xform) xform_list;
rte_spinlock_t xform_sl;
- struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
volatile uint64_t *uar_addr;
/* HCA caps*/
uint32_t mmo_decomp_sq:1;
return -rte_errno;
}
dev->data->queue_pairs[qp_id] = qp;
- if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+ if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
priv->dev_config.socket_id)) {
DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
(uint32_t)qp_id);
uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);
dseg->bcount = rte_cpu_to_be_32(len);
- dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf,
- &qp->priv->mr_scache);
+ dseg->lkey = mlx5_mr_mb2mr(qp->priv->cdev, 0, &qp->mr_ctrl, mbuf);
dseg->pbuf = rte_cpu_to_be_64(addr);
return dseg->lkey;
}
return 0;
}
-/**
- * Callback for memory event.
- *
- * @param event_type
- * Memory event type.
- * @param addr
- * Address of memory.
- * @param len
- * Size of memory.
- */
-static void
-mlx5_compress_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct mlx5_compress_priv *priv;
-
- /* Must be called from the primary process. */
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- switch (event_type) {
- case RTE_MEM_EVENT_FREE:
- pthread_mutex_lock(&priv_list_lock);
- /* Iterate all the existing mlx5 devices. */
- TAILQ_FOREACH(priv, &mlx5_compress_priv_list, next)
- mlx5_free_mr_by_addr(&priv->mr_scache,
- mlx5_os_get_ctx_device_name
- (priv->cdev->ctx),
- addr, len);
- pthread_mutex_unlock(&priv_list_lock);
- break;
- case RTE_MEM_EVENT_ALLOC:
- default:
- break;
- }
-}
-
static int
mlx5_compress_dev_probe(struct mlx5_common_device *cdev)
{
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
- if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
- DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_compress_uar_release(priv);
- rte_compressdev_pmd_destroy(priv->compressdev);
- rte_errno = ENOMEM;
- return -rte_errno;
- }
- /* Register callback function for global shared MR cache management. */
- if (TAILQ_EMPTY(&mlx5_compress_priv_list))
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_compress_mr_mem_event_cb,
- NULL);
pthread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&mlx5_compress_priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
TAILQ_REMOVE(&mlx5_compress_priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
if (priv) {
- if (TAILQ_EMPTY(&mlx5_compress_priv_list))
- rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
- NULL);
- mlx5_mr_release_cache(&priv->mr_scache);
mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
}
*remain -= data_len;
klm->bcount = rte_cpu_to_be_32(data_len);
klm->pbuf = rte_cpu_to_be_64(addr);
- klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf,
- &priv->mr_scache);
+ klm->lkey = mlx5_mr_mb2mr(priv->cdev, 0, &qp->mr_ctrl, mbuf);
return klm->lkey;
}
DRV_LOG(ERR, "Failed to create QP.");
goto error;
}
- if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+ if (mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
priv->dev_config.socket_id) != 0) {
DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
(uint32_t)qp_id);
return 0;
}
-/**
- * Callback for memory event.
- *
- * @param event_type
- * Memory event type.
- * @param addr
- * Address of memory.
- * @param len
- * Size of memory.
- */
-static void
-mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct mlx5_crypto_priv *priv;
-
- /* Must be called from the primary process. */
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- switch (event_type) {
- case RTE_MEM_EVENT_FREE:
- pthread_mutex_lock(&priv_list_lock);
- /* Iterate all the existing mlx5 devices. */
- TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
- mlx5_free_mr_by_addr(&priv->mr_scache,
- mlx5_os_get_ctx_device_name
- (priv->cdev->ctx),
- addr, len);
- pthread_mutex_unlock(&priv_list_lock);
- break;
- case RTE_MEM_EVENT_ALLOC:
- default:
- break;
- }
-}
-
static int
mlx5_crypto_dev_probe(struct mlx5_common_device *cdev)
{
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
- if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
- DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
- mlx5_crypto_uar_release(priv);
- rte_cryptodev_pmd_destroy(priv->crypto_dev);
- rte_errno = ENOMEM;
- return -rte_errno;
- }
priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
priv->max_segs_num = devarg_prms.max_segs_num;
priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
- /* Register callback function for global shared MR cache management. */
- if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_crypto_mr_mem_event_cb,
- NULL);
pthread_mutex_lock(&priv_list_lock);
TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
pthread_mutex_unlock(&priv_list_lock);
if (priv) {
- if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
- rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
- NULL);
- mlx5_mr_release_cache(&priv->mr_scache);
mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
uint32_t max_segs_num; /* Maximum supported data segs. */
struct mlx5_hlist *dek_hlist; /* Dek hash list. */
struct rte_cryptodev_config dev_config;
- struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
struct mlx5_devx_obj *login_obj;
uint64_t keytag;
uint16_t wqe_set_size;
case MLX5_MP_REQ_CREATE_MR:
mp_init_msg(&priv->mp_id, &mp_res, param->type);
lkey = mlx5_mr_create_primary(cdev->pd,
- &priv->sh->share_cache,
+ &priv->sh->cdev->mr_scache,
&entry, param->args.addr,
cdev->config.mr_ext_memseg_en);
if (lkey == UINT32_MAX)
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "rte_pmd_mlx5.h"
#include "mlx5_verbs.h"
case RTE_PROC_PRIMARY:
if (sd->init_done)
break;
- LIST_INIT(&sd->mem_event_cb_list);
- rte_rwlock_init(&sd->mem_event_rwlock);
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_mr_mem_event_cb, NULL);
ret = mlx5_mp_init_primary(MLX5_MP_NAME,
mlx5_mp_os_primary_handle);
if (ret)
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "rte_pmd_mlx5.h"
struct mlx5_mp_id mp_id;
mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_unregister(&sh->share_cache, mp, &mp_id) < 0)
+ if (mlx5_mr_mempool_unregister(&sh->cdev->mr_scache, mp, &mp_id) < 0)
DRV_LOG(WARNING, "Failed to unregister mempool %s for PD %p: %s",
mp->name, sh->cdev->pd, rte_strerror(rte_errno));
}
int ret;
mlx5_mp_id_init(&mp_id, 0);
- ret = mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
+ ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd, mp,
&mp_id);
if (ret < 0 && rte_errno != EEXIST)
DRV_LOG(ERR, "Failed to register existing mempool %s for PD %p: %s",
switch (event) {
case RTE_MEMPOOL_EVENT_READY:
mlx5_mp_id_init(&mp_id, 0);
- if (mlx5_mr_mempool_register(&sh->share_cache, sh->cdev->pd, mp,
- &mp_id) < 0)
+ if (mlx5_mr_mempool_register(&sh->cdev->mr_scache, sh->cdev->pd,
+ mp, &mp_id) < 0)
DRV_LOG(ERR, "Failed to register new mempool %s for PD %p: %s",
mp->name, sh->cdev->pd,
rte_strerror(rte_errno));
for (i = 0; i < MLX5_UAR_PAGE_NUM_MAX; i++)
rte_spinlock_init(&sh->uar_lock[i]);
#endif
- /*
- * Once the device is added to the list of memory event
- * callback, its global MR cache table cannot be expanded
- * on the fly because of deadlock. If it overflows, lookup
- * should be done by searching MR list linearly, which is slow.
- *
- * At this point the device is not added to the memory
- * event list yet, context is just being created.
- */
- err = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node);
- if (err) {
- err = rte_errno;
- goto error;
- }
mlx5_os_dev_shared_handler_install(sh);
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
mlx5_flow_aging_init(sh);
mlx5_flow_counters_mng_init(sh);
mlx5_flow_ipool_create(sh, config);
- /* Add device to memory callback list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
- sh, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
/* Add context to the global device list. */
LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
rte_spinlock_init(&sh->geneve_tlv_opt_sl);
pthread_mutex_destroy(&sh->txpp.mutex);
pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
MLX5_ASSERT(sh);
- if (sh->share_cache.cache.table)
- mlx5_mr_btree_free(&sh->share_cache.cache);
if (sh->tis)
claim_zero(mlx5_devx_cmd_destroy(sh->tis));
if (sh->td)
if (ret == 0)
rte_mempool_walk(mlx5_dev_ctx_shared_mempool_unregister_cb,
sh);
- /* Remove from memory callback device list. */
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_REMOVE(sh, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
- /* Release created Memory Regions. */
- mlx5_mr_release_cache(&sh->share_cache);
/* Remove context from the global device list. */
LIST_REMOVE(sh, next);
/* Release flow workspaces objects on the last device. */
char ibdev_path[MLX5_FS_PATH_MAX]; /* SYSFS dev path for secondary */
struct mlx5_dev_attr device_attr; /* Device properties. */
int numa_node; /* Numa node of backing physical device. */
- LIST_ENTRY(mlx5_dev_ctx_shared) mem_event_cb;
- /**< Called by memory event callback. */
- struct mlx5_mr_share_cache share_cache;
/* Packet pacing related structure. */
struct mlx5_dev_txpp txpp;
/* Shared DV/DR flow data section. */
/**
* Free MR resources.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] mr
* MR to free.
*/
static void
-mlx5_aso_dereg_mr(struct mlx5_dev_ctx_shared *sh, struct mlx5_pmd_mr *mr)
+mlx5_aso_dereg_mr(struct mlx5_common_device *cdev, struct mlx5_pmd_mr *mr)
{
void *addr = mr->addr;
- sh->share_cache.dereg_mr_cb(mr);
+ cdev->mr_scache.dereg_mr_cb(mr);
mlx5_free(addr);
memset(mr, 0, sizeof(*mr));
}
/**
* Register Memory Region.
*
- * @param[in] sh
- * Pointer to shared device context.
+ * @param[in] cdev
+ * Pointer to the mlx5 common device.
* @param[in] length
* Size of MR buffer.
* @param[in/out] mr
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-mlx5_aso_reg_mr(struct mlx5_dev_ctx_shared *sh, size_t length,
+mlx5_aso_reg_mr(struct mlx5_common_device *cdev, size_t length,
struct mlx5_pmd_mr *mr, int socket)
{
DRV_LOG(ERR, "Failed to create ASO bits mem for MR.");
return -1;
}
- ret = sh->share_cache.reg_mr_cb(sh->cdev->pd, mr->addr, length, mr);
+ ret = cdev->mr_scache.reg_mr_cb(cdev->pd, mr->addr, length, mr);
if (ret) {
DRV_LOG(ERR, "Failed to create direct Mkey.");
mlx5_free(mr->addr);
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- if (mlx5_aso_reg_mr(sh, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
+ if (mlx5_aso_reg_mr(cdev, (MLX5_ASO_AGE_ACTIONS_PER_POOL / 8) *
sq_desc_n, &sh->aso_age_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->aso_age_mng->aso_sq, 0,
sh->tx_uar, cdev->pdn,
MLX5_ASO_QUEUE_LOG_DESC,
cdev->config.hca_attr.sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(cdev, &sh->aso_age_mng->aso_sq.mr);
return -1;
}
mlx5_aso_age_init_sq(&sh->aso_age_mng->aso_sq);
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
/* 64B per object for query. */
- if (mlx5_aso_reg_mr(sh, 64 * sq_desc_n,
+ if (mlx5_aso_reg_mr(cdev, 64 * sq_desc_n,
&sh->ct_mng->aso_sq.mr, 0))
return -1;
if (mlx5_aso_sq_create(cdev->ctx, &sh->ct_mng->aso_sq, 0,
sh->tx_uar, cdev->pdn,
MLX5_ASO_QUEUE_LOG_DESC,
cdev->config.hca_attr.sq_ts_format)) {
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(cdev, &sh->ct_mng->aso_sq.mr);
return -1;
}
mlx5_aso_ct_init_sq(&sh->ct_mng->aso_sq);
switch (aso_opc_mod) {
case ASO_OPC_MOD_FLOW_HIT:
- mlx5_aso_dereg_mr(sh, &sh->aso_age_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->aso_age_mng->aso_sq.mr);
sq = &sh->aso_age_mng->aso_sq;
break;
case ASO_OPC_MOD_POLICER:
sq = &sh->mtrmng->pools_mng.sq;
break;
case ASO_OPC_MOD_CONNECTION_TRACKING:
- mlx5_aso_dereg_mr(sh, &sh->ct_mng->aso_sq.mr);
+ mlx5_aso_dereg_mr(sh->cdev, &sh->ct_mng->aso_sq.mr);
sq = &sh->ct_mng->aso_sq;
break;
default:
#include <mlx5_common_mr.h>
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
-/**
- * Callback for memory event. This can be called from both primary and secondary
- * process.
- *
- * @param event_type
- * Memory event type.
- * @param addr
- * Address of memory.
- * @param len
- * Size of memory.
- */
-void
-mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct mlx5_dev_ctx_shared *sh;
- struct mlx5_dev_list *dev_list = &mlx5_shared_data->mem_event_cb_list;
-
- /* Must be called from the primary process. */
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- switch (event_type) {
- case RTE_MEM_EVENT_FREE:
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- /* Iterate all the existing mlx5 devices. */
- LIST_FOREACH(sh, dev_list, mem_event_cb)
- mlx5_free_mr_by_addr(&sh->share_cache,
- sh->ibdev_name, addr, len);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
- break;
- case RTE_MEM_EVENT_ALLOC:
- default:
- break;
- }
-}
-
/**
* Bottom-half of LKey search on Tx.
*
struct mlx5_priv *priv = txq_ctrl->priv;
return mlx5_mr_addr2mr_bh(priv->sh->cdev->pd, &priv->mp_id,
- &priv->sh->share_cache, mr_ctrl, addr,
+ &priv->sh->cdev->mr_scache, mr_ctrl, addr,
priv->sh->cdev->config.mr_ext_memseg_en);
}
mp = buf->mp;
}
if (mp != NULL) {
- lkey = mlx5_mr_mempool2mr_bh(&priv->sh->share_cache,
+ lkey = mlx5_mr_mempool2mr_bh(&priv->sh->cdev->mr_scache,
mr_ctrl, mp, addr);
/*
* Lookup can only fail on invalid input, e.g. "addr"
struct rte_eth_dev *dev;
struct mlx5_mr *mr;
struct mlx5_priv *priv;
- struct mlx5_dev_ctx_shared *sh;
+ struct mlx5_common_device *cdev;
dev = dev_to_eth_dev(rte_dev);
if (!dev) {
return -1;
}
priv = dev->data->dev_private;
- sh = priv->sh;
- mr = mlx5_create_mr_ext(sh->cdev->pd, (uintptr_t)addr, len,
- SOCKET_ID_ANY, sh->share_cache.reg_mr_cb);
+ cdev = priv->sh->cdev;
+ mr = mlx5_create_mr_ext(cdev->pd, (uintptr_t)addr, len,
+ SOCKET_ID_ANY, cdev->mr_scache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to dma map", dev->data->port_id);
rte_errno = EINVAL;
return -1;
}
- rte_rwlock_write_lock(&sh->share_cache.rwlock);
- LIST_INSERT_HEAD(&sh->share_cache.mr_list, mr, mr);
+ rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
+ LIST_INSERT_HEAD(&cdev->mr_scache.mr_list, mr, mr);
/* Insert to the global cache table. */
- mlx5_mr_insert_cache(&sh->share_cache, mr);
- rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ mlx5_mr_insert_cache(&cdev->mr_scache, mr);
+ rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
return 0;
}
{
struct rte_eth_dev *dev;
struct mlx5_priv *priv;
- struct mlx5_dev_ctx_shared *sh;
+ struct mlx5_common_device *cdev;
struct mlx5_mr *mr;
struct mr_cache_entry entry;
return -1;
}
priv = dev->data->dev_private;
- sh = priv->sh;
- rte_rwlock_write_lock(&sh->share_cache.rwlock);
- mr = mlx5_mr_lookup_list(&sh->share_cache, &entry, (uintptr_t)addr);
+ cdev = priv->sh->cdev;
+ rte_rwlock_write_lock(&cdev->mr_scache.rwlock);
+ mr = mlx5_mr_lookup_list(&cdev->mr_scache, &entry, (uintptr_t)addr);
if (!mr) {
- rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
DRV_LOG(WARNING, "address 0x%" PRIxPTR " wasn't registered to device %s",
(uintptr_t)addr, rte_dev->name);
rte_errno = EINVAL;
LIST_REMOVE(mr, mr);
DRV_LOG(DEBUG, "port %u remove MR(%p) from list", dev->data->port_id,
(void *)mr);
- mlx5_mr_free(mr, sh->share_cache.dereg_mr_cb);
- mlx5_mr_rebuild_cache(&sh->share_cache);
+ mlx5_mr_free(mr, cdev->mr_scache.dereg_mr_cb);
+ mlx5_mr_rebuild_cache(&cdev->mr_scache);
/*
* No explicit wmb is needed after updating dev_gen due to
* store-release ordering in unlock that provides the
* implicit barrier at the software visible level.
*/
- ++sh->share_cache.dev_gen;
+ ++cdev->mr_scache.dev_gen;
DRV_LOG(DEBUG, "broadcasting local cache flush, gen=%d",
- sh->share_cache.dev_gen);
- rte_rwlock_write_unlock(&sh->share_cache.rwlock);
+ cdev->mr_scache.dev_gen);
+ rte_rwlock_write_unlock(&cdev->mr_scache.rwlock);
return 0;
}
+++ /dev/null
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2018 6WIND S.A.
- * Copyright 2018 Mellanox Technologies, Ltd
- */
-
-#ifndef RTE_PMD_MLX5_MR_H_
-#define RTE_PMD_MLX5_MR_H_
-
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/queue.h>
-
-#include <rte_ethdev.h>
-#include <rte_rwlock.h>
-#include <rte_bitmap.h>
-#include <rte_memory.h>
-
-#include <mlx5_common_mr.h>
-
-/* First entry must be NULL for comparison. */
-#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
-
-void mlx5_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
- size_t len, void *arg);
-
-#endif /* RTE_PMD_MLX5_MR_H_ */
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10
+/* First entry must be NULL for comparison. */
+#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
+
struct mlx5_rxq_stats {
#ifdef MLX5_PMD_SOFT_COUNTERS
uint64_t ipackets; /**< Total of successfully received packets. */
*/
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
- return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->share_cache,
+ return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,
mr_ctrl, mp, addr);
}
rte_errno = ENOMEM;
return -rte_errno;
}
- ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
priv->sh->cdev->pd, mp, &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST) {
ret = rte_errno;
}
tmpl->type = MLX5_RXQ_TYPE_STANDARD;
if (mlx5_mr_ctrl_init(&tmpl->rxq.mr_ctrl,
- &priv->sh->share_cache.dev_gen, socket)) {
+ &priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
goto error;
}
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
struct mlx5_priv;
#include <mlx5_prm.h>
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
/* HW checksum offload capabilities of vectorized Tx. */
#define MLX5_VEC_TX_CKSUM_OFFLOAD_CAP \
#include <mlx5_malloc.h>
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_utils.h"
}
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
mp = rxq_ctrl->rxq.rxseg[s].mp;
- ret = mlx5_mr_mempool_register(&priv->sh->share_cache,
+ ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
priv->sh->cdev->pd, mp,
&priv->mp_id);
if (ret < 0 && rte_errno != EEXIST)
#include "mlx5_autoconf.h"
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_mr.h"
#include "mlx5_utils.h"
#include "mlx5_rxtx.h"
#include "mlx5_tx.h"
#include "mlx5.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
/* TX burst subroutines return codes. */
enum mlx5_txcmp_code {
return NULL;
}
if (mlx5_mr_ctrl_init(&tmpl->txq.mr_ctrl,
- &priv->sh->share_cache.dev_gen, socket)) {
+ &priv->sh->cdev->mr_scache.dev_gen, socket)) {
/* rte_errno is already set. */
goto error;
}
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_autoconf.h"
-#include "mlx5_mr.h"
#include "mlx5_flow.h"
#include "mlx5_devx.h"
static int
mlx5_init_once(void)
{
- struct mlx5_shared_data *sd;
-
if (mlx5_init_shared_data())
return -rte_errno;
- sd = mlx5_shared_data;
- rte_spinlock_lock(&sd->lock);
- MLX5_ASSERT(sd);
- if (!sd->init_done) {
- LIST_INIT(&sd->mem_event_cb_list);
- rte_rwlock_init(&sd->mem_event_rwlock);
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_mr_mem_event_cb, NULL);
- sd->init_done = true;
- }
- rte_spinlock_unlock(&sd->lock);
return 0;
}
int mlx5_regex_logtype;
-TAILQ_HEAD(regex_mem_event, mlx5_regex_priv) mlx5_mem_event_list =
- TAILQ_HEAD_INITIALIZER(mlx5_mem_event_list);
-static pthread_mutex_t mem_event_list_lock = PTHREAD_MUTEX_INITIALIZER;
-
const struct rte_regexdev_ops mlx5_regexdev_ops = {
.dev_info_get = mlx5_regex_info_get,
.dev_configure = mlx5_regex_configure,
sprintf(name, "mlx5_regex_%s", dev->name);
}
-/**
- * Callback for memory event.
- *
- * @param event_type
- * Memory event type.
- * @param addr
- * Address of memory.
- * @param len
- * Size of memory.
- */
-static void
-mlx5_regex_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
- size_t len, void *arg __rte_unused)
-{
- struct mlx5_regex_priv *priv;
-
- /* Must be called from the primary process. */
- MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
- switch (event_type) {
- case RTE_MEM_EVENT_FREE:
- pthread_mutex_lock(&mem_event_list_lock);
- /* Iterate all the existing mlx5 devices. */
- TAILQ_FOREACH(priv, &mlx5_mem_event_list, mem_event_cb)
- mlx5_free_mr_by_addr(&priv->mr_scache,
- mlx5_os_get_ctx_device_name
- (priv->cdev->ctx),
- addr, len);
- pthread_mutex_unlock(&mem_event_list_lock);
- break;
- case RTE_MEM_EVENT_ALLOC:
- default:
- break;
- }
-}
-
static int
mlx5_regex_dev_probe(struct mlx5_common_device *cdev)
{
priv->regexdev->device = cdev->dev;
priv->regexdev->data->dev_private = priv;
priv->regexdev->state = RTE_REGEXDEV_READY;
- ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());
- if (ret) {
- DRV_LOG(ERR, "MR init tree failed.");
- rte_errno = ENOMEM;
- goto error;
- }
- /* Register callback function for global shared MR cache management. */
- if (TAILQ_EMPTY(&mlx5_mem_event_list))
- rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
- mlx5_regex_mr_mem_event_cb,
- NULL);
- /* Add device to memory callback list. */
- pthread_mutex_lock(&mem_event_list_lock);
- TAILQ_INSERT_TAIL(&mlx5_mem_event_list, priv, mem_event_cb);
- pthread_mutex_unlock(&mem_event_list_lock);
DRV_LOG(INFO, "RegEx GGA is %s.",
priv->has_umr ? "supported" : "unsupported");
return 0;
return 0;
priv = dev->data->dev_private;
if (priv) {
- /* Remove from memory callback device list. */
- pthread_mutex_lock(&mem_event_list_lock);
- TAILQ_REMOVE(&mlx5_mem_event_list, priv, mem_event_cb);
- pthread_mutex_unlock(&mem_event_list_lock);
- if (TAILQ_EMPTY(&mlx5_mem_event_list))
- rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
- NULL);
- if (priv->mr_scache.cache.table)
- mlx5_mr_release_cache(&priv->mr_scache);
if (priv->uar)
mlx5_glue->devx_free_uar(priv->uar);
if (priv->regexdev)
MLX5_RXP_EM_COUNT];
uint32_t nb_engines; /* Number of RegEx engines. */
struct mlx5dv_devx_uar *uar; /* UAR object. */
- TAILQ_ENTRY(mlx5_regex_priv) mem_event_cb;
- /**< Called by memory event callback. */
- struct mlx5_mr_share_cache mr_scache; /* Global shared MR cache. */
uint8_t is_bf2; /* The device is BF2 device. */
uint8_t has_umr; /* The device supports UMR. */
uint32_t mmo_regex_qp_cap:1;
nb_sq_config++;
}
- ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->mr_scache.dev_gen,
+ ret = mlx5_mr_ctrl_init(&qp->mr_ctrl, &priv->cdev->mr_scache.dev_gen,
rte_socket_id());
if (ret) {
DRV_LOG(ERR, "Error setting up mr btree");
mlx5_regex_mb2mr(struct mlx5_regex_priv *priv, struct mlx5_mr_ctrl *mr_ctrl,
struct rte_mbuf *mbuf)
{
- return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf, &priv->mr_scache);
+ return mlx5_mr_mb2mr(priv->cdev, 0, mr_ctrl, mbuf);
}
static inline void