* Pointer to MR to free.
*/
static void
-mr_free(struct mlx5_mr *mr)
+mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb)
{
if (mr == NULL)
return;
DRV_LOG(DEBUG, "freeing MR(%p):", (void *)mr);
- mlx5_common_verbs_dereg_mr(&mr->pmd_mr);
+ dereg_mr_cb(&mr->pmd_mr);
if (mr->ms_bmp != NULL)
rte_bitmap_free(mr->ms_bmp);
rte_free(mr);
struct mlx5_mr *mr = mr_next;
mr_next = LIST_NEXT(mr, mr);
- mr_free(mr);
+ mr_free(mr, share_cache->dereg_mr_cb);
}
}
data.start = RTE_ALIGN_FLOOR(addr, msl->page_sz);
data.end = data.start + msl->page_sz;
rte_mcfg_mem_read_unlock();
- mr_free(mr);
+ mr_free(mr, share_cache->dereg_mr_cb);
goto alloc_resources;
}
MLX5_ASSERT(data.msl == data_re.msl);
* Must be unlocked before calling rte_free() because
* mlx5_mr_mem_event_free_cb() can be called inside.
*/
- mr_free(mr);
+ mr_free(mr, share_cache->dereg_mr_cb);
return entry->lkey;
}
/*
mr->ms_bmp_n = len / msl->page_sz;
MLX5_ASSERT(ms_idx_shift + mr->ms_bmp_n <= ms_n);
/*
- * Finally create a verbs MR for the memory chunk. ibv_reg_mr() can be
- * called with holding the memory lock because it doesn't use
+	 * Finally create an MR for the memory chunk. The verbs ibv_reg_mr()
+	 * can be called while holding the memory lock because it doesn't use
* mlx5_alloc_buf_extern() which eventually calls rte_malloc_socket()
* through mlx5_alloc_verbs_buf().
*/
- mlx5_common_verbs_reg_mr(pd, (void *)data.start, len, &mr->pmd_mr);
+ share_cache->reg_mr_cb(pd, (void *)data.start, len, &mr->pmd_mr);
if (mr->pmd_mr.obj == NULL) {
DEBUG("Fail to create an MR for address (%p)",
(void *)addr);
* calling rte_free() because mlx5_mr_mem_event_free_cb() can be called
* inside.
*/
- mr_free(mr);
+ mr_free(mr, share_cache->dereg_mr_cb);
return UINT32_MAX;
}
* Pointer to MR structure on success, NULL otherwise.
*/
struct mlx5_mr *
-mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id)
+mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
+ mlx5_reg_mr_t reg_mr_cb)
{
struct mlx5_mr *mr = NULL;
RTE_CACHE_LINE_SIZE, socket_id);
if (mr == NULL)
return NULL;
- mlx5_common_verbs_reg_mr(pd, (void *)addr, len, &mr->pmd_mr);
+ reg_mr_cb(pd, (void *)addr, len, &mr->pmd_mr);
if (mr->pmd_mr.obj == NULL) {
DRV_LOG(WARNING,
"Fail to create MR for address (%p)",
size_t len;
void *obj; /* verbs mr object or devx umem object. */
};
+
+/**
+ * MR operation callback typedefs: register and deregister a memory region.
+ */
+typedef int (*mlx5_reg_mr_t)(void *pd, void *addr, size_t length,
+ struct mlx5_pmd_mr *pmd_mr);
+typedef void (*mlx5_dereg_mr_t)(struct mlx5_pmd_mr *pmd_mr);
+
/* Memory Region object. */
struct mlx5_mr {
LIST_ENTRY(mlx5_mr) mr; /**< Pointer to the prev/next entry. */
struct mlx5_mr_btree cache; /* Global MR cache table. */
struct mlx5_mr_list mr_list; /* Registered MR list. */
struct mlx5_mr_list mr_free_list; /* Freed MR list. */
+	mlx5_reg_mr_t reg_mr_cb; /* Callback to the reg_mr function. */
+	mlx5_dereg_mr_t dereg_mr_cb; /* Callback to the dereg_mr function. */
} __rte_packed;
/**
struct mr_cache_entry *entry, uintptr_t addr);
__rte_internal
struct mlx5_mr *
-mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len,
- int socket_id);
+mlx5_create_mr_ext(void *pd, uintptr_t addr, size_t len, int socket_id,
+ mlx5_reg_mr_t reg_mr_cb);
__rte_internal
uint32_t
mlx5_mr_create_primary(void *pd,
#include <mlx5_devx_cmds.h>
#include <mlx5_common.h>
#include <mlx5_common_mp.h>
+#include <mlx5_common_mr.h>
#include "mlx5_defs.h"
#include "mlx5.h"
rte_free(strings);
}
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * @param[out] reg_mr_cb
+ *   Pointer to the reg_mr function to set.
+ * @param[out] dereg_mr_cb
+ *   Pointer to the dereg_mr function to set.
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
+ mlx5_dereg_mr_t *dereg_mr_cb)
+{
+ *reg_mr_cb = mlx5_common_verbs_reg_mr;
+ *dereg_mr_cb = mlx5_common_verbs_dereg_mr;
+}
+
const struct eth_dev_ops mlx5_os_dev_ops = {
.dev_configure = mlx5_dev_configure,
.dev_start = mlx5_dev_start,
err = rte_errno;
goto error;
}
+ mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
+ &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
mlx5_flow_aging_init(sh);
mlx5_flow_counters_mng_init(sh);
int mlx5_os_read_dev_counters(struct rte_eth_dev *dev, uint64_t *stats);
int mlx5_os_get_stats_n(struct rte_eth_dev *dev);
void mlx5_os_stats_init(struct rte_eth_dev *dev);
+void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
+ mlx5_dereg_mr_t *dereg_mr_cb);
#endif /* RTE_PMD_MLX5_H_ */
return;
DRV_LOG(DEBUG, "port %u register MR for chunk #%d of mempool (%s)",
dev->data->port_id, mem_idx, mp->name);
- mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id);
+ mr = mlx5_create_mr_ext(sh->pd, addr, len, mp->socket_id,
+ sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to allocate a new MR of"
}
priv = dev->data->dev_private;
sh = priv->sh;
- mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY);
+ mr = mlx5_create_mr_ext(sh->pd, (uintptr_t)addr, len, SOCKET_ID_ANY,
+ sh->share_cache.reg_mr_cb);
if (!mr) {
DRV_LOG(WARNING,
"port %u unable to dma map", dev->data->port_id);