Add function for global shared MR cache structure initialization.
This function includes:
- btree initialization.
- set callbacks for reg and dereg MR.
Signed-off-by: Michael Baum <michaelba@nvidia.com>
Acked-by: Matan Azrad <matan@nvidia.com>
memset(pmd_mr, 0, sizeof(*pmd_mr));
}
}
+
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * Selects the common Verbs MR register/deregister implementations
+ * (mlx5_common_verbs_reg_mr / mlx5_common_verbs_dereg_mr).
+ *
+ * @param[out] reg_mr_cb
+ * Pointer to the reg_mr callback to be filled in.
+ * @param[out] dereg_mr_cb
+ * Pointer to the dereg_mr callback to be filled in.
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
+{
+ *reg_mr_cb = mlx5_common_verbs_reg_mr;
+ *dereg_mr_cb = mlx5_common_verbs_dereg_mr;
+}
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-int
+static int
mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket)
{
if (bt == NULL) {
mlx5_mr_garbage_collect(share_cache);
}
+/**
+ * Initialize global MR cache of a device.
+ *
+ * Sets the OS-specific reg_mr/dereg_mr callbacks, initializes the
+ * cache read/write lock and allocates the global MR cache B-tree.
+ *
+ * @param share_cache
+ * Pointer to a global shared MR cache.
+ * @param socket
+ * NUMA socket on which memory must be allocated.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket)
+{
+ /* Set the reg_mr and dereg_mr callback functions. */
+ mlx5_os_set_reg_mr_cb(&share_cache->reg_mr_cb,
+ &share_cache->dereg_mr_cb);
+ /* Lock protecting concurrent access to the global cache. */
+ rte_rwlock_init(&share_cache->rwlock);
+ /* Initialize B-tree and allocate memory for global MR cache table. */
+ return mlx5_mr_btree_init(&share_cache->cache,
+ MLX5_MR_BTREE_CACHE_N * 2, socket);
+}
+
/**
* Flush all of the local cache entries.
*
int mlx5_mr_ctrl_init(struct mlx5_mr_ctrl *mr_ctrl, uint32_t *dev_gen_ptr,
int socket);
__rte_internal
-int mlx5_mr_btree_init(struct mlx5_mr_btree *bt, int n, int socket);
-__rte_internal
void mlx5_mr_btree_free(struct mlx5_mr_btree *bt);
__rte_internal
void mlx5_mr_btree_dump(struct mlx5_mr_btree *bt __rte_unused);
__rte_internal
void mlx5_mr_release_cache(struct mlx5_mr_share_cache *mr_cache);
__rte_internal
+int mlx5_mr_create_cache(struct mlx5_mr_share_cache *share_cache, int socket);
+__rte_internal
void mlx5_mr_dump_cache(struct mlx5_mr_share_cache *share_cache __rte_unused);
__rte_internal
void mlx5_mr_rebuild_cache(struct mlx5_mr_share_cache *share_cache);
void
mlx5_common_verbs_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
+__rte_internal
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb);
+
__rte_internal
void
mlx5_mr_free(struct mlx5_mr *mr, mlx5_dereg_mr_t dereg_mr_cb);
mlx5_mr_addr2mr_bh;
mlx5_mr_btree_dump;
mlx5_mr_btree_free;
- mlx5_mr_btree_init;
+ mlx5_mr_create_cache;
mlx5_mr_create_primary;
mlx5_mr_ctrl_init;
mlx5_mr_dump_cache;
mlx5_nl_vlan_vmwa_create; # WINDOWS_NO_EXPORT
mlx5_nl_vlan_vmwa_delete; # WINDOWS_NO_EXPORT
- mlx5_os_dereg_mr;
- mlx5_os_reg_mr;
mlx5_os_umem_dereg;
mlx5_os_umem_reg;
+ mlx5_os_set_reg_mr_cb;
mlx5_realloc;
* @return
* 0 on successful registration, -1 otherwise
*/
-int
+static int
mlx5_os_reg_mr(void *pd,
void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr)
{
* @param[in] pmd_mr
* Pointer to PMD mr object
*/
-void
+static void
mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
{
+ /* NOTE(review): the memset below dereferences pmd_mr unconditionally,
+ * while the guard above tolerates a NULL pmd_mr — confirm callers
+ * never pass NULL, otherwise this is undefined behavior.
+ */
if (pmd_mr && pmd_mr->mkey)
claim_zero(mlx5_os_umem_dereg(pmd_mr->obj));
memset(pmd_mr, 0, sizeof(*pmd_mr));
}
+
+/**
+ * Set the reg_mr and dereg_mr callbacks.
+ *
+ * Selects the OS-specific MR register/deregister implementations
+ * defined in this file (mlx5_os_reg_mr / mlx5_os_dereg_mr).
+ *
+ * @param[out] reg_mr_cb
+ * Pointer to the reg_mr callback to be filled in.
+ * @param[out] dereg_mr_cb
+ * Pointer to the dereg_mr callback to be filled in.
+ */
+void
+mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb, mlx5_dereg_mr_t *dereg_mr_cb)
+{
+ *reg_mr_cb = mlx5_os_reg_mr;
+ *dereg_mr_cb = mlx5_os_dereg_mr;
+}
void *mlx5_os_umem_reg(void *ctx, void *addr, size_t size, uint32_t access);
__rte_internal
int mlx5_os_umem_dereg(void *pumem);
-__rte_internal
-int mlx5_os_reg_mr(void *pd,
- void *addr, size_t length, struct mlx5_pmd_mr *pmd_mr);
-__rte_internal
-void mlx5_os_dereg_mr(struct mlx5_pmd_mr *pmd_mr);
+
#endif /* RTE_PMD_MLX5_COMMON_OS_H_ */
rte_compressdev_pmd_destroy(priv->compressdev);
return -1;
}
- if (mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
+ if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_compress_uar_release(priv);
rte_compressdev_pmd_destroy(priv->compressdev);
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
/* Register callback function for global shared MR cache management. */
if (TAILQ_EMPTY(&mlx5_compress_priv_list))
rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
rte_cryptodev_pmd_destroy(priv->crypto_dev);
return -1;
}
- if (mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
+ if (mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id()) != 0) {
DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
mlx5_crypto_uar_release(priv);
rte_cryptodev_pmd_destroy(priv->crypto_dev);
rte_errno = ENOMEM;
return -rte_errno;
}
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
priv->max_segs_num = devarg_prms.max_segs_num;
priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
return 1;
}
-/**
- * Set the reg_mr and dereg_mr call backs
- *
- * @param reg_mr_cb[out]
- * Pointer to reg_mr func
- * @param dereg_mr_cb[out]
- * Pointer to dereg_mr func
- *
- */
-void
-mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb)
-{
- *reg_mr_cb = mlx5_mr_verbs_ops.reg_mr;
- *dereg_mr_cb = mlx5_mr_verbs_ops.dereg_mr;
-}
-
/**
* Remove a MAC address from device
*
#include <mlx5_utils.h>
#include <mlx5_malloc.h>
-/**
- * Register mr. Given protection domain pointer, pointer to addr and length
- * register the memory region.
- *
- * @param[in] pd
- * Pointer to protection domain context.
- * @param[in] addr
- * Pointer to memory start address.
- * @param[in] length
- * Length of the memory to register.
- * @param[out] pmd_mr
- * pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- * @return
- * 0 on successful registration, -1 otherwise
- */
-static int
-mlx5_reg_mr(void *pd, void *addr, size_t length,
- struct mlx5_pmd_mr *pmd_mr)
-{
- return mlx5_common_verbs_reg_mr(pd, addr, length, pmd_mr);
-}
-
-/**
- * Deregister mr. Given the mlx5 pmd MR - deregister the MR
- *
- * @param[in] pmd_mr
- * pmd_mr struct set with lkey, address, length and pointer to mr object
- *
- */
-static void
-mlx5_dereg_mr(struct mlx5_pmd_mr *pmd_mr)
-{
- mlx5_common_verbs_dereg_mr(pmd_mr);
-}
-
-/* verbs operations. */
-const struct mlx5_mr_ops mlx5_mr_verbs_ops = {
- .reg_mr = mlx5_reg_mr,
- .dereg_mr = mlx5_dereg_mr,
-};
-
/**
* Modify Rx WQ vlan stripping offload
*
int mlx5_rxq_ibv_obj_dummy_lb_create(struct rte_eth_dev *dev);
void mlx5_rxq_ibv_obj_dummy_lb_release(struct rte_eth_dev *dev);
-/* Verbs ops struct */
-extern const struct mlx5_mr_ops mlx5_mr_verbs_ops;
extern struct mlx5_obj_ops ibv_obj_ops;
#endif /* RTE_PMD_MLX5_VERBS_H_ */
* At this point the device is not added to the memory
* event list yet, context is just being created.
*/
- err = mlx5_mr_btree_init(&sh->share_cache.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- sh->numa_node);
+ err = mlx5_mr_create_cache(&sh->share_cache, sh->numa_node);
if (err) {
err = rte_errno;
goto error;
}
- mlx5_os_set_reg_mr_cb(&sh->share_cache.reg_mr_cb,
- &sh->share_cache.dereg_mr_cb);
mlx5_os_dev_shared_handler_install(sh);
if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
err = mlx5_flow_os_init_workspace_once();
#define MLX5_RSS_HASH_FIELDS_LEN RTE_DIM(mlx5_rss_hash_fields)
-/* MR operations structure. */
-struct mlx5_mr_ops {
- mlx5_reg_mr_t reg_mr;
- mlx5_dereg_mr_t dereg_mr;
-};
-
struct mlx5_priv {
struct rte_eth_dev_data *dev_data; /* Pointer to device data. */
struct mlx5_dev_ctx_shared *sh; /* Shared device context. */
int mlx5_os_net_probe(struct mlx5_common_device *cdev);
void mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh);
void mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh);
-void mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb);
void mlx5_os_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index);
int mlx5_os_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac,
uint32_t index);
return 0;
}
-/**
- * Set the reg_mr and dereg_mr call backs
- *
- * @param reg_mr_cb[out]
- * Pointer to reg_mr func
- * @param dereg_mr_cb[out]
- * Pointer to dereg_mr func
- *
- */
-void
-mlx5_os_set_reg_mr_cb(mlx5_reg_mr_t *reg_mr_cb,
- mlx5_dereg_mr_t *dereg_mr_cb)
-{
- *reg_mr_cb = mlx5_os_reg_mr;
- *dereg_mr_cb = mlx5_os_dereg_mr;
-}
-
const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {0};
priv->regexdev->device = cdev->dev;
priv->regexdev->data->dev_private = priv;
priv->regexdev->state = RTE_REGEXDEV_READY;
- priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
- priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
- ret = mlx5_mr_btree_init(&priv->mr_scache.cache,
- MLX5_MR_BTREE_CACHE_N * 2,
- rte_socket_id());
+ ret = mlx5_mr_create_cache(&priv->mr_scache, rte_socket_id());
if (ret) {
DRV_LOG(ERR, "MR init tree failed.");
rte_errno = ENOMEM;