The encryption does not require text to be aligned to the AES block size (128b).
+For security reasons and to increase robustness, this driver only deals with virtual
+memory addresses. The way resource allocations are handled by the kernel,
+combined with hardware specifications that allow handling virtual memory
+addresses directly, ensures that DPDK applications cannot access random
+physical memory (or memory that does not belong to the current process).
+
The PMD uses ``libibverbs`` and ``libmlx5`` to access the device firmware
or to access the hardware components directly.
There are different levels of objects and bypassing abilities.
		claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
	if (qp->umem_buf != NULL)
		rte_free(qp->umem_buf);
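+	/* Release the per-queue MR cache B-tree. */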
+	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	mlx5_devx_cq_destroy(&qp->cq_obj);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
		DRV_LOG(ERR, "Failed to register QP umem.");
		goto error;
	}
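+	/* Per-queue MR cache for data path address translation. */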
+	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
+			       priv->dev_config.socket_id) != 0) {
+		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
+			(uint32_t)qp_id);
+		rte_errno = ENOMEM;
+		goto error;
+	}
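+	/* Link the queue MR cache to the shared cache generation counter. */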
+	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	attr.pd = priv->pdn;
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
	attr.cqn = qp->cq_obj.cq->id;
	return 0;
}
+/**
+ * Callback for memory event.
+ *
+ * @param event_type
+ *   Memory event type.
+ * @param addr
+ *   Address of memory.
+ * @param len
+ *   Size of memory.
+ */
+static void
+mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
+			    size_t len, void *arg __rte_unused)
+{
+	struct mlx5_crypto_priv *priv;
+
+	/* Must be called from the primary process. */
+	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
+	switch (event_type) {
+	case RTE_MEM_EVENT_FREE:
+		pthread_mutex_lock(&priv_list_lock);
+		/* Iterate all the existing mlx5 devices. */
+		TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
+			mlx5_free_mr_by_addr(&priv->mr_scache,
+					     priv->ctx->device->name,
+					     addr, len);
+		pthread_mutex_unlock(&priv_list_lock);
+		break;
+	case RTE_MEM_EVENT_ALLOC:
+	default:
+		break;
+	}
+}
+
/**
* DPDK callback to register a PCI device.
*
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
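+	/* Initialize the per-device MR cache shared by all queue pairs. */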
+	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
+			       MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
+		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
+		mlx5_crypto_hw_global_release(priv);
+		rte_cryptodev_pmd_destroy(priv->crypto_dev);
+		claim_zero(mlx5_glue->close_device(priv->ctx));
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
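+	/* MR registration in the shared cache is done through Verbs. */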
+	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
+	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
+	/* Register callback function for global shared MR cache management. */
+	if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
+		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
+						mlx5_crypto_mr_mem_event_cb,
+						NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
		TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
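+		/* Last device removed: unregister the memory event callback. */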
+		if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
+			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
+							  NULL);
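+		/* Release all MRs registered in the device's shared cache. */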
+		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_crypto_hw_global_release(priv);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_glue->close_device(priv->ctx));