X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Fmlx5%2Fmlx5_crypto.c;h=3e4fd9b98d2e3361f50ea6c18868e6a80afd7ea0;hb=334ed198ab4d22f249d7b8274568253df33914a6;hp=71c7e133ad2d81ff97545d81abd7137a8a5dd8a3;hpb=427ec3346d2902ae8f3c5ad5f39c10fe07a66ffb;p=dpdk.git diff --git a/drivers/crypto/mlx5/mlx5_crypto.c b/drivers/crypto/mlx5/mlx5_crypto.c index 71c7e133ad..3e4fd9b98d 100644 --- a/drivers/crypto/mlx5/mlx5_crypto.c +++ b/drivers/crypto/mlx5/mlx5_crypto.c @@ -4,6 +4,7 @@ #include #include +#include #include #include #include @@ -33,7 +34,7 @@ TAILQ_HEAD(mlx5_crypto_privs, mlx5_crypto_priv) mlx5_crypto_priv_list = TAILQ_HEAD_INITIALIZER(mlx5_crypto_priv_list); -static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t priv_list_lock; int mlx5_crypto_logtype; @@ -59,7 +60,8 @@ const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = { }, .dataunit_set = RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES | - RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES, + RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES | + RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES, }, } }, } }, @@ -142,8 +144,9 @@ mlx5_crypto_dev_stop(struct rte_cryptodev *dev) static int mlx5_crypto_dev_start(struct rte_cryptodev *dev) { - RTE_SET_USED(dev); - return 0; + struct mlx5_crypto_priv *priv = dev->data->dev_private; + + return mlx5_dev_mempool_subscribe(priv->cdev); } static int @@ -220,6 +223,11 @@ mlx5_crypto_sym_session_configure(struct rte_cryptodev *dev, ((uint32_t)MLX5_BLOCK_SIZE_4096B << MLX5_BLOCK_SIZE_OFFSET); break; + case 1048576: + sess_private_data->bsp_res = rte_cpu_to_be_32 + ((uint32_t)MLX5_BLOCK_SIZE_1MB << + MLX5_BLOCK_SIZE_OFFSET); + break; default: DRV_LOG(ERR, "Cipher data unit length is not supported."); return -ENOTSUP; @@ -252,49 +260,35 @@ mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev, DRV_LOG(DEBUG, "Session %p was cleared.", spriv); } -static int -mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id) +static void +mlx5_crypto_indirect_mkeys_release(struct mlx5_crypto_qp *qp, uint16_t n) { - struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id]; + uint16_t i; - if (qp->qp_obj != NULL) - claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj)); - if (qp->umem_obj != NULL) - claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj)); - if (qp->umem_buf != NULL) - rte_free(qp->umem_buf); + for (i = 0; i < n; i++) + if (qp->mkey[i]) + claim_zero(mlx5_devx_cmd_destroy(qp->mkey[i])); +} + +static void +mlx5_crypto_qp_release(struct mlx5_crypto_qp *qp) +{ + if (qp == NULL) + return; + mlx5_devx_qp_destroy(&qp->qp_obj); mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh); mlx5_devx_cq_destroy(&qp->cq_obj); rte_free(qp); - dev->data->queue_pairs[qp_id] = NULL; - return 0; } static int -mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp) +mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id) { - /* - * In Order to configure self loopback, when calling these functions the - * remote QP id that is used is the id of the same QP. 
- */ - if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP, - qp->qp_obj->id)) { - DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).", - rte_errno); - return -1; - } - if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP, - qp->qp_obj->id)) { - DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).", - rte_errno); - return -1; - } - if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP, - qp->qp_obj->id)) { - DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).", - rte_errno); - return -1; - } + struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id]; + + mlx5_crypto_indirect_mkeys_release(qp, qp->entries_n); + mlx5_crypto_qp_release(qp); + dev->data->queue_pairs[qp_id] = NULL; return 0; } @@ -317,44 +311,10 @@ mlx5_crypto_get_block_size(struct rte_crypto_op *op) } } -/** - * Query LKey from a packet buffer for QP. If not found, add the mempool. - * - * @param priv - * Pointer to the priv object. - * @param addr - * Search key. - * @param mr_ctrl - * Pointer to per-queue MR control structure. - * @param ol_flags - * Mbuf offload features. - * - * @return - * Searched LKey on success, UINT32_MAX on no match. - */ -static __rte_always_inline uint32_t -mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr, - struct mlx5_mr_ctrl *mr_ctrl, uint64_t ol_flags) -{ - uint32_t lkey; - - /* Check generation bit to see if there's any change on existing MRs. */ - if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen)) - mlx5_mr_flush_local_cache(mr_ctrl); - /* Linear search on MR cache array. */ - lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru, - MLX5_MR_CACHE_N, addr); - if (likely(lkey != UINT32_MAX)) - return lkey; - /* Take slower bottom-half on miss. */ - return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr, - !!(ol_flags & EXT_ATTACHED_MBUF)); -} - static __rte_always_inline uint32_t -mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp, - struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm, - uint32_t offset, uint32_t *remain) +mlx5_crypto_klm_set(struct mlx5_crypto_qp *qp, struct rte_mbuf *mbuf, + struct mlx5_wqe_dseg *klm, uint32_t offset, + uint32_t *remain) { uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset); uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset); @@ -364,23 +324,21 @@ mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp, *remain -= data_len; klm->bcount = rte_cpu_to_be_32(data_len); klm->pbuf = rte_cpu_to_be_64(addr); - klm->lkey = mlx5_crypto_addr2mr(priv, addr, &qp->mr_ctrl, - mbuf->ol_flags); + klm->lkey = mlx5_mr_mb2mr(&qp->mr_ctrl, mbuf, 0); return klm->lkey; } static __rte_always_inline uint32_t -mlx5_crypto_klms_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp, - struct rte_crypto_op *op, struct rte_mbuf *mbuf, - struct mlx5_wqe_dseg *klm) +mlx5_crypto_klms_set(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op, + struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm) { uint32_t remain_len = op->sym->cipher.data.length; uint32_t nb_segs = mbuf->nb_segs; uint32_t klm_n = 1u; /* First mbuf needs to take the cipher offset. 
*/ - if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, klm, + if (unlikely(mlx5_crypto_klm_set(qp, mbuf, klm, op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; return 0; @@ -392,7 +350,7 @@ mlx5_crypto_klms_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp, op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; return 0; } - if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, ++klm, 0, + if (unlikely(mlx5_crypto_klm_set(qp, mbuf, ++klm, 0, &remain_len) == UINT32_MAX)) { op->status = RTE_CRYPTO_OP_STATUS_ERROR; return 0; @@ -418,7 +376,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv, uint32_t ds; bool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src; /* Set UMR WQE. */ - uint32_t klm_n = mlx5_crypto_klms_set(priv, qp, op, + uint32_t klm_n = mlx5_crypto_klms_set(qp, op, ipl ? op->sym->m_src : op->sym->m_dst, klms); if (unlikely(klm_n == 0)) @@ -444,15 +402,14 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv, cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size); klms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe)); if (!ipl) { - klm_n = mlx5_crypto_klms_set(priv, qp, op, op->sym->m_src, - klms); + klm_n = mlx5_crypto_klms_set(qp, op, op->sym->m_src, klms); if (unlikely(klm_n == 0)) return 0; } else { memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n); } ds = 2 + klm_n; - cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds); + cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds); cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_RDMA_WRITE); ds = RTE_ALIGN(ds, 4); @@ -461,7 +418,7 @@ mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv, if (priv->max_rdmar_ds > ds) { cseg += ds; ds = priv->max_rdmar_ds - ds; - cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds); + cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | ds); cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_NOP); qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */ @@ -505,7 +462,8 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, do { idx = qp->pi & mask; op = *ops++; - umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * idx); + umr = RTE_PTR_ADD(qp->qp_obj.umem_buf, + priv->wqe_set_size * idx); if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) { qp->stats.enqueue_err_count++; if (remain != nb_ops) { @@ -519,7 +477,7 @@ mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, } while (--remain); qp->stats.enqueued_count += nb_ops; rte_io_wmb(); - qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi); + qp->qp_obj.db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi); rte_wmb(); mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv); rte_wmb(); @@ -585,8 +543,8 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp) uint32_t i; for (i = 0 ; i < qp->entries_n; i++) { - struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i * - priv->wqe_set_size); + struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->qp_obj.umem_buf, + i * priv->wqe_set_size); struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *) (cseg + 1); struct mlx5_wqe_umr_bsf_seg *bsf = @@ -595,7 +553,7 @@ mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp) struct mlx5_wqe_rseg *rseg; /* Init UMR WQE. 
*/ - cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | + cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj.qp->id << 8) | (priv->umr_wqe_size / MLX5_WSEG_SIZE)); cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR << MLX5_COMP_MODE_OFFSET); @@ -623,23 +581,25 @@ mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv, struct mlx5_umr_wqe *umr; uint32_t i; struct mlx5_devx_mkey_attr attr = { - .pd = priv->pdn, + .pd = priv->cdev->pdn, .umr_en = 1, .crypto_en = 1, .set_remote_rw = 1, .klm_num = RTE_ALIGN(priv->max_segs_num, 4), }; - for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0; + for (umr = (struct mlx5_umr_wqe *)qp->qp_obj.umem_buf, i = 0; i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) { attr.klm_array = (struct mlx5_klm *)&umr->kseg[0]; - qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr); - if (!qp->mkey[i]) { - DRV_LOG(ERR, "Failed to allocate indirect mkey."); - return -1; - } + qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->cdev->ctx, &attr); + if (!qp->mkey[i]) + goto error; } return 0; +error: + DRV_LOG(ERR, "Failed to allocate indirect mkey."); + mlx5_crypto_indirect_mkeys_release(qp, i); + return -1; } static int @@ -651,9 +611,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, struct mlx5_devx_qp_attr attr = {0}; struct mlx5_crypto_qp *qp; uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors); - uint32_t umem_size = RTE_BIT32(log_nb_desc) * - priv->wqe_set_size + - sizeof(*qp->db_rec) * 2; + uint32_t ret; uint32_t alloc_size = sizeof(*qp); struct mlx5_devx_cq_attr cq_attr = { .uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar), @@ -672,52 +630,36 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, rte_errno = ENOMEM; return -rte_errno; } - if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc, + if (mlx5_devx_cq_create(priv->cdev->ctx, &qp->cq_obj, log_nb_desc, &cq_attr, socket_id) != 0) { DRV_LOG(ERR, "Failed to create CQ."); goto error; } - qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id); - if (qp->umem_buf == NULL) { - DRV_LOG(ERR, "Failed to allocate QP umem."); - rte_errno = ENOMEM; - goto error; - } - qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx, - (void *)(uintptr_t)qp->umem_buf, - umem_size, - IBV_ACCESS_LOCAL_WRITE); - if (qp->umem_obj == NULL) { - DRV_LOG(ERR, "Failed to register QP umem."); + attr.pd = priv->cdev->pdn; + attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar); + attr.cqn = qp->cq_obj.cq->id; + attr.rq_size = 0; + attr.sq_size = RTE_BIT32(log_nb_desc); + attr.ts_format = + mlx5_ts_format_conv(priv->cdev->config.hca_attr.qp_ts_format); + ret = mlx5_devx_qp_create(priv->cdev->ctx, &qp->qp_obj, log_nb_desc, + &attr, socket_id); + if (ret) { + DRV_LOG(ERR, "Failed to create QP."); goto error; } - if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N, - priv->dev_config.socket_id) != 0) { + if (mlx5_mr_ctrl_init(&qp->mr_ctrl, priv->cdev, + priv->dev_config.socket_id) != 0) { DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.", (uint32_t)qp_id); rte_errno = ENOMEM; goto error; } - qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen; - attr.pd = priv->pdn; - attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar); - attr.cqn = qp->cq_obj.cq->id; - attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE)); - attr.rq_size = 0; - attr.sq_size = RTE_BIT32(log_nb_desc); - attr.dbr_umem_valid = 1; - attr.wq_umem_id = qp->umem_obj->umem_id; - attr.wq_umem_offset = 0; - attr.dbr_umem_id = qp->umem_obj->umem_id; - 
attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format); - attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size; - qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr); - if (qp->qp_obj == NULL) { - DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno); - goto error; - } - qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address); - if (mlx5_crypto_qp2rts(qp)) + /* + * In Order to configure self loopback, when calling devx qp2rts the + * remote QP id that is used is the id of the same QP. + */ + if (mlx5_devx_qp2rts(&qp->qp_obj, qp->qp_obj.qp->id)) goto error; qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1), RTE_CACHE_LINE_SIZE); @@ -733,7 +675,7 @@ mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id, dev->data->queue_pairs[qp_id] = qp; return 0; error: - mlx5_crypto_queue_pair_release(dev, qp_id); + mlx5_crypto_qp_release(qp); return -1; } @@ -783,12 +725,8 @@ static struct rte_cryptodev_ops mlx5_crypto_ops = { }; static void -mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv) +mlx5_crypto_uar_release(struct mlx5_crypto_priv *priv) { - if (priv->pd != NULL) { - claim_zero(mlx5_glue->dealloc_pd(priv->pd)); - priv->pd = NULL; - } if (priv->uar != NULL) { mlx5_glue->devx_free_uar(priv->uar); priv->uar = NULL; @@ -796,47 +734,13 @@ mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv) } static int -mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv) -{ -#ifdef HAVE_IBV_FLOW_DV_SUPPORT - struct mlx5dv_obj obj; - struct mlx5dv_pd pd_info; - int ret; - - priv->pd = mlx5_glue->alloc_pd(priv->ctx); - if (priv->pd == NULL) { - DRV_LOG(ERR, "Failed to allocate PD."); - return errno ? -errno : -ENOMEM; - } - obj.pd.in = priv->pd; - obj.pd.out = &pd_info; - ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD); - if (ret != 0) { - DRV_LOG(ERR, "Fail to get PD object info."); - mlx5_glue->dealloc_pd(priv->pd); - priv->pd = NULL; - return -errno; - } - priv->pdn = pd_info.pdn; - return 0; -#else - (void)priv; - DRV_LOG(ERR, "Cannot get pdn - no DV support."); - return -ENOTSUP; -#endif /* HAVE_IBV_FLOW_DV_SUPPORT */ -} - -static int -mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv) +mlx5_crypto_uar_prepare(struct mlx5_crypto_priv *priv) { - if (mlx5_crypto_pd_create(priv) != 0) - return -1; - priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1); + priv->uar = mlx5_devx_alloc_uar(priv->cdev->ctx, -1); if (priv->uar) priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar); if (priv->uar == NULL || priv->uar_addr == NULL) { rte_errno = errno; - claim_zero(mlx5_glue->dealloc_pd(priv->pd)); DRV_LOG(ERR, "Failed to allocate UAR."); return -1; } @@ -945,57 +849,21 @@ mlx5_crypto_parse_devargs(struct rte_devargs *devargs, return 0; } -/** - * Callback for memory event. - * - * @param event_type - * Memory event type. - * @param addr - * Address of memory. - * @param len - * Size of memory. - */ -static void -mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr, - size_t len, void *arg __rte_unused) -{ - struct mlx5_crypto_priv *priv; - - /* Must be called from the primary process. */ - MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY); - switch (event_type) { - case RTE_MEM_EVENT_FREE: - pthread_mutex_lock(&priv_list_lock); - /* Iterate all the existing mlx5 devices. 
*/ - TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next) - mlx5_free_mr_by_addr(&priv->mr_scache, - priv->ctx->device->name, - addr, len); - pthread_mutex_unlock(&priv_list_lock); - break; - case RTE_MEM_EVENT_ALLOC: - default: - break; - } -} - static int -mlx5_crypto_dev_probe(struct rte_device *dev) +mlx5_crypto_dev_probe(struct mlx5_common_device *cdev) { - struct ibv_device *ibv; struct rte_cryptodev *crypto_dev; - struct ibv_context *ctx; struct mlx5_devx_obj *login; struct mlx5_crypto_priv *priv; struct mlx5_crypto_devarg_params devarg_prms = { 0 }; - struct mlx5_hca_attr attr = { 0 }; struct rte_cryptodev_pmd_init_params init_params = { .name = "", .private_data_size = sizeof(struct mlx5_crypto_priv), - .socket_id = dev->numa_node, + .socket_id = cdev->dev->numa_node, .max_nb_queue_pairs = RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS, }; + const char *ibdev_name = mlx5_os_get_ctx_device_name(cdev->ctx); uint16_t rdmw_wqe_size; int ret; @@ -1004,75 +872,50 @@ mlx5_crypto_dev_probe(struct rte_device *dev) rte_errno = ENOTSUP; return -rte_errno; } - ibv = mlx5_os_get_ibv_dev(dev); - if (ibv == NULL) - return -rte_errno; - ctx = mlx5_glue->dv_open_device(ibv); - if (ctx == NULL) { - DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name); - rte_errno = ENODEV; - return -rte_errno; - } - if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 || - attr.crypto == 0 || attr.aes_xts == 0) { + if (!cdev->config.hca_attr.crypto || !cdev->config.hca_attr.aes_xts) { DRV_LOG(ERR, "Not enough capabilities to support crypto " "operations, maybe old FW/OFED version?"); - claim_zero(mlx5_glue->close_device(ctx)); rte_errno = ENOTSUP; return -ENOTSUP; } - ret = mlx5_crypto_parse_devargs(dev->devargs, &devarg_prms); + ret = mlx5_crypto_parse_devargs(cdev->dev->devargs, &devarg_prms); if (ret) { DRV_LOG(ERR, "Failed to parse devargs."); - claim_zero(mlx5_glue->close_device(ctx)); return -rte_errno; } - login = mlx5_devx_cmd_create_crypto_login_obj(ctx, + login = mlx5_devx_cmd_create_crypto_login_obj(cdev->ctx, &devarg_prms.login_attr); if (login == NULL) { DRV_LOG(ERR, "Failed to configure login."); - claim_zero(mlx5_glue->close_device(ctx)); return -rte_errno; } - crypto_dev = rte_cryptodev_pmd_create(ibv->name, dev, - &init_params); + crypto_dev = rte_cryptodev_pmd_create(ibdev_name, cdev->dev, + &init_params); if (crypto_dev == NULL) { - DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name); - claim_zero(mlx5_glue->close_device(ctx)); + DRV_LOG(ERR, "Failed to create device \"%s\".", ibdev_name); return -ENODEV; } DRV_LOG(INFO, - "Crypto device %s was created successfully.", ibv->name); + "Crypto device %s was created successfully.", ibdev_name); crypto_dev->dev_ops = &mlx5_crypto_ops; crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst; crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst; crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS; crypto_dev->driver_id = mlx5_crypto_driver_id; priv = crypto_dev->data->dev_private; - priv->ctx = ctx; + priv->cdev = cdev; priv->login_obj = login; priv->crypto_dev = crypto_dev; - priv->qp_ts_format = attr.qp_ts_format; - if (mlx5_crypto_hw_global_prepare(priv) != 0) { + if (mlx5_crypto_uar_prepare(priv) != 0) { rte_cryptodev_pmd_destroy(priv->crypto_dev); - claim_zero(mlx5_glue->close_device(priv->ctx)); return -1; } - if (mlx5_mr_btree_init(&priv->mr_scache.cache, - MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) { - DRV_LOG(ERR, "Failed to allocate shared cache MR memory."); - mlx5_crypto_hw_global_release(priv); - 
rte_cryptodev_pmd_destroy(priv->crypto_dev); - claim_zero(mlx5_glue->close_device(priv->ctx)); - rte_errno = ENOMEM; - return -rte_errno; - } - priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr; - priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr; priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag); priv->max_segs_num = devarg_prms.max_segs_num; priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) + - sizeof(struct mlx5_umr_wqe) + + sizeof(struct mlx5_wqe_cseg) + + sizeof(struct mlx5_wqe_umr_cseg) + + sizeof(struct mlx5_wqe_mkey_cseg) + RTE_ALIGN(priv->max_segs_num, 4) * sizeof(struct mlx5_wqe_dseg); rdmw_wqe_size = sizeof(struct mlx5_rdma_write_wqe) + @@ -1082,38 +925,31 @@ mlx5_crypto_dev_probe(struct rte_device *dev) priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size; priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB; priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg); - /* Register callback function for global shared MR cache management. */ - if (TAILQ_EMPTY(&mlx5_crypto_priv_list)) - rte_mem_event_callback_register("MLX5_MEM_EVENT_CB", - mlx5_crypto_mr_mem_event_cb, - NULL); pthread_mutex_lock(&priv_list_lock); TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); + + rte_cryptodev_pmd_probing_finish(crypto_dev); + return 0; } static int -mlx5_crypto_dev_remove(struct rte_device *dev) +mlx5_crypto_dev_remove(struct mlx5_common_device *cdev) { struct mlx5_crypto_priv *priv = NULL; pthread_mutex_lock(&priv_list_lock); TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next) - if (priv->crypto_dev->device == dev) + if (priv->crypto_dev->device == cdev->dev) break; if (priv) TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next); pthread_mutex_unlock(&priv_list_lock); if (priv) { - if (TAILQ_EMPTY(&mlx5_crypto_priv_list)) - rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB", - NULL); - mlx5_mr_release_cache(&priv->mr_scache); - mlx5_crypto_hw_global_release(priv); + mlx5_crypto_uar_release(priv); rte_cryptodev_pmd_destroy(priv->crypto_dev); claim_zero(mlx5_devx_cmd_destroy(priv->login_obj)); - claim_zero(mlx5_glue->close_device(priv->ctx)); } return 0; } @@ -1138,6 +974,7 @@ static struct mlx5_class_driver mlx5_crypto_driver = { RTE_INIT(rte_mlx5_crypto_init) { + pthread_mutex_init(&priv_list_lock, NULL); mlx5_common_init(); if (mlx5_glue != NULL) mlx5_class_driver_register(&mlx5_crypto_driver);
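
Note on usage (illustrative, not part of the patch): the capability and session-configure hunks above let the mlx5 crypto PMD accept a 1 MiB (1048576-byte) AES-XTS data-unit length in addition to 512 B and 4 KiB. The sketch below shows how an application might request that data-unit length, assuming the DPDK 21.11-era two-step symmetric-session API; the device id, key buffer, session mempools and IV offset are placeholders, not values taken from this patch.

#include <rte_cryptodev.h>
#include <rte_crypto_sym.h>
#include <rte_mempool.h>

static struct rte_cryptodev_sym_session *
create_xts_1mb_session(uint8_t dev_id, struct rte_mempool *sess_mp,
		       struct rte_mempool *sess_priv_mp,
		       const uint8_t *key, uint16_t key_len)
{
	struct rte_crypto_sym_xform xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_AES_XTS,
			.key = { .data = key, .length = key_len },
			/* 16-byte XTS tweak stored after the crypto op
			 * (placeholder offset, application-specific). */
			.iv = {
				.offset = sizeof(struct rte_crypto_op) +
					  sizeof(struct rte_crypto_sym_op),
				.length = 16,
			},
			/* Cipher each 1 MiB data unit independently;
			 * corresponds to the 1048576-byte case added above. */
			.dataunit_len = 1024 * 1024,
		},
	};
	struct rte_cryptodev_sym_session *sess;

	sess = rte_cryptodev_sym_session_create(sess_mp);
	if (sess == NULL)
		return NULL;
	if (rte_cryptodev_sym_session_init(dev_id, sess, &xform,
					   sess_priv_mp) < 0) {
		rte_cryptodev_sym_session_free(sess);
		return NULL;
	}
	return sess;
}

The session is rejected with -ENOTSUP by the PMD's session-configure callback shown above if the requested data-unit length is not one of the advertised sizes.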