/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */

#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_bus_pci.h>
#include <rte_memory.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>

#include "mlx5_crypto_utils.h"
#include "mlx5_crypto.h"

#define MLX5_CRYPTO_DRIVER_NAME crypto_mlx5
#define MLX5_CRYPTO_LOG_NAME pmd.crypto.mlx5
#define MLX5_CRYPTO_MAX_QPS 1024
#define MLX5_CRYPTO_MAX_SEGS 56

#define MLX5_CRYPTO_FEATURE_FLAGS \
	(RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
	 RTE_CRYPTODEV_FF_IN_PLACE_SGL | RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | \
	 RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY | \
	 RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS)

TAILQ_HEAD(mlx5_crypto_privs, mlx5_crypto_priv) mlx5_crypto_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_crypto_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_crypto_logtype;

uint8_t mlx5_crypto_driver_id;

const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {
	{		/* AES XTS */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
				.block_size = 16,
				.key_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.dataunit_set =
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES,
			}, }
		}, }
	},
};

static const char mlx5_crypto_drv_name[] = RTE_STR(MLX5_CRYPTO_DRIVER_NAME);

static const struct rte_driver mlx5_drv = {
	.name = mlx5_crypto_drv_name,
	.alias = mlx5_crypto_drv_name
};

static struct cryptodev_driver mlx5_cryptodev_driver;

struct mlx5_crypto_session {
	uint32_t bs_bpt_eo_es;
	/**< bsf_size, bsf_p_type, encryption_order and encryption standard,
	 * saved in big endian format.
	 */
	uint32_t bsp_res;
	/**< crypto_block_size_pointer and reserved 24 bits saved in big
	 * endian format.
	 */
	uint32_t iv_offset:16;
	/**< Starting point for Initialisation Vector. */
	struct mlx5_crypto_dek *dek; /**< Pointer to dek struct. */
	uint32_t dek_id; /**< DEK ID */
} __rte_packed;

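/*
 * The session fields above are precomputed in big endian form so the
 * data path can copy them verbatim into the BSF (block signature
 * format) segment of the UMR WQE without per-op conversion; see
 * mlx5_crypto_wqe_set() below.
 */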
static void
mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,
			  struct rte_cryptodev_info *dev_info)
{
	RTE_SET_USED(dev);
	if (dev_info != NULL) {
		dev_info->driver_id = mlx5_crypto_driver_id;
		dev_info->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
		dev_info->capabilities = mlx5_crypto_caps;
		dev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;
		dev_info->min_mbuf_headroom_req = 0;
		dev_info->min_mbuf_tailroom_req = 0;
		dev_info->sym.max_nb_sessions = 0;
		/*
		 * If 0, the device does not have any limitation in number of
		 * sessions that can be used.
		 */
	}
}

static int
mlx5_crypto_dev_configure(struct rte_cryptodev *dev,
			  struct rte_cryptodev_config *config)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	if (config == NULL) {
		DRV_LOG(ERR, "Invalid crypto dev configure parameters.");
		return -EINVAL;
	}
	if ((config->ff_disable & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) != 0) {
		DRV_LOG(ERR,
			"Disabled symmetric crypto feature is not supported.");
		return -ENOTSUP;
	}
	if (mlx5_crypto_dek_setup(priv) != 0) {
		DRV_LOG(ERR, "Dek hash list creation has failed.");
		return -ENOMEM;
	}
	priv->dev_config = *config;
	DRV_LOG(DEBUG, "Device %u was configured.", dev->driver_id);
	return 0;
}

static void
mlx5_crypto_dev_stop(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_crypto_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_crypto_dev_close(struct rte_cryptodev *dev)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	mlx5_crypto_dek_unset(priv);
	DRV_LOG(DEBUG, "Device %u was closed.", dev->driver_id);
	return 0;
}

static unsigned int
mlx5_crypto_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct mlx5_crypto_session);
}

static int
mlx5_crypto_sym_session_configure(struct rte_cryptodev *dev,
				  struct rte_crypto_sym_xform *xform,
				  struct rte_cryptodev_sym_session *session,
				  struct rte_mempool *mp)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *sess_private_data;
	struct rte_crypto_cipher_xform *cipher;
	uint8_t encryption_order;
	int ret;

	if (unlikely(xform->next != NULL)) {
		DRV_LOG(ERR, "Xform next is not supported.");
		return -ENOTSUP;
	}
	if (unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||
		     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {
		DRV_LOG(ERR, "Only AES-XTS algorithm is supported.");
		return -ENOTSUP;
	}
	ret = rte_mempool_get(mp, (void *)&sess_private_data);
	if (ret != 0) {
		DRV_LOG(ERR,
			"Failed to get session %p private data from mempool.",
			sess_private_data);
		return -ENOMEM;
	}
	cipher = &xform->cipher;
	sess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);
	if (sess_private_data->dek == NULL) {
		rte_mempool_put(mp, sess_private_data);
		DRV_LOG(ERR, "Failed to prepare dek.");
		return -ENOMEM;
	}
	if (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;
	else
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;
	sess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32
			(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |
			 MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |
			 encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |
			 MLX5_ENCRYPTION_STANDARD_AES_XTS);
	switch (xform->cipher.dataunit_len) {
	case 0:
		sess_private_data->bsp_res = 0;
		break;
	case 512:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_512B <<
					      MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 4096:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<
					      MLX5_BLOCK_SIZE_OFFSET);
		break;
	default:
		/* Release the DEK and the mempool object taken above. */
		mlx5_crypto_dek_destroy(priv, sess_private_data->dek);
		rte_mempool_put(mp, sess_private_data);
		DRV_LOG(ERR, "Cipher data unit length is not supported.");
		return -ENOTSUP;
	}
	sess_private_data->iv_offset = cipher->iv.offset;
	sess_private_data->dek_id =
			rte_cpu_to_be_32(sess_private_data->dek->obj->id &
					 0xffffff);
	set_sym_session_private_data(session, dev->driver_id,
				     sess_private_data);
	DRV_LOG(DEBUG, "Session %p was configured.", sess_private_data);
	return 0;
}

static void
mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev,
			      struct rte_cryptodev_sym_session *sess)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *spriv = get_sym_session_private_data(sess,
								dev->driver_id);

	if (unlikely(spriv == NULL)) {
		DRV_LOG(ERR, "Failed to get session %p private data.", spriv);
		return;
	}
	mlx5_crypto_dek_destroy(priv, spriv->dek);
	set_sym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(rte_mempool_from_obj(spriv), spriv);
	DRV_LOG(DEBUG, "Session %p was cleared.", spriv);
}

static int
mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->qp_obj != NULL)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
	if (qp->umem_obj != NULL)
		claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
	if (qp->umem_buf != NULL)
		rte_free(qp->umem_buf);
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	mlx5_devx_cq_destroy(&qp->cq_obj);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

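/*
 * Each crypto op is executed as a UMR WQE, which binds the data
 * segments and the crypto BSF to an indirect mkey, followed by an
 * RDMA WRITE WQE that moves the payload through that mkey so it is
 * ciphered on the fly. Both are posted on a QP looped back to itself,
 * hence the QP must be moved to RTS state against its own id.
 */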
static int
mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
{
	/*
	 * In order to configure self loopback, when calling these functions
	 * the remote QP id that is used is the id of the same QP.
	 */
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

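/*
 * Used only when the session carries no fixed data-unit length
 * (dataunit_len == 0): the block size is then derived per operation
 * from the cipher data length.
 */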
static __rte_noinline uint32_t
mlx5_crypto_get_block_size(struct rte_crypto_op *op)
{
	uint32_t bl = op->sym->cipher.data.length;

	switch (bl) {
	case (1 << 20):
		return RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 12):
		return RTE_BE32(MLX5_BLOCK_SIZE_4096B <<
				MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 9):
		return RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);
	default:
		DRV_LOG(ERR, "Unknown block size: %u.", bl);
		return UINT32_MAX;
	}
}

/**
 * Query LKey from a packet buffer for QP. If not found, add the mempool.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param addr
 *   Search key.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param ol_flags
 *   Mbuf offload features.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr,
		    struct mlx5_mr_ctrl *mr_ctrl, uint64_t ol_flags)
{
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
				  !!(ol_flags & EXT_ATTACHED_MBUF));
}

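/*
 * Fill a single KLM (scatter-gather) entry: byte count, buffer address
 * and LKey. Returns the LKey, or UINT32_MAX when MR resolution fails,
 * which callers treat as an error.
 */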
static __rte_always_inline uint32_t
mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,
		    struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm,
		    uint32_t offset, uint32_t *remain)
{
	uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	if (data_len > *remain)
		data_len = *remain;
	*remain -= data_len;
	klm->bcount = rte_cpu_to_be_32(data_len);
	klm->pbuf = rte_cpu_to_be_64(addr);
	klm->lkey = mlx5_crypto_addr2mr(priv, addr, &qp->mr_ctrl,
					mbuf->ol_flags);
	return klm->lkey;
}

static __rte_always_inline uint32_t
mlx5_crypto_klms_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,
		     struct rte_crypto_op *op, struct rte_mbuf *mbuf,
		     struct mlx5_wqe_dseg *klm)
{
	uint32_t remain_len = op->sym->cipher.data.length;
	uint32_t nb_segs = mbuf->nb_segs;
	uint32_t klm_n = 1;

	/* First mbuf needs to take the cipher offset. */
	if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, klm,
		     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return 0;
	}
	while (remain_len != 0) {
		nb_segs--;
		mbuf = mbuf->next;
		if (unlikely(mbuf == NULL || nb_segs == 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
		if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, ++klm, 0,
						 &remain_len) == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return 0;
		}
		klm_n++;
	}
	return klm_n;
}

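/*
 * Build one WQE set for a crypto op: a UMR WQE binding the destination
 * KLMs and the crypto BSF to this descriptor's indirect mkey, then an
 * RDMA WRITE WQE pushing the source data through that mkey, padded
 * with a NOP WQE when the RDMA WRITE is shorter than the fixed stride.
 */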
static __rte_always_inline int
mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
			 struct mlx5_crypto_qp *qp,
			 struct rte_crypto_op *op,
			 struct mlx5_umr_wqe *umr)
{
	struct mlx5_crypto_session *sess = get_sym_session_private_data
				(op->sym->session, mlx5_crypto_driver_id);
	struct mlx5_wqe_cseg *cseg = &umr->ctr;
	struct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;
	struct mlx5_wqe_dseg *klms = &umr->kseg[0];
	struct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)
				      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;
	uint32_t ds;
	bool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;
	/* Set UMR WQE. */
	uint32_t klm_n = mlx5_crypto_klms_set(priv, qp, op,
				ipl ? op->sym->m_src : op->sym->m_dst, klms);

	if (unlikely(klm_n == 0))
		return 0;
	bsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;
	if (unlikely(!sess->bsp_res)) {
		bsf->bsp_res = mlx5_crypto_get_block_size(op);
		if (unlikely(bsf->bsp_res == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	} else {
		bsf->bsp_res = sess->bsp_res;
	}
	bsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);
	memcpy(bsf->xts_initial_tweak,
	       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);
	bsf->res_dp = sess->dek_id;
	mkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);
	qp->db_pi += priv->umr_wqe_stride;
	/* Set RDMA_WRITE WQE. */
	cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
	klms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));
	if (!ipl) {
		klm_n = mlx5_crypto_klms_set(priv, qp, op, op->sym->m_src,
					     klms);
		if (unlikely(klm_n == 0))
			return 0;
	} else {
		memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
	}
	ds = 2 + klm_n;
	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							MLX5_OPCODE_RDMA_WRITE);
	ds = RTE_ALIGN(ds, 4);
	qp->db_pi += ds >> 2;
	/* Set NOP WQE if needed. */
	if (priv->max_rdmar_ds > ds) {
		cseg += ds;
		ds = priv->max_rdmar_ds - ds;
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
		cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
						MLX5_OPCODE_NOP);
		qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
	}
	qp->wqe = (uint8_t *)cseg;
	return 1;
}

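/*
 * Ring the doorbell by writing the 64-bit value to the UAR register.
 * On 64-bit hosts this is a single store; on 32-bit hosts it takes two
 * ordered 32-bit stores under a spinlock.
 */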
static __rte_always_inline void
mlx5_crypto_uar_write(uint64_t val, struct mlx5_crypto_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif
}

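/*
 * Enqueue burst: free slots are entries_n - (pi - ci) computed with
 * 16-bit wrap-around arithmetic; the doorbell record and the UAR
 * register are written only once per burst, after all WQEs are built.
 */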
static uint16_t
mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	struct mlx5_crypto_priv *priv = qp->priv;
	struct mlx5_umr_wqe *umr;
	struct rte_crypto_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);
	uint32_t idx;

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		idx = qp->pi & mask;
		op = *ops++;
		umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * idx);
		if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
			qp->stats.enqueue_err_count++;
			if (remain != nb_ops) {
				qp->stats.enqueued_count -= remain;
				break;
			}
			return 0;
		}
		qp->ops[idx] = op;
		qp->pi++;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
	rte_wmb();
	mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
	rte_wmb();
	return nb_ops;
}

static __rte_noinline void
mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							&qp->cq_obj.cqes[idx];

	op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	qp->stats.dequeue_err_count++;
	DRV_LOG(ERR, "CQE ERR:%x.", rte_be_to_cpu_32(cqe->syndrome));
}

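/*
 * Dequeue burst: poll CQEs in order. A CQE still owned by HW ends the
 * poll quietly; any other non-SW-owned status is a completion error
 * handled by mlx5_crypto_cqe_err_handle().
 */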
static uint16_t
mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	volatile struct mlx5_cqe *restrict cqe;
	struct rte_crypto_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		op = qp->ops[idx];
		cqe = &qp->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
				mlx5_crypto_cqe_err_handle(qp, op);
			break;
		}
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

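/*
 * Pre-initialize the constant parts of every WQE set once at queue
 * setup, so the data path only patches the per-op fields (opcode,
 * producer index, KLMs and BSF).
 */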
static void
mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
{
	uint32_t i;

	for (i = 0 ; i < qp->entries_n; i++) {
		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
							 priv->wqe_set_size);
		struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
								     (cseg + 1);
		struct mlx5_wqe_umr_bsf_seg *bsf =
			(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,
						       priv->umr_wqe_size)) - 1;
		struct mlx5_wqe_rseg *rseg;

		/* Init UMR WQE. */
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
					 (priv->umr_wqe_size / MLX5_WSEG_SIZE));
		cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
				       MLX5_COMP_MODE_OFFSET);
		cseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);
		ucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);
		ucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */
		ucseg->ko_to_bs = rte_cpu_to_be_32
			((RTE_ALIGN(priv->max_segs_num, 4u) <<
			 MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));
		bsf->keytag = priv->keytag;
		/* Init RDMA WRITE WQE. */
		cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
		cseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<
				      MLX5_COMP_MODE_OFFSET) |
				      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);
		rseg = (struct mlx5_wqe_rseg *)(cseg + 1);
		rseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);
	}
}

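/*
 * Create one indirect (UMR and crypto enabled) mkey per descriptor.
 * Each mkey's KLM array aliases the KLM segments inside the matching
 * UMR WQE, so re-posting the WQE re-binds the mkey to fresh buffers.
 */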
static int
mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
				   struct mlx5_crypto_qp *qp)
{
	struct mlx5_umr_wqe *umr;
	uint32_t i;
	struct mlx5_devx_mkey_attr attr = {
		.pd = priv->pdn,
		.umr_en = 1,
		.crypto_en = 1,
		.set_remote_rw = 1,
		.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
	};

	for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
	   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
		attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
		if (!qp->mkey[i]) {
			DRV_LOG(ERR, "Failed to allocate indirect mkey.");
			return -1;
		}
	}
	return 0;
}

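/*
 * The registered umem holds all WQE sets back to back followed by the
 * two-entry doorbell record, which is why dbr_address below equals the
 * total size of the WQE area.
 */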
static int
mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			     const struct rte_cryptodev_qp_conf *qp_conf,
			     int socket_id)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_devx_qp_attr attr = {0};
	struct mlx5_crypto_qp *qp;
	uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
	uint32_t umem_size = RTE_BIT32(log_nb_desc) *
			     priv->wqe_set_size +
			     sizeof(*qp->db_rec) * 2;
	uint32_t alloc_size = sizeof(*qp);
	struct mlx5_devx_cq_attr cq_attr = {
			.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};

	if (dev->data->queue_pairs[qp_id] != NULL)
		mlx5_crypto_queue_pair_release(dev, qp_id);
	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += (sizeof(struct rte_crypto_op *) +
		       sizeof(struct mlx5_devx_obj *)) *
		       RTE_BIT32(log_nb_desc);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc,
				&cq_attr, socket_id) != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
	if (qp->umem_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP umem.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					(void *)(uintptr_t)qp->umem_buf,
					umem_size,
					IBV_ACCESS_LOCAL_WRITE);
	if (qp->umem_obj == NULL) {
		DRV_LOG(ERR, "Failed to register QP umem.");
		goto error;
	}
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id) != 0) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto error;
	}
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	attr.pd = priv->pdn;
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
	attr.cqn = qp->cq_obj.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 0;
	attr.sq_size = RTE_BIT32(log_nb_desc);
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = qp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = qp->umem_obj->umem_id;
	attr.ts_format = mlx5_ts_format_conv(priv->qp_ts_format);
	attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
	qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (qp->qp_obj == NULL) {
		DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
		goto error;
	}
	qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_crypto_qp2rts(qp))
		goto error;
	qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
						      RTE_CACHE_LINE_SIZE);
	qp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));
	qp->entries_n = 1 << log_nb_desc;
	if (mlx5_crypto_indirect_mkeys_prepare(priv, qp)) {
		DRV_LOG(ERR, "Cannot allocate indirect memory regions.");
		rte_errno = ENOMEM;
		goto error;
	}
	mlx5_crypto_qp_init(priv, qp);
	qp->priv = priv;
	dev->data->queue_pairs[qp_id] = qp;
	return 0;
error:
	/* Publish the partial QP so the release callback can free it. */
	dev->data->queue_pairs[qp_id] = qp;
	mlx5_crypto_queue_pair_release(dev, qp_id);
	return -1;
}

static void
mlx5_crypto_stats_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_crypto_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_cryptodev_ops mlx5_crypto_ops = {
	.dev_configure			= mlx5_crypto_dev_configure,
	.dev_start			= mlx5_crypto_dev_start,
	.dev_stop			= mlx5_crypto_dev_stop,
	.dev_close			= mlx5_crypto_dev_close,
	.dev_infos_get			= mlx5_crypto_dev_infos_get,
	.stats_get			= mlx5_crypto_stats_get,
	.stats_reset			= mlx5_crypto_stats_reset,
	.queue_pair_setup		= mlx5_crypto_queue_pair_setup,
	.queue_pair_release		= mlx5_crypto_queue_pair_release,
	.sym_session_get_size		= mlx5_crypto_sym_session_get_size,
	.sym_session_configure		= mlx5_crypto_sym_session_configure,
	.sym_session_clear		= mlx5_crypto_sym_session_clear,
	.sym_get_raw_dp_ctx_size	= NULL,
	.sym_configure_raw_dp_ctx	= NULL,
};

static void
mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Fail to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

static int
mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
{
	if (mlx5_crypto_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar)
		priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	if (priv->uar == NULL || priv->uar_addr == NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	return 0;
}

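/*
 * Supported devargs: "wcs_file" (credential file for the crypto login
 * object), "import_kek_id", "credential_id", "keytag" and
 * "max_segs_num". The "class" key is consumed by the common mlx5
 * probing logic and ignored here.
 */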
static int
mlx5_crypto_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_crypto_devarg_params *devarg_prms = opaque;
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	unsigned long tmp;
	FILE *file;
	int ret;
	int i;

	if (strcmp(key, "class") == 0)
		return 0;
	if (strcmp(key, "wcs_file") == 0) {
		file = fopen(val, "rb");
		if (file == NULL) {
			rte_errno = ENOTSUP;
			return -rte_errno;
		}
		for (i = 0 ; i < MLX5_CRYPTO_CREDENTIAL_SIZE ; i++) {
			ret = fscanf(file, "%02hhX", &attr->credential[i]);
			if (ret <= 0) {
				fclose(file);
				DRV_LOG(ERR,
					"Failed to read credential from file.");
				rte_errno = EINVAL;
				return -rte_errno;
			}
		}
		fclose(file);
		devarg_prms->login_devarg = true;
		return 0;
	}
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -errno;
	}
	if (strcmp(key, "max_segs_num") == 0) {
		if (!tmp || tmp > MLX5_CRYPTO_MAX_SEGS) {
			DRV_LOG(WARNING, "Invalid max_segs_num: %u, should"
				" be less than %d.",
				(uint32_t)tmp, MLX5_CRYPTO_MAX_SEGS);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		devarg_prms->max_segs_num = (uint32_t)tmp;
	} else if (strcmp(key, "import_kek_id") == 0) {
		attr->session_import_kek_ptr = (uint32_t)tmp;
	} else if (strcmp(key, "credential_id") == 0) {
		attr->credential_pointer = (uint32_t)tmp;
	} else if (strcmp(key, "keytag") == 0) {
		devarg_prms->keytag = tmp;
	} else {
		DRV_LOG(WARNING, "Invalid key %s.", key);
	}
	return 0;
}

static int
mlx5_crypto_parse_devargs(struct rte_devargs *devargs,
			  struct mlx5_crypto_devarg_params *devarg_prms)
{
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	struct rte_kvargs *kvlist;

	/* Default values. */
	attr->credential_pointer = 0;
	attr->session_import_kek_ptr = 0;
	devarg_prms->keytag = 0;
	devarg_prms->max_segs_num = 8;
	if (devargs == NULL) {
		DRV_LOG(ERR,
			"No login devargs in order to enable crypto operations in the device.");
		rte_errno = EINVAL;
		return -1;
	}
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_kvargs_process(kvlist, NULL, mlx5_crypto_args_check_handler,
			       devarg_prms) != 0) {
		DRV_LOG(ERR, "Devargs handler function Failed.");
		rte_kvargs_free(kvlist);
		rte_errno = EINVAL;
		return -1;
	}
	rte_kvargs_free(kvlist);
	if (devarg_prms->login_devarg == false) {
		DRV_LOG(ERR,
			"No login credential devarg in order to enable crypto operations "
			"in the device.");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			    size_t len, void *arg __rte_unused)
{
	struct mlx5_crypto_priv *priv;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&priv_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
			mlx5_free_mr_by_addr(&priv->mr_scache,
					     priv->ctx->device->name,
					     addr, len);
		pthread_mutex_unlock(&priv_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

static int
mlx5_crypto_dev_probe(struct rte_device *dev)
{
	struct ibv_device *ibv;
	struct rte_cryptodev *crypto_dev;
	struct ibv_context *ctx;
	struct mlx5_devx_obj *login;
	struct mlx5_crypto_priv *priv;
	struct mlx5_crypto_devarg_params devarg_prms = { 0 };
	struct mlx5_hca_attr attr = { 0 };
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.private_data_size = sizeof(struct mlx5_crypto_priv),
		.socket_id = dev->numa_node,
		.max_nb_queue_pairs =
				RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
	};
	uint16_t rdmw_wqe_size;
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_os_get_ibv_dev(dev);
	if (ibv == NULL)
		return -rte_errno;
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 ||
	    attr.crypto == 0 || attr.aes_xts == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support crypto "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	ret = mlx5_crypto_parse_devargs(dev->devargs, &devarg_prms);
	if (ret) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	login = mlx5_devx_cmd_create_crypto_login_obj(ctx,
						      &devarg_prms.login_attr);
	if (login == NULL) {
		DRV_LOG(ERR, "Failed to configure login.");
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	crypto_dev = rte_cryptodev_pmd_create(ibv->name, dev,
					      &init_params);
	if (crypto_dev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Crypto device %s was created successfully.", ibv->name);
	crypto_dev->dev_ops = &mlx5_crypto_ops;
	crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
	crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
	crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
	crypto_dev->driver_id = mlx5_crypto_driver_id;
	priv = crypto_dev->data->dev_private;
	priv->ctx = ctx;
	priv->login_obj = login;
	priv->crypto_dev = crypto_dev;
	priv->qp_ts_format = attr.qp_ts_format;
	if (mlx5_crypto_hw_global_prepare(priv) != 0) {
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			     MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_crypto_hw_global_release(priv);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
	priv->max_segs_num = devarg_prms.max_segs_num;
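	/*
	 * The UMR WQE covers its control, mkey context and BSF segments
	 * plus a KLM array padded to a multiple of 4 entries; the RDMA
	 * WRITE WQE needs at least 2 data segments and also grows in
	 * 4-entry steps. The per-descriptor WQE set size is the sum of
	 * both.
	 */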
	priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
			     sizeof(struct mlx5_umr_wqe) +
			     RTE_ALIGN(priv->max_segs_num, 4) *
			     sizeof(struct mlx5_wqe_dseg);
	rdmw_wqe_size = sizeof(struct mlx5_rdma_write_wqe) +
			      sizeof(struct mlx5_wqe_dseg) *
			      (priv->max_segs_num <= 2 ? 2 : 2 +
			       RTE_ALIGN(priv->max_segs_num - 2, 4));
	priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
	priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
	priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_crypto_mr_mem_event_cb,
						NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

static int
mlx5_crypto_dev_remove(struct rte_device *dev)
{
	struct mlx5_crypto_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
		if (priv->crypto_dev->device == dev)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_crypto_hw_global_release(priv);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_crypto_pci_id_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
				PCI_DEVICE_ID_MELLANOX_CONNECTX6)
	},
	{
		.vendor_id = 0
	}
};

static struct mlx5_class_driver mlx5_crypto_driver = {
	.drv_class = MLX5_CLASS_CRYPTO,
	.name = RTE_STR(MLX5_CRYPTO_DRIVER_NAME),
	.id_table = mlx5_crypto_pci_id_map,
	.probe = mlx5_crypto_dev_probe,
	.remove = mlx5_crypto_dev_remove,
};

RTE_INIT(rte_mlx5_crypto_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_class_driver_register(&mlx5_crypto_driver);
}

RTE_PMD_REGISTER_CRYPTO_DRIVER(mlx5_cryptodev_driver, mlx5_drv,
			       mlx5_crypto_driver_id);

RTE_LOG_REGISTER_DEFAULT(mlx5_crypto_logtype, NOTICE)

RTE_PMD_EXPORT_NAME(MLX5_CRYPTO_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_CRYPTO_DRIVER_NAME, mlx5_crypto_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_CRYPTO_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");