/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2021 NVIDIA Corporation & Affiliates
 */

#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_memory.h>

#include <mlx5_glue.h>
#include <mlx5_common.h>
#include <mlx5_common_pci.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_common_os.h>

#include "mlx5_crypto_utils.h"
#include "mlx5_crypto.h"

#define MLX5_CRYPTO_DRIVER_NAME crypto_mlx5
#define MLX5_CRYPTO_LOG_NAME pmd.crypto.mlx5
#define MLX5_CRYPTO_MAX_QPS 1024
#define MLX5_CRYPTO_MAX_SEGS 56

#define MLX5_CRYPTO_FEATURE_FLAGS \
	(RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | RTE_CRYPTODEV_FF_HW_ACCELERATED | \
	 RTE_CRYPTODEV_FF_IN_PLACE_SGL | RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT | \
	 RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT | \
	 RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY | \
	 RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS)

TAILQ_HEAD(mlx5_crypto_privs, mlx5_crypto_priv) mlx5_crypto_priv_list =
				TAILQ_HEAD_INITIALIZER(mlx5_crypto_priv_list);
static pthread_mutex_t priv_list_lock = PTHREAD_MUTEX_INITIALIZER;

int mlx5_crypto_logtype;

uint8_t mlx5_crypto_driver_id;

const struct rte_cryptodev_capabilities mlx5_crypto_caps[] = {
	{		/* AES XTS */
		.op = RTE_CRYPTO_OP_TYPE_SYMMETRIC,
		{.sym = {
			.xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER,
			{.cipher = {
				.algo = RTE_CRYPTO_CIPHER_AES_XTS,
				.block_size = 16,
				.key_size = {
					.min = 32,
					.max = 64,
					.increment = 32
				},
				.iv_size = {
					.min = 16,
					.max = 16,
					.increment = 0
				},
				.dataunit_set =
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES |
				RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES,
			}, }
		}, }
	},
};

static const char mlx5_crypto_drv_name[] = RTE_STR(MLX5_CRYPTO_DRIVER_NAME);

static const struct rte_driver mlx5_drv = {
	.name = mlx5_crypto_drv_name,
	.alias = mlx5_crypto_drv_name
};

static struct cryptodev_driver mlx5_cryptodev_driver;

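/*
 * Per-session context. The big-endian fields are pre-computed at session
 * configuration exactly as the hardware BSF segment expects them, so the
 * data path copies them into each UMR WQE without further conversion.
 */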
struct mlx5_crypto_session {
	uint32_t bs_bpt_eo_es;
	/**< bsf_size, bsf_p_type, encryption_order and encryption standard,
	 * saved in big endian format.
	 */
	uint32_t bsp_res;
	/**< crypto_block_size_pointer and reserved 24 bits saved in big
	 * endian format.
	 */
	uint32_t iv_offset:16;
	/**< Starting point for Initialisation Vector. */
	struct mlx5_crypto_dek *dek; /**< Pointer to dek struct. */
	uint32_t dek_id; /**< DEK ID */
} __rte_packed;

static void
mlx5_crypto_dev_infos_get(struct rte_cryptodev *dev,
			  struct rte_cryptodev_info *dev_info)
{
	RTE_SET_USED(dev);
	if (dev_info != NULL) {
		dev_info->driver_id = mlx5_crypto_driver_id;
		dev_info->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
		dev_info->capabilities = mlx5_crypto_caps;
		dev_info->max_nb_queue_pairs = MLX5_CRYPTO_MAX_QPS;
		dev_info->min_mbuf_headroom_req = 0;
		dev_info->min_mbuf_tailroom_req = 0;
		dev_info->sym.max_nb_sessions = 0;
		/*
		 * If 0, the device does not have any limitation in number of
		 * sessions that can be used.
		 */
	}
}

static int
mlx5_crypto_dev_configure(struct rte_cryptodev *dev,
			  struct rte_cryptodev_config *config)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	if (config == NULL) {
		DRV_LOG(ERR, "Invalid crypto dev configure parameters.");
		return -EINVAL;
	}
	if ((config->ff_disable & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) != 0) {
		DRV_LOG(ERR,
			"Disabled symmetric crypto feature is not supported.");
		return -ENOTSUP;
	}
	if (mlx5_crypto_dek_setup(priv) != 0) {
		DRV_LOG(ERR, "Dek hash list creation has failed.");
		return -ENOMEM;
	}
	priv->dev_config = *config;
	DRV_LOG(DEBUG, "Device %u was configured.", dev->driver_id);
	return 0;
}

static void
mlx5_crypto_dev_stop(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
}

static int
mlx5_crypto_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
	return 0;
}

static int
mlx5_crypto_dev_close(struct rte_cryptodev *dev)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;

	mlx5_crypto_dek_unset(priv);
	DRV_LOG(DEBUG, "Device %u was closed.", dev->driver_id);
	return 0;
}

static unsigned int
mlx5_crypto_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct mlx5_crypto_session);
}

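/*
 * Session creation path. Only a single AES-XTS cipher transform is accepted;
 * chained transforms are rejected. An illustrative, application-side transform
 * setup (not part of the driver) might look like this, assuming a wrapped 64B
 * key and a per-operation IV placed right after the crypto op:
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_XTS,
 *			.key = { .data = wrapped_key, .length = 64 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *			.dataunit_len = 512,
 *		},
 *	};
 *
 * The names wrapped_key and IV_OFFSET are placeholders for application data.
 */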
static int
mlx5_crypto_sym_session_configure(struct rte_cryptodev *dev,
				  struct rte_crypto_sym_xform *xform,
				  struct rte_cryptodev_sym_session *session,
				  struct rte_mempool *mp)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *sess_private_data;
	struct rte_crypto_cipher_xform *cipher;
	uint8_t encryption_order;
	int ret;

	if (unlikely(xform->next != NULL)) {
		DRV_LOG(ERR, "Xform next is not supported.");
		return -ENOTSUP;
	}
	if (unlikely((xform->type != RTE_CRYPTO_SYM_XFORM_CIPHER) ||
		     (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_XTS))) {
		DRV_LOG(ERR, "Only AES-XTS algorithm is supported.");
		return -ENOTSUP;
	}
	ret = rte_mempool_get(mp, (void *)&sess_private_data);
	if (ret != 0) {
		DRV_LOG(ERR,
			"Failed to get session %p private data from mempool.",
			sess_private_data);
		return -ENOMEM;
	}
	cipher = &xform->cipher;
	sess_private_data->dek = mlx5_crypto_dek_prepare(priv, cipher);
	if (sess_private_data->dek == NULL) {
		rte_mempool_put(mp, sess_private_data);
		DRV_LOG(ERR, "Failed to prepare dek.");
		return -ENOMEM;
	}
	if (cipher->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_MEMORY;
	else
		encryption_order = MLX5_ENCRYPTION_ORDER_ENCRYPTED_RAW_WIRE;
	sess_private_data->bs_bpt_eo_es = rte_cpu_to_be_32
			(MLX5_BSF_SIZE_64B << MLX5_BSF_SIZE_OFFSET |
			 MLX5_BSF_P_TYPE_CRYPTO << MLX5_BSF_P_TYPE_OFFSET |
			 encryption_order << MLX5_ENCRYPTION_ORDER_OFFSET |
			 MLX5_ENCRYPTION_STANDARD_AES_XTS);
	switch (xform->cipher.dataunit_len) {
	case 0:
		sess_private_data->bsp_res = 0;
		break;
	case 512:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_512B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	case 4096:
		sess_private_data->bsp_res = rte_cpu_to_be_32
					     ((uint32_t)MLX5_BLOCK_SIZE_4096B <<
					     MLX5_BLOCK_SIZE_OFFSET);
		break;
	default:
		DRV_LOG(ERR, "Cipher data unit length is not supported.");
		/* Release the resources acquired above to avoid leaking them. */
		mlx5_crypto_dek_destroy(priv, sess_private_data->dek);
		rte_mempool_put(mp, sess_private_data);
		return -ENOTSUP;
	}
	sess_private_data->iv_offset = cipher->iv.offset;
	sess_private_data->dek_id =
			rte_cpu_to_be_32(sess_private_data->dek->obj->id &
					 0xffffff);
	set_sym_session_private_data(session, dev->driver_id,
				     sess_private_data);
	DRV_LOG(DEBUG, "Session %p was configured.", sess_private_data);
	return 0;
}

static void
mlx5_crypto_sym_session_clear(struct rte_cryptodev *dev,
			      struct rte_cryptodev_sym_session *sess)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_crypto_session *spriv = get_sym_session_private_data(sess,
								dev->driver_id);

	if (unlikely(spriv == NULL)) {
		DRV_LOG(ERR, "Failed to get session %p private data.", spriv);
		return;
	}
	mlx5_crypto_dek_destroy(priv, spriv->dek);
	set_sym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(rte_mempool_from_obj(spriv), spriv);
	DRV_LOG(DEBUG, "Session %p was cleared.", spriv);
}

static int
mlx5_crypto_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

	if (qp->qp_obj != NULL)
		claim_zero(mlx5_devx_cmd_destroy(qp->qp_obj));
	if (qp->umem_obj != NULL)
		claim_zero(mlx5_glue->devx_umem_dereg(qp->umem_obj));
	if (qp->umem_buf != NULL)
		rte_free(qp->umem_buf);
	mlx5_mr_btree_free(&qp->mr_ctrl.cache_bh);
	mlx5_devx_cq_destroy(&qp->cq_obj);
	rte_free(qp);
	dev->data->queue_pairs[qp_id] = NULL;
	return 0;
}

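/*
 * Move the QP through the full RST -> INIT -> RTR -> RTS state sequence so
 * it can both send and receive. The QP is looped back to itself: the UMR and
 * RDMA_WRITE WQEs posted on the send queue are "received" by the same QP,
 * which lets the crypto translation happen without any remote peer.
 */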
static int
mlx5_crypto_qp2rts(struct mlx5_crypto_qp *qp)
{
	/*
	 * In order to configure self-loopback, the remote QP ID passed to
	 * these modify commands is the ID of this same QP.
	 */
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RST2INIT_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to INIT state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_INIT2RTR_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTR state(%u).",
			rte_errno);
		return -1;
	}
	if (mlx5_devx_cmd_modify_qp_state(qp->qp_obj, MLX5_CMD_OP_RTR2RTS_QP,
					  qp->qp_obj->id)) {
		DRV_LOG(ERR, "Failed to modify QP to RTS state(%u).",
			rte_errno);
		return -1;
	}
	return 0;
}

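/*
 * When the session carries no fixed data-unit length (dataunit_len == 0),
 * each operation's cipher length is treated as a single data unit and must
 * therefore match one of the block sizes the device understands.
 */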
static __rte_noinline uint32_t
mlx5_crypto_get_block_size(struct rte_crypto_op *op)
{
	uint32_t bl = op->sym->cipher.data.length;

	switch (bl) {
	case (1 << 20):
		return RTE_BE32(MLX5_BLOCK_SIZE_1MB << MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 12):
		return RTE_BE32(MLX5_BLOCK_SIZE_4096B <<
				MLX5_BLOCK_SIZE_OFFSET);
	case (1 << 9):
		return RTE_BE32(MLX5_BLOCK_SIZE_512B << MLX5_BLOCK_SIZE_OFFSET);
	default:
		DRV_LOG(ERR, "Unknown block size: %u.", bl);
		return UINT32_MAX;
	}
}

/**
 * Query LKey from a packet buffer for QP. If not found, add the mempool.
 *
 * @param priv
 *   Pointer to the priv object.
 * @param addr
 *   Search key.
 * @param mr_ctrl
 *   Pointer to per-queue MR control structure.
 * @param ol_flags
 *   Mbuf offload features.
 *
 * @return
 *   Searched LKey on success, UINT32_MAX on no match.
 */
static __rte_always_inline uint32_t
mlx5_crypto_addr2mr(struct mlx5_crypto_priv *priv, uintptr_t addr,
		    struct mlx5_mr_ctrl *mr_ctrl, uint64_t ol_flags)
{
	uint32_t lkey;

	/* Check generation bit to see if there's any change on existing MRs. */
	if (unlikely(*mr_ctrl->dev_gen_ptr != mr_ctrl->cur_gen))
		mlx5_mr_flush_local_cache(mr_ctrl);
	/* Linear search on MR cache array. */
	lkey = mlx5_mr_lookup_lkey(mr_ctrl->cache, &mr_ctrl->mru,
				   MLX5_MR_CACHE_N, addr);
	if (likely(lkey != UINT32_MAX))
		return lkey;
	/* Take slower bottom-half on miss. */
	return mlx5_mr_addr2mr_bh(priv->pd, 0, &priv->mr_scache, mr_ctrl, addr,
				  !!(ol_flags & EXT_ATTACHED_MBUF));
}

static __rte_always_inline uint32_t
mlx5_crypto_klm_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,
		      struct rte_mbuf *mbuf, struct mlx5_wqe_dseg *klm,
		      uint32_t offset, uint32_t *remain)
{
	uint32_t data_len = (rte_pktmbuf_data_len(mbuf) - offset);
	uintptr_t addr = rte_pktmbuf_mtod_offset(mbuf, uintptr_t, offset);

	if (data_len > *remain)
		data_len = *remain;
	*remain -= data_len;
	klm->bcount = rte_cpu_to_be_32(data_len);
	klm->pbuf = rte_cpu_to_be_64(addr);
	klm->lkey = mlx5_crypto_addr2mr(priv, addr, &qp->mr_ctrl,
					mbuf->ol_flags);
	return klm->lkey;
}

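/*
 * Build one KLM (address/length/lkey triplet) per mbuf segment covering the
 * cipher data range. The first segment honors the cipher offset; the walk
 * stops once the whole cipher length is described. Returns the number of
 * KLM entries written, or 0 on failure with op->status already set.
 */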
static __rte_always_inline uint32_t
mlx5_crypto_klms_set(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp,
		     struct rte_crypto_op *op, struct rte_mbuf *mbuf,
		     struct mlx5_wqe_dseg *klm)
{
	uint32_t remain_len = op->sym->cipher.data.length;
	uint32_t nb_segs = mbuf->nb_segs;
	uint32_t klm_n = 1u;

	/* First mbuf needs to take the cipher offset. */
	if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, klm,
		     op->sym->cipher.data.offset, &remain_len) == UINT32_MAX)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return 0;
	}
	while (remain_len) {
		nb_segs--;
		mbuf = mbuf->next;
		if (unlikely(mbuf == NULL || nb_segs == 0)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
		if (unlikely(mlx5_crypto_klm_set(priv, qp, mbuf, ++klm, 0,
						 &remain_len) == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_ERROR;
			return 0;
		}
		klm_n++;
	}
	return klm_n;
}

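/*
 * Fill the pre-formatted WQE set for one crypto operation. Each descriptor
 * slot in the send queue holds a fixed-size set laid out as:
 *
 *	+-------------------+  <- umr
 *	| UMR WQE           |  re-points the indirect mkey at the destination
 *	|   KLM list        |  buffer and programs the crypto BSF attributes
 *	|   BSF segment     |  (the last segment of the UMR WQE)
 *	+-------------------+  <- umr + priv->umr_wqe_size
 *	| RDMA_WRITE WQE    |  writes the source data through the mkey,
 *	|   KLM list        |  triggering the AES-XTS translation
 *	+-------------------+
 *	| NOP WQE           |  only when the RDMA_WRITE used fewer data
 *	+-------------------+  segments than the maximum, to keep the stride
 *
 * Returns 1 on success and 0 on failure with op->status set.
 */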
static __rte_always_inline int
mlx5_crypto_wqe_set(struct mlx5_crypto_priv *priv,
			 struct mlx5_crypto_qp *qp,
			 struct rte_crypto_op *op,
			 struct mlx5_umr_wqe *umr)
{
	struct mlx5_crypto_session *sess = get_sym_session_private_data
				(op->sym->session, mlx5_crypto_driver_id);
	struct mlx5_wqe_cseg *cseg = &umr->ctr;
	struct mlx5_wqe_mkey_cseg *mkc = &umr->mkc;
	struct mlx5_wqe_dseg *klms = &umr->kseg[0];
	struct mlx5_wqe_umr_bsf_seg *bsf = ((struct mlx5_wqe_umr_bsf_seg *)
				      RTE_PTR_ADD(umr, priv->umr_wqe_size)) - 1;
	uint32_t ds;
	bool ipl = op->sym->m_dst == NULL || op->sym->m_dst == op->sym->m_src;
	/* Set UMR WQE. */
	uint32_t klm_n = mlx5_crypto_klms_set(priv, qp, op,
				   ipl ? op->sym->m_src : op->sym->m_dst, klms);

	if (unlikely(klm_n == 0))
		return 0;
	bsf->bs_bpt_eo_es = sess->bs_bpt_eo_es;
	if (unlikely(!sess->bsp_res)) {
		bsf->bsp_res = mlx5_crypto_get_block_size(op);
		if (unlikely(bsf->bsp_res == UINT32_MAX)) {
			op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
			return 0;
		}
	} else {
		bsf->bsp_res = sess->bsp_res;
	}
	bsf->raw_data_size = rte_cpu_to_be_32(op->sym->cipher.data.length);
	memcpy(bsf->xts_initial_tweak,
	       rte_crypto_op_ctod_offset(op, uint8_t *, sess->iv_offset), 16);
	bsf->res_dp = sess->dek_id;
	mkc->len = rte_cpu_to_be_64(op->sym->cipher.data.length);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) | MLX5_OPCODE_UMR);
	qp->db_pi += priv->umr_wqe_stride;
	/* Set RDMA_WRITE WQE. */
	cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
	klms = RTE_PTR_ADD(cseg, sizeof(struct mlx5_rdma_write_wqe));
	if (!ipl) {
		klm_n = mlx5_crypto_klms_set(priv, qp, op, op->sym->m_src,
					     klms);
		if (unlikely(klm_n == 0))
			return 0;
	} else {
		memcpy(klms, &umr->kseg[0], sizeof(*klms) * klm_n);
	}
	ds = 2 + klm_n;
	cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
	cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
							MLX5_OPCODE_RDMA_WRITE);
	ds = RTE_ALIGN(ds, 4);
	qp->db_pi += ds >> 2;
	/* Set NOP WQE if needed. */
	if (priv->max_rdmar_ds > ds) {
		cseg += ds;
		ds = priv->max_rdmar_ds - ds;
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) | ds);
		cseg->opcode = rte_cpu_to_be_32((qp->db_pi << 8) |
						       MLX5_OPCODE_NOP);
		qp->db_pi += ds >> 2; /* Here, DS is 4 aligned for sure. */
	}
	qp->wqe = (uint8_t *)cseg;
	return 1;
}

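/*
 * Ring the send doorbell by writing the first 8 bytes of the last WQE to the
 * UAR register. On 64-bit hosts this is a single atomic store; 32-bit hosts
 * must split it into two 32-bit stores under a lock so that concurrent
 * writers cannot interleave the halves.
 */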
static __rte_always_inline void
mlx5_crypto_uar_write(uint64_t val, struct mlx5_crypto_priv *priv)
{
#ifdef RTE_ARCH_64
	*priv->uar_addr = val;
#else /* !RTE_ARCH_64 */
	rte_spinlock_lock(&priv->uar32_sl);
	*(volatile uint32_t *)priv->uar_addr = val;
	rte_io_wmb();
	*((volatile uint32_t *)priv->uar_addr + 1) = val >> 32;
	rte_spinlock_unlock(&priv->uar32_sl);
#endif
}

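/*
 * Enqueue path: claim free descriptor slots, build one WQE set per operation
 * in place inside the umem, then publish everything with a single doorbell.
 * The rte_io_wmb()/rte_wmb() sequence orders the WQE stores before the
 * doorbell record update, and the doorbell record before the UAR write, since
 * the device may read the queue the moment the UAR register is written.
 */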
static uint16_t
mlx5_crypto_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	struct mlx5_crypto_priv *priv = qp->priv;
	struct mlx5_umr_wqe *umr;
	struct rte_crypto_op *op;
	uint16_t mask = qp->entries_n - 1;
	uint16_t remain = qp->entries_n - (qp->pi - qp->ci);

	if (remain < nb_ops)
		nb_ops = remain;
	else
		remain = nb_ops;
	if (unlikely(remain == 0))
		return 0;
	do {
		op = *ops++;
		umr = RTE_PTR_ADD(qp->umem_buf, priv->wqe_set_size * qp->pi);
		if (unlikely(mlx5_crypto_wqe_set(priv, qp, op, umr) == 0)) {
			qp->stats.enqueue_err_count++;
			if (remain != nb_ops) {
				qp->stats.enqueued_count -= remain;
				break;
			}
			return 0;
		}
		qp->ops[qp->pi] = op;
		qp->pi = (qp->pi + 1) & mask;
	} while (--remain);
	qp->stats.enqueued_count += nb_ops;
	rte_io_wmb();
	qp->db_rec[MLX5_SND_DBR] = rte_cpu_to_be_32(qp->db_pi);
	rte_wmb();
	mlx5_crypto_uar_write(*(volatile uint64_t *)qp->wqe, qp->priv);
	rte_wmb();
	/* After an early break, "remain" counts the ops not enqueued. */
	return nb_ops - remain;
}

static __rte_noinline void
mlx5_crypto_cqe_err_handle(struct mlx5_crypto_qp *qp, struct rte_crypto_op *op)
{
	const uint32_t idx = qp->ci & (qp->entries_n - 1);
	volatile struct mlx5_err_cqe *cqe = (volatile struct mlx5_err_cqe *)
							&qp->cq_obj.cqes[idx];

	op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	qp->stats.dequeue_err_count++;
	DRV_LOG(ERR, "CQE ERR:%x.", rte_be_to_cpu_32(cqe->syndrome));
}

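/*
 * Dequeue path: poll CQEs in ring order. check_cqe() reports SW ownership
 * for completed entries; polling stops at the first entry still owned by
 * hardware, and any other status is a completion with error. The CQ doorbell
 * record is updated once per burst with the new consumer index.
 */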
static uint16_t
mlx5_crypto_dequeue_burst(void *queue_pair, struct rte_crypto_op **ops,
			  uint16_t nb_ops)
{
	struct mlx5_crypto_qp *qp = queue_pair;
	volatile struct mlx5_cqe *restrict cqe;
	struct rte_crypto_op *restrict op;
	const unsigned int cq_size = qp->entries_n;
	const unsigned int mask = cq_size - 1;
	uint32_t idx;
	uint32_t next_idx = qp->ci & mask;
	const uint16_t max = RTE_MIN((uint16_t)(qp->pi - qp->ci), nb_ops);
	uint16_t i = 0;
	int ret;

	if (unlikely(max == 0))
		return 0;
	do {
		idx = next_idx;
		next_idx = (qp->ci + 1) & mask;
		op = qp->ops[idx];
		cqe = &qp->cq_obj.cqes[idx];
		ret = check_cqe(cqe, cq_size, qp->ci);
		rte_io_rmb();
		if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
			if (unlikely(ret != MLX5_CQE_STATUS_HW_OWN))
				mlx5_crypto_cqe_err_handle(qp, op);
			break;
		}
		op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		ops[i++] = op;
		qp->ci++;
	} while (i < max);
	if (likely(i != 0)) {
		rte_io_wmb();
		qp->cq_obj.db_rec[0] = rte_cpu_to_be_32(qp->ci);
		qp->stats.dequeued_count += i;
	}
	return i;
}

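/*
 * Pre-fill the per-descriptor WQE sets with everything that never changes:
 * control segment sizes and completion flags, the indirect mkey ID, the UMR
 * control bits and the keytag. The data path then only has to patch the
 * per-operation fields (KLMs, BSF crypto attributes and opcodes).
 */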
static void
mlx5_crypto_qp_init(struct mlx5_crypto_priv *priv, struct mlx5_crypto_qp *qp)
{
	uint32_t i;

	for (i = 0; i < qp->entries_n; i++) {
		struct mlx5_wqe_cseg *cseg = RTE_PTR_ADD(qp->umem_buf, i *
							 priv->wqe_set_size);
		struct mlx5_wqe_umr_cseg *ucseg = (struct mlx5_wqe_umr_cseg *)
								     (cseg + 1);
		struct mlx5_wqe_umr_bsf_seg *bsf =
			(struct mlx5_wqe_umr_bsf_seg *)(RTE_PTR_ADD(cseg,
						       priv->umr_wqe_size)) - 1;
		struct mlx5_wqe_rseg *rseg;

		/* Init UMR WQE. */
		cseg->sq_ds = rte_cpu_to_be_32((qp->qp_obj->id << 8) |
					 (priv->umr_wqe_size / MLX5_WSEG_SIZE));
		cseg->flags = RTE_BE32(MLX5_COMP_ONLY_FIRST_ERR <<
				       MLX5_COMP_MODE_OFFSET);
		cseg->misc = rte_cpu_to_be_32(qp->mkey[i]->id);
		ucseg->if_cf_toe_cq_res = RTE_BE32(1u << MLX5_UMRC_IF_OFFSET);
		ucseg->mkey_mask = RTE_BE64(1u << 0); /* Mkey length bit. */
		ucseg->ko_to_bs = rte_cpu_to_be_32
			((RTE_ALIGN(priv->max_segs_num, 4u) <<
			 MLX5_UMRC_KO_OFFSET) | (4 << MLX5_UMRC_TO_BS_OFFSET));
		bsf->keytag = priv->keytag;
		/* Init RDMA WRITE WQE. */
		cseg = RTE_PTR_ADD(cseg, priv->umr_wqe_size);
		cseg->flags = RTE_BE32((MLX5_COMP_ALWAYS <<
				      MLX5_COMP_MODE_OFFSET) |
				      MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE);
		rseg = (struct mlx5_wqe_rseg *)(cseg + 1);
		rseg->rkey = rte_cpu_to_be_32(qp->mkey[i]->id);
	}
}

static int
mlx5_crypto_indirect_mkeys_prepare(struct mlx5_crypto_priv *priv,
				  struct mlx5_crypto_qp *qp)
{
	struct mlx5_umr_wqe *umr;
	uint32_t i;
	struct mlx5_devx_mkey_attr attr = {
		.pd = priv->pdn,
		.umr_en = 1,
		.crypto_en = 1,
		.set_remote_rw = 1,
		.klm_num = RTE_ALIGN(priv->max_segs_num, 4),
	};

	for (umr = (struct mlx5_umr_wqe *)qp->umem_buf, i = 0;
	   i < qp->entries_n; i++, umr = RTE_PTR_ADD(umr, priv->wqe_set_size)) {
		attr.klm_array = (struct mlx5_klm *)&umr->kseg[0];
		qp->mkey[i] = mlx5_devx_cmd_mkey_create(priv->ctx, &attr);
		if (!qp->mkey[i]) {
			DRV_LOG(ERR, "Failed to allocate indirect mkey.");
			return -1;
		}
	}
	return 0;
}

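/*
 * A queue pair is backed by one contiguous, device-registered umem holding
 * all the WQE sets followed by the doorbell record:
 *
 *	[ WQE set 0 | WQE set 1 | ... | WQE set N-1 | db_rec (2 x u32) ]
 *
 * The QP object, the CQ, the per-descriptor indirect mkeys and the MR cache
 * B-tree are created here; any failure unwinds through
 * mlx5_crypto_queue_pair_release().
 */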
static int
mlx5_crypto_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			     const struct rte_cryptodev_qp_conf *qp_conf,
			     int socket_id)
{
	struct mlx5_crypto_priv *priv = dev->data->dev_private;
	struct mlx5_devx_qp_attr attr = {0};
	struct mlx5_crypto_qp *qp;
	uint16_t log_nb_desc = rte_log2_u32(qp_conf->nb_descriptors);
	uint32_t umem_size = RTE_BIT32(log_nb_desc) *
			      priv->wqe_set_size +
			      sizeof(*qp->db_rec) * 2;
	uint32_t alloc_size = sizeof(*qp);
	struct mlx5_devx_cq_attr cq_attr = {
		.uar_page_id = mlx5_os_get_devx_uar_page_id(priv->uar),
	};

	if (dev->data->queue_pairs[qp_id] != NULL)
		mlx5_crypto_queue_pair_release(dev, qp_id);
	alloc_size = RTE_ALIGN(alloc_size, RTE_CACHE_LINE_SIZE);
	alloc_size += (sizeof(struct rte_crypto_op *) +
		       sizeof(struct mlx5_devx_obj *)) *
		       RTE_BIT32(log_nb_desc);
	qp = rte_zmalloc_socket(__func__, alloc_size, RTE_CACHE_LINE_SIZE,
				socket_id);
	if (qp == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP memory.");
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	/*
	 * Store the QP pointer right away so that the error path, which
	 * releases through dev->data->queue_pairs[qp_id], can find it.
	 */
	dev->data->queue_pairs[qp_id] = qp;
	if (mlx5_devx_cq_create(priv->ctx, &qp->cq_obj, log_nb_desc,
				&cq_attr, socket_id) != 0) {
		DRV_LOG(ERR, "Failed to create CQ.");
		goto error;
	}
	qp->umem_buf = rte_zmalloc_socket(__func__, umem_size, 4096, socket_id);
	if (qp->umem_buf == NULL) {
		DRV_LOG(ERR, "Failed to allocate QP umem.");
		rte_errno = ENOMEM;
		goto error;
	}
	qp->umem_obj = mlx5_glue->devx_umem_reg(priv->ctx,
					       (void *)(uintptr_t)qp->umem_buf,
					       umem_size,
					       IBV_ACCESS_LOCAL_WRITE);
	if (qp->umem_obj == NULL) {
		DRV_LOG(ERR, "Failed to register QP umem.");
		goto error;
	}
	if (mlx5_mr_btree_init(&qp->mr_ctrl.cache_bh, MLX5_MR_BTREE_CACHE_N,
			       priv->dev_config.socket_id) != 0) {
		DRV_LOG(ERR, "Cannot allocate MR Btree for qp %u.",
			(uint32_t)qp_id);
		rte_errno = ENOMEM;
		goto error;
	}
	qp->mr_ctrl.dev_gen_ptr = &priv->mr_scache.dev_gen;
	attr.pd = priv->pdn;
	attr.uar_index = mlx5_os_get_devx_uar_page_id(priv->uar);
	attr.cqn = qp->cq_obj.cq->id;
	attr.log_page_size = rte_log2_u32(sysconf(_SC_PAGESIZE));
	attr.rq_size = 0;
	attr.sq_size = RTE_BIT32(log_nb_desc);
	attr.dbr_umem_valid = 1;
	attr.wq_umem_id = qp->umem_obj->umem_id;
	attr.wq_umem_offset = 0;
	attr.dbr_umem_id = qp->umem_obj->umem_id;
	attr.dbr_address = RTE_BIT64(log_nb_desc) * priv->wqe_set_size;
	qp->qp_obj = mlx5_devx_cmd_create_qp(priv->ctx, &attr);
	if (qp->qp_obj == NULL) {
		DRV_LOG(ERR, "Failed to create QP(%u).", rte_errno);
		goto error;
	}
	qp->db_rec = RTE_PTR_ADD(qp->umem_buf, (uintptr_t)attr.dbr_address);
	if (mlx5_crypto_qp2rts(qp))
		goto error;
	qp->mkey = (struct mlx5_devx_obj **)RTE_ALIGN((uintptr_t)(qp + 1),
							   RTE_CACHE_LINE_SIZE);
	qp->ops = (struct rte_crypto_op **)(qp->mkey + RTE_BIT32(log_nb_desc));
	qp->entries_n = 1 << log_nb_desc;
	if (mlx5_crypto_indirect_mkeys_prepare(priv, qp)) {
		DRV_LOG(ERR, "Cannot allocate indirect memory regions.");
		rte_errno = ENOMEM;
		goto error;
	}
	mlx5_crypto_qp_init(priv, qp);
	qp->priv = priv;
	return 0;
error:
	mlx5_crypto_queue_pair_release(dev, qp_id);
	return -1;
}

static void
mlx5_crypto_stats_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_stats *stats)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		stats->enqueued_count += qp->stats.enqueued_count;
		stats->dequeued_count += qp->stats.dequeued_count;
		stats->enqueue_err_count += qp->stats.enqueue_err_count;
		stats->dequeue_err_count += qp->stats.dequeue_err_count;
	}
}

static void
mlx5_crypto_stats_reset(struct rte_cryptodev *dev)
{
	int qp_id;

	for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) {
		struct mlx5_crypto_qp *qp = dev->data->queue_pairs[qp_id];

		memset(&qp->stats, 0, sizeof(qp->stats));
	}
}

static struct rte_cryptodev_ops mlx5_crypto_ops = {
	.dev_configure			= mlx5_crypto_dev_configure,
	.dev_start			= mlx5_crypto_dev_start,
	.dev_stop			= mlx5_crypto_dev_stop,
	.dev_close			= mlx5_crypto_dev_close,
	.dev_infos_get			= mlx5_crypto_dev_infos_get,
	.stats_get			= mlx5_crypto_stats_get,
	.stats_reset			= mlx5_crypto_stats_reset,
	.queue_pair_setup		= mlx5_crypto_queue_pair_setup,
	.queue_pair_release		= mlx5_crypto_queue_pair_release,
	.sym_session_get_size		= mlx5_crypto_sym_session_get_size,
	.sym_session_configure		= mlx5_crypto_sym_session_configure,
	.sym_session_clear		= mlx5_crypto_sym_session_clear,
	.sym_get_raw_dp_ctx_size	= NULL,
	.sym_configure_raw_dp_ctx	= NULL,
};

static void
mlx5_crypto_hw_global_release(struct mlx5_crypto_priv *priv)
{
	if (priv->pd != NULL) {
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		priv->pd = NULL;
	}
	if (priv->uar != NULL) {
		mlx5_glue->devx_free_uar(priv->uar);
		priv->uar = NULL;
	}
}

static int
mlx5_crypto_pd_create(struct mlx5_crypto_priv *priv)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	int ret;

	priv->pd = mlx5_glue->alloc_pd(priv->ctx);
	if (priv->pd == NULL) {
		DRV_LOG(ERR, "Failed to allocate PD.");
		return errno ? -errno : -ENOMEM;
	}
	obj.pd.in = priv->pd;
	obj.pd.out = &pd_info;
	ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_PD);
	if (ret != 0) {
		DRV_LOG(ERR, "Failed to get PD object info.");
		mlx5_glue->dealloc_pd(priv->pd);
		priv->pd = NULL;
		return -errno;
	}
	priv->pdn = pd_info.pdn;
	return 0;
#else
	(void)priv;
	DRV_LOG(ERR, "Cannot get pdn - no DV support.");
	return -ENOTSUP;
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
}

static int
mlx5_crypto_hw_global_prepare(struct mlx5_crypto_priv *priv)
{
	if (mlx5_crypto_pd_create(priv) != 0)
		return -1;
	priv->uar = mlx5_devx_alloc_uar(priv->ctx, -1);
	if (priv->uar)
		priv->uar_addr = mlx5_os_get_devx_uar_reg_addr(priv->uar);
	if (priv->uar == NULL || priv->uar_addr == NULL) {
		rte_errno = errno;
		claim_zero(mlx5_glue->dealloc_pd(priv->pd));
		DRV_LOG(ERR, "Failed to allocate UAR.");
		return -1;
	}
	return 0;
}

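/*
 * Devargs handling. A device must be probed with login credentials before
 * crypto operations are possible. An illustrative EAL command line (PCI
 * address, credential values and file path are deployment-specific
 * placeholders; the devargs form one comma-separated string):
 *
 *	dpdk-app -a 0000:04:00.0,class=crypto,wcs_file=/tmp/wrapped_credential,
 *		import_kek_id=0x12,credential_id=0x2,keytag=0xaabbccddeeff0011,
 *		max_segs_num=32
 *
 * wcs_file points to a text file holding the hex bytes of the wrapped
 * credential; keytag is the tag the device appends to wrapped keys for
 * validation.
 */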
static int
mlx5_crypto_args_check_handler(const char *key, const char *val, void *opaque)
{
	struct mlx5_crypto_devarg_params *devarg_prms = opaque;
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	unsigned long tmp;
	FILE *file;
	int ret;
	int i;

	if (strcmp(key, "class") == 0)
		return 0;
	if (strcmp(key, "wcs_file") == 0) {
		file = fopen(val, "rb");
		if (file == NULL) {
			rte_errno = ENOTSUP;
			return -rte_errno;
		}
		for (i = 0; i < MLX5_CRYPTO_CREDENTIAL_SIZE; i++) {
			ret = fscanf(file, "%02hhX", &attr->credential[i]);
			if (ret <= 0) {
				fclose(file);
				DRV_LOG(ERR,
					"Failed to read credential from file.");
				rte_errno = EINVAL;
				return -rte_errno;
			}
		}
		fclose(file);
		devarg_prms->login_devarg = true;
		return 0;
	}
	errno = 0;
	tmp = strtoul(val, NULL, 0);
	if (errno) {
		DRV_LOG(WARNING, "%s: \"%s\" is an invalid integer.", key, val);
		return -errno;
	}
	if (strcmp(key, "max_segs_num") == 0) {
		if (!tmp || tmp > MLX5_CRYPTO_MAX_SEGS) {
			DRV_LOG(WARNING, "Invalid max_segs_num: %u, must be"
				" in range [1, %d].",
				(uint32_t)tmp, MLX5_CRYPTO_MAX_SEGS);
			rte_errno = EINVAL;
			return -rte_errno;
		}
		devarg_prms->max_segs_num = (uint32_t)tmp;
	} else if (strcmp(key, "import_kek_id") == 0) {
		attr->session_import_kek_ptr = (uint32_t)tmp;
	} else if (strcmp(key, "credential_id") == 0) {
		attr->credential_pointer = (uint32_t)tmp;
	} else if (strcmp(key, "keytag") == 0) {
		devarg_prms->keytag = tmp;
	} else {
		DRV_LOG(WARNING, "Invalid key %s.", key);
	}
	return 0;
}

static int
mlx5_crypto_parse_devargs(struct rte_devargs *devargs,
			  struct mlx5_crypto_devarg_params *devarg_prms)
{
	struct mlx5_devx_crypto_login_attr *attr = &devarg_prms->login_attr;
	struct rte_kvargs *kvlist;

	/* Default values. */
	attr->credential_pointer = 0;
	attr->session_import_kek_ptr = 0;
	devarg_prms->keytag = 0;
	devarg_prms->max_segs_num = 8;
	if (devargs == NULL) {
		DRV_LOG(ERR,
	"Login devargs are required to enable crypto operations on the device.");
		rte_errno = EINVAL;
		return -1;
	}
	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_kvargs_process(kvlist, NULL, mlx5_crypto_args_check_handler,
			   devarg_prms) != 0) {
		DRV_LOG(ERR, "Devargs handler function failed.");
		rte_kvargs_free(kvlist);
		rte_errno = EINVAL;
		return -1;
	}
	rte_kvargs_free(kvlist);
	if (devarg_prms->login_devarg == false) {
		DRV_LOG(ERR,
	"A login credential devarg is required to enable crypto operations on the device.");
		rte_errno = EINVAL;
		return -1;
	}
	return 0;
}

/**
 * Callback for memory event.
 *
 * @param event_type
 *   Memory event type.
 * @param addr
 *   Address of memory.
 * @param len
 *   Size of memory.
 */
static void
mlx5_crypto_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
			    size_t len, void *arg __rte_unused)
{
	struct mlx5_crypto_priv *priv;

	/* Must be called from the primary process. */
	MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
	switch (event_type) {
	case RTE_MEM_EVENT_FREE:
		pthread_mutex_lock(&priv_list_lock);
		/* Iterate all the existing mlx5 devices. */
		TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
			mlx5_free_mr_by_addr(&priv->mr_scache,
					     priv->ctx->device->name,
					     addr, len);
		pthread_mutex_unlock(&priv_list_lock);
		break;
	case RTE_MEM_EVENT_ALLOC:
	default:
		break;
	}
}

/**
 * DPDK callback to register a PCI device.
 *
 * This function spawns crypto device out of a given PCI device.
 *
 * @param[in] pci_drv
 *   PCI driver structure (mlx5_crypto_driver).
 * @param[in] pci_dev
 *   PCI device information.
 *
 * @return
 *   0 on success, 1 to skip this driver, a negative errno value otherwise
 *   and rte_errno is set.
 */
static int
mlx5_crypto_pci_probe(struct rte_pci_driver *pci_drv,
			struct rte_pci_device *pci_dev)
{
	struct ibv_device *ibv;
	struct rte_cryptodev *crypto_dev;
	struct ibv_context *ctx;
	struct mlx5_devx_obj *login;
	struct mlx5_crypto_priv *priv;
	struct mlx5_crypto_devarg_params devarg_prms = { 0 };
	struct mlx5_hca_attr attr = { 0 };
	struct rte_cryptodev_pmd_init_params init_params = {
		.name = "",
		.private_data_size = sizeof(struct mlx5_crypto_priv),
		.socket_id = pci_dev->device.numa_node,
		.max_nb_queue_pairs =
				RTE_CRYPTODEV_PMD_DEFAULT_MAX_NB_QUEUE_PAIRS,
	};
	uint16_t rdmw_wqe_size;
	int ret;

	RTE_SET_USED(pci_drv);
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DRV_LOG(ERR, "Non-primary process type is not supported.");
		rte_errno = ENOTSUP;
		return -rte_errno;
	}
	ibv = mlx5_os_get_ibv_device(&pci_dev->addr);
	if (ibv == NULL) {
		DRV_LOG(ERR, "No matching IB device for PCI slot "
			PCI_PRI_FMT ".", pci_dev->addr.domain,
			pci_dev->addr.bus, pci_dev->addr.devid,
			pci_dev->addr.function);
		return -rte_errno;
	}
	DRV_LOG(INFO, "PCI information matches for device \"%s\".", ibv->name);
	ctx = mlx5_glue->dv_open_device(ibv);
	if (ctx == NULL) {
		DRV_LOG(ERR, "Failed to open IB device \"%s\".", ibv->name);
		rte_errno = ENODEV;
		return -rte_errno;
	}
	if (mlx5_devx_cmd_query_hca_attr(ctx, &attr) != 0 ||
	    attr.crypto == 0 || attr.aes_xts == 0) {
		DRV_LOG(ERR, "Not enough capabilities to support crypto "
			"operations, maybe old FW/OFED version?");
		claim_zero(mlx5_glue->close_device(ctx));
		rte_errno = ENOTSUP;
		return -ENOTSUP;
	}
	ret = mlx5_crypto_parse_devargs(pci_dev->device.devargs, &devarg_prms);
	if (ret) {
		DRV_LOG(ERR, "Failed to parse devargs.");
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	login = mlx5_devx_cmd_create_crypto_login_obj(ctx,
						      &devarg_prms.login_attr);
	if (login == NULL) {
		DRV_LOG(ERR, "Failed to configure login.");
		claim_zero(mlx5_glue->close_device(ctx));
		return -rte_errno;
	}
	crypto_dev = rte_cryptodev_pmd_create(ibv->name, &pci_dev->device,
					&init_params);
	if (crypto_dev == NULL) {
		DRV_LOG(ERR, "Failed to create device \"%s\".", ibv->name);
		claim_zero(mlx5_glue->close_device(ctx));
		return -ENODEV;
	}
	DRV_LOG(INFO,
		"Crypto device %s was created successfully.", ibv->name);
	crypto_dev->dev_ops = &mlx5_crypto_ops;
	crypto_dev->dequeue_burst = mlx5_crypto_dequeue_burst;
	crypto_dev->enqueue_burst = mlx5_crypto_enqueue_burst;
	crypto_dev->feature_flags = MLX5_CRYPTO_FEATURE_FLAGS;
	crypto_dev->driver_id = mlx5_crypto_driver_id;
	priv = crypto_dev->data->dev_private;
	priv->ctx = ctx;
	priv->login_obj = login;
	priv->pci_dev = pci_dev;
	priv->crypto_dev = crypto_dev;
	if (mlx5_crypto_hw_global_prepare(priv) != 0) {
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		return -1;
	}
	if (mlx5_mr_btree_init(&priv->mr_scache.cache,
			     MLX5_MR_BTREE_CACHE_N * 2, rte_socket_id()) != 0) {
		DRV_LOG(ERR, "Failed to allocate shared cache MR memory.");
		mlx5_crypto_hw_global_release(priv);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_glue->close_device(priv->ctx));
		rte_errno = ENOMEM;
		return -rte_errno;
	}
	priv->mr_scache.reg_mr_cb = mlx5_common_verbs_reg_mr;
	priv->mr_scache.dereg_mr_cb = mlx5_common_verbs_dereg_mr;
	priv->keytag = rte_cpu_to_be_64(devarg_prms.keytag);
	priv->max_segs_num = devarg_prms.max_segs_num;
	priv->umr_wqe_size = sizeof(struct mlx5_wqe_umr_bsf_seg) +
			     sizeof(struct mlx5_umr_wqe) +
			     RTE_ALIGN(priv->max_segs_num, 4) *
			     sizeof(struct mlx5_wqe_dseg);
	rdmw_wqe_size = sizeof(struct mlx5_rdma_write_wqe) +
			      sizeof(struct mlx5_wqe_dseg) *
			      (priv->max_segs_num <= 2 ? 2 : 2 +
			       RTE_ALIGN(priv->max_segs_num - 2, 4));
	priv->wqe_set_size = priv->umr_wqe_size + rdmw_wqe_size;
	priv->umr_wqe_stride = priv->umr_wqe_size / MLX5_SEND_WQE_BB;
	priv->max_rdmar_ds = rdmw_wqe_size / sizeof(struct mlx5_wqe_dseg);
	/* Register callback function for global shared MR cache management. */
	if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
		rte_mem_event_callback_register("MLX5_MEM_EVENT_CB",
						mlx5_crypto_mr_mem_event_cb,
						NULL);
	pthread_mutex_lock(&priv_list_lock);
	TAILQ_INSERT_TAIL(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	return 0;
}

static int
mlx5_crypto_pci_remove(struct rte_pci_device *pdev)
{
	struct mlx5_crypto_priv *priv = NULL;

	pthread_mutex_lock(&priv_list_lock);
	/* rte_pci_addr_cmp() returns 0 when the addresses match. */
	TAILQ_FOREACH(priv, &mlx5_crypto_priv_list, next)
		if (rte_pci_addr_cmp(&priv->pci_dev->addr, &pdev->addr) == 0)
			break;
	if (priv)
		TAILQ_REMOVE(&mlx5_crypto_priv_list, priv, next);
	pthread_mutex_unlock(&priv_list_lock);
	if (priv) {
		if (TAILQ_EMPTY(&mlx5_crypto_priv_list))
			rte_mem_event_callback_unregister("MLX5_MEM_EVENT_CB",
							  NULL);
		mlx5_mr_release_cache(&priv->mr_scache);
		mlx5_crypto_hw_global_release(priv);
		rte_cryptodev_pmd_destroy(priv->crypto_dev);
		claim_zero(mlx5_devx_cmd_destroy(priv->login_obj));
		claim_zero(mlx5_glue->close_device(priv->ctx));
	}
	return 0;
}

static const struct rte_pci_id mlx5_crypto_pci_id_map[] = {
		{
			RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX,
					PCI_DEVICE_ID_MELLANOX_CONNECTX6)
		},
		{
			.vendor_id = 0
		}
};

static struct mlx5_pci_driver mlx5_crypto_driver = {
	.driver_class = MLX5_CLASS_CRYPTO,
	.pci_driver = {
		.driver = {
			.name = RTE_STR(MLX5_CRYPTO_DRIVER_NAME),
		},
		.id_table = mlx5_crypto_pci_id_map,
		.probe = mlx5_crypto_pci_probe,
		.remove = mlx5_crypto_pci_remove,
		.drv_flags = 0,
	},
};

RTE_INIT(rte_mlx5_crypto_init)
{
	mlx5_common_init();
	if (mlx5_glue != NULL)
		mlx5_pci_driver_register(&mlx5_crypto_driver);
}

RTE_PMD_REGISTER_CRYPTO_DRIVER(mlx5_cryptodev_driver, mlx5_drv,
			       mlx5_crypto_driver_id);

RTE_LOG_REGISTER_DEFAULT(mlx5_crypto_logtype, NOTICE)
RTE_PMD_EXPORT_NAME(MLX5_CRYPTO_DRIVER_NAME, __COUNTER__);
RTE_PMD_REGISTER_PCI_TABLE(MLX5_CRYPTO_DRIVER_NAME, mlx5_crypto_pci_id_map);
RTE_PMD_REGISTER_KMOD_DEP(MLX5_CRYPTO_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");