1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <cryptodev_pmd.h>
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
14 #include "cnxk_cryptodev_capabilities.h"
/* Worst-case sizing knobs for asymmetric-op metadata: at most 5 operands
 * per op, each presumably bounded by 1024 bytes (8192-bit modulus) —
 * NOTE(review): confirm the byte interpretation against the AE microcode spec.
 */
17 #define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
18 #define CNXK_CPT_MAX_ASYM_OP_MOD_LEN 1024
/*
 * Compute the per-op metadata buffer length (bytes) needed for symmetric
 * crypto operations.  Sums fixed result words, max MAC room, offset-control
 * plus IV space, and an alignment-padded scatter-gather list.
 * (Several lines — declarations, the alignment argument of the final
 * RTE_ALIGN_CEIL and the return — are elided in this view.)
 */
21 cnxk_cpt_get_mlen(void)
/* Two 64-bit words of fixed metadata; exact use is defined by the
 * enqueue/dequeue path — NOTE(review): confirm in full source. */
26 len = 2 * sizeof(uint64_t);
/* Room for the largest MAC the SE engines can produce. */
27 len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
/* Offset-control block plus an AES-CBC-sized IV. */
29 len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
/* SG list header plus one entry per 4 in/out pointers, rounded up;
 * the whole region is ceil-aligned (alignment operand elided here). */
30 len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
31 (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
32 2) * ROC_SE_SG_ENTRY_SIZE),
/*
 * Compute the per-op metadata buffer length (bytes) needed for asymmetric
 * operations: one fixed 64-bit word plus worst-case space for all operands.
 * (Declarations and the return are elided in this view.)
 */
39 cnxk_cpt_asym_get_mlen(void)
/* Fixed 64-bit word of metadata (purpose defined by elided dequeue code). */
44 len = sizeof(uint64_t);
46 /* Get meta len for asymmetric operations */
47 len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
/*
 * dev_configure op: latch the effective feature flags (defaults minus the
 * application-disabled set), validate the requested queue-pair count against
 * the LFs the ROC layer exposes, configure the CPT device, and — when
 * asymmetric crypto is enabled — fetch the shared FPM and EC-group tables.
 * (Error-return statements and cleanup paths are elided in this view.)
 */
53 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
54 struct rte_cryptodev_config *conf)
56 struct cnxk_cpt_vf *vf = dev->data->dev_private;
57 struct roc_cpt *roc_cpt = &vf->cpt;
58 uint16_t nb_lf_avail, nb_lf;
/* Application can only disable features, never enable unsupported ones. */
61 dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
63 nb_lf_avail = roc_cpt->nb_lf_avail;
64 nb_lf = conf->nb_queue_pairs;
/* One LF backs one queue pair; reject over-subscription (handling elided). */
66 if (nb_lf > nb_lf_avail)
69 ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
71 plt_err("Could not configure device");
75 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
76 /* Initialize shared FPM table */
77 ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
79 plt_err("Could not get FPM table");
83 /* Init EC grp table */
84 ret = roc_ae_ec_grp_get(vf->ec_grp);
86 plt_err("Could not get EC grp table");
/*
 * dev_start op: enable the instruction queue of every LF that was actually
 * set up as a queue pair.  Unconfigured slots are skipped (the skip
 * statement itself is elided in this view).
 */
96 cnxk_cpt_dev_start(struct rte_cryptodev *dev)
98 struct cnxk_cpt_vf *vf = dev->data->dev_private;
99 struct roc_cpt *roc_cpt = &vf->cpt;
100 uint16_t nb_lf = roc_cpt->nb_lf;
103 for (qp_id = 0; qp_id < nb_lf; qp_id++) {
104 /* Application may not setup all queue pair */
105 if (roc_cpt->lf[qp_id] == NULL)
108 roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
/*
 * dev_stop op: mirror of dev_start — disable the instruction queue of every
 * configured LF, skipping slots the application never set up.
 */
115 cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
117 struct cnxk_cpt_vf *vf = dev->data->dev_private;
118 struct roc_cpt *roc_cpt = &vf->cpt;
119 uint16_t nb_lf = roc_cpt->nb_lf;
122 for (qp_id = 0; qp_id < nb_lf; qp_id++) {
123 if (roc_cpt->lf[qp_id] == NULL)
126 roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
/*
 * dev_close op: release every queue pair, release asymmetric resources
 * (FPM/EC-group tables — the actual put calls are elided in this view),
 * then tear down the ROC CPT device state.
 */
131 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
133 struct cnxk_cpt_vf *vf = dev->data->dev_private;
137 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
138 ret = cnxk_cpt_queue_pair_release(dev, i);
/* Failure is logged; close presumably continues with remaining QPs —
 * NOTE(review): confirm against the elided control flow. */
140 plt_err("Could not release queue pair %u", i);
145 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
150 roc_cpt_dev_clear(&vf->cpt);
/*
 * dev_infos_get op: report device capabilities and limits.  Queue-pair
 * count is capped by both the hardware LFs available and a user-imposed
 * devargs limit (vf->max_qps_limit).
 */
156 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
157 struct rte_cryptodev_info *info)
159 struct cnxk_cpt_vf *vf = dev->data->dev_private;
160 struct roc_cpt *roc_cpt = &vf->cpt;
162 info->max_nb_queue_pairs =
163 RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
164 plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
166 info->feature_flags = cnxk_cpt_default_ff_get();
167 info->capabilities = cnxk_crypto_capabilities_get(vf);
/* 0 advertises "no session-count limit" per the cryptodev convention. */
168 info->sym.max_nb_sessions = 0;
169 info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
170 info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
/*
 * Build the canonical memzone name for a queue pair's pending-queue memory,
 * unique per (device id, queue-pair id).  Used by both the reserve (create)
 * and lookup (destroy) paths, which must agree on the format.
 */
174 qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
176 snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
/*
 * Create the per-queue-pair metabuf mempool.  Element size (mlen) is the
 * max of the symmetric and asymmetric metadata requirements for the enabled
 * feature set; pool size is the descriptor count plus per-lcore cache slack
 * so concurrent enqueue/dequeue cores don't starve the pool.
 * (Declarations of cache_sz/pool-failure handling are partly elided.)
 */
180 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
181 struct cnxk_cpt_qp *qp, uint8_t qp_id,
182 uint32_t nb_elements)
184 char mempool_name[RTE_MEMPOOL_NAMESIZE];
185 struct cpt_qp_meta_info *meta_info;
186 int lcore_cnt = rte_lcore_count();
187 struct rte_mempool *pool;
/* mlen starts at a small fixed floor; widened below per feature flags. */
188 int mb_pool_sz, mlen = 8;
191 if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
193 mlen = cnxk_cpt_get_mlen();
196 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
198 /* Get meta len required for asymmetric operations */
199 mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
202 mb_pool_sz = nb_elements;
/* Cache roughly 2/3 of the elements per lcore, capped at the mempool max. */
203 cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
205 /* For poll mode, core that enqueues and core that dequeues can be
206 * different. For event mode, all cores are allowed to use same crypto
/* Over-provision the pool by (cores x cache) so cached objects on one
 * core cannot exhaust what another core needs; minimum of 2 cores. */
210 mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
212 /* Allocate mempool */
214 snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
215 dev->data->dev_id, qp_id);
217 pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
218 NULL, NULL, NULL, NULL, rte_socket_id(), 0);
221 plt_err("Could not create mempool for metabuf");
225 meta_info = &qp->meta_info;
227 meta_info->pool = pool;
228 meta_info->mlen = mlen;
/*
 * Free the queue pair's metabuf mempool and clear the stale pointer so a
 * double-destroy is harmless (rte_mempool_free(NULL) is a no-op).
 */
234 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
236 struct cpt_qp_meta_info *meta_info = &qp->meta_info;
238 rte_mempool_free(meta_info->pool);
240 meta_info->pool = NULL;
/*
 * Allocate and assemble a queue pair: the qp struct itself, a memzone for
 * the software pending queue (one cpt_inflight_req per descriptor), and the
 * metabuf mempool.  Returns the qp on success, NULL on failure (the return
 * statements and part of the error-unwind path are elided in this view;
 * the visible rte_memzone_free is part of that unwind).
 */
244 static struct cnxk_cpt_qp *
245 cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
248 const struct rte_memzone *pq_mem;
249 char name[RTE_MEMZONE_NAMESIZE];
250 struct cnxk_cpt_qp *qp;
255 /* Allocate queue pair */
256 qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
259 plt_err("Could not allocate queue pair");
263 /* For pending queue */
264 len = iq_len * sizeof(struct cpt_inflight_req);
266 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
269 pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
270 RTE_MEMZONE_SIZE_HINT_ONLY |
272 RTE_CACHE_LINE_SIZE);
273 if (pq_mem == NULL) {
274 plt_err("Could not allocate reserved memzone");
282 ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
284 plt_err("Could not create mempool for metabuf");
288 /* Initialize pending queue */
289 qp->pend_q.req_queue = pq_mem->addr;
/* Error unwind: release the pending-queue memzone. */
296 rte_memzone_free(pq_mem);
/*
 * Tear down a queue pair built by cnxk_cpt_qp_create: destroy the metabuf
 * mempool, then look the pending-queue memzone up by its canonical name and
 * free it.  (The final rte_free of the qp struct is elided in this view.)
 */
303 cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
305 const struct rte_memzone *pq_mem;
306 char name[RTE_MEMZONE_NAMESIZE];
309 cnxk_cpt_metabuf_mempool_destroy(qp);
311 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
314 pq_mem = rte_memzone_lookup(name);
316 ret = rte_memzone_free(pq_mem);
/*
 * queue_pair_release op: destroy the qp's resources and unhook it from both
 * the ROC LF table and the cryptodev's queue_pairs array.  (The NULL/early
 * return checks and the roc_cpt_lf_fini call are elided in this view.)
 */
326 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
328 struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
329 struct cnxk_cpt_vf *vf = dev->data->dev_private;
330 struct roc_cpt *roc_cpt = &vf->cpt;
331 struct roc_cpt_lf *lf;
337 lf = roc_cpt->lf[qp_id];
343 ret = cnxk_cpt_qp_destroy(dev, qp);
345 plt_err("Could not destroy queue pair %d", qp_id);
/* Clear both references so the slot can be re-setup cleanly. */
349 roc_cpt->lf[qp_id] = NULL;
350 dev->data->queue_pairs[qp_id] = NULL;
/*
 * queue_pair_setup op: (re)create a queue pair — release any existing one,
 * validate the PCI BAR, build the qp, init its LF and LMT line, and publish
 * it.  socket_id is ignored; memory is taken from the caller's socket.
 * (Error returns and the unwind ordering are partly elided in this view.)
 */
356 cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
357 const struct rte_cryptodev_qp_conf *conf,
358 int socket_id __rte_unused)
360 struct cnxk_cpt_vf *vf = dev->data->dev_private;
361 struct roc_cpt *roc_cpt = &vf->cpt;
362 struct rte_pci_device *pci_dev;
363 struct cnxk_cpt_qp *qp;
/* Re-setup of an existing slot: tear the old qp down first.
 * NOTE(review): the release return value is not checked here. */
366 if (dev->data->queue_pairs[qp_id] != NULL)
367 cnxk_cpt_queue_pair_release(dev, qp_id);
369 pci_dev = RTE_DEV_TO_PCI(dev->device);
/* BAR2 holds the LF registers; without it the qp cannot function. */
371 if (pci_dev->mem_resource[2].addr == NULL) {
372 plt_err("Invalid PCI mem address");
376 qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
378 plt_err("Could not create queue pair %d", qp_id);
382 qp->lf.lf_id = qp_id;
383 qp->lf.nb_desc = conf->nb_descriptors;
385 ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
387 plt_err("Could not initialize queue pair %d", qp_id);
/* Ring mask — assumes nb_desc is a power of two; presumably guaranteed
 * by the ROC layer or elided validation — TODO confirm. */
392 qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
394 roc_cpt->lf[qp_id] = &qp->lf;
396 ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
398 roc_cpt->lf[qp_id] = NULL;
399 plt_err("Could not init lmtline for queue pair %d", qp_id);
403 qp->sess_mp = conf->mp_session;
404 qp->sess_mp_priv = conf->mp_session_private;
405 dev->data->queue_pairs[qp_id] = qp;
/* Error unwind: destroy the partially-built qp. */
410 cnxk_cpt_qp_destroy(dev, qp);
/*
 * sym_session_get_size op: size of the PMD-private symmetric session
 * structure, used by the framework to size the session mempool elements.
 */
415 cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
417 return sizeof(struct cnxk_se_sess);
/*
 * Parse a symmetric xform chain into a cnxk session: classify the (up to
 * two-element) chain as cipher-only, auth-only, AEAD, cipher-then-auth or
 * auth-then-cipher, reject unsupported combinations, and populate sess via
 * the fill_sess_* helpers.  (Return statements, the AEAD-classification
 * branch, and the full cipher/auth-algo compatibility switch are elided
 * in this view.)
 */
421 cnxk_sess_fill(struct rte_crypto_sym_xform *xform, struct cnxk_se_sess *sess)
423 struct rte_crypto_sym_xform *aead_xfrm = NULL;
424 struct rte_crypto_sym_xform *c_xfrm = NULL;
425 struct rte_crypto_sym_xform *a_xfrm = NULL;
426 bool ciph_then_auth = false;
/* Classify the chain by the first xform's type; next (if any) is the
 * complementary half.  AEAD assignment happens in elided lines. */
431 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
433 a_xfrm = xform->next;
434 ciph_then_auth = true;
435 } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
436 c_xfrm = xform->next;
438 ciph_then_auth = false;
/* Sanity: each slot must hold the xform type its name claims. */
443 if (c_xfrm != NULL && c_xfrm->type != RTE_CRYPTO_SYM_XFORM_CIPHER) {
444 plt_dp_err("Invalid type in cipher xform");
448 if (a_xfrm != NULL && a_xfrm->type != RTE_CRYPTO_SYM_XFORM_AUTH) {
449 plt_dp_err("Invalid type in auth xform");
453 if (aead_xfrm != NULL && aead_xfrm->type != RTE_CRYPTO_SYM_XFORM_AEAD) {
454 plt_dp_err("Invalid type in AEAD xform");
/* NULL-cipher + NULL-auth-verify has no meaningful work to verify. */
458 if ((c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL) &&
459 a_xfrm != NULL && a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL &&
460 a_xfrm->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
461 plt_dp_err("Null cipher + null auth verify is not supported");
/* Effectively cipher-only (auth absent or NULL). */
466 if (c_xfrm != NULL &&
467 (a_xfrm == NULL || a_xfrm->auth.algo == RTE_CRYPTO_AUTH_NULL)) {
468 if (fill_sess_cipher(c_xfrm, sess))
/* Effectively auth-only (cipher absent or NULL). */
475 if (a_xfrm != NULL &&
476 (c_xfrm == NULL || c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_NULL)) {
477 if (fill_sess_auth(a_xfrm, sess))
484 if (aead_xfrm != NULL) {
485 if (fill_sess_aead(aead_xfrm, sess))
/* From here on a genuine cipher+auth chain is required. */
492 if (c_xfrm == NULL || a_xfrm == NULL) {
493 plt_dp_err("Invalid xforms");
497 if (c_xfrm->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
498 a_xfrm->auth.algo == RTE_CRYPTO_AUTH_SHA1) {
499 plt_dp_err("3DES-CBC + SHA1 is not supported");
503 /* Cipher then auth */
504 if (ciph_then_auth) {
505 if (fill_sess_cipher(c_xfrm, sess))
507 if (fill_sess_auth(a_xfrm, sess))
/* Auth-then-cipher: nested switches restrict the algo pairs accepted
 * in the encrypt direction (most cases elided in this view). */
515 if (c_xfrm->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
516 switch (a_xfrm->auth.algo) {
517 case RTE_CRYPTO_AUTH_SHA1_HMAC:
518 switch (c_xfrm->cipher.algo) {
519 case RTE_CRYPTO_CIPHER_AES_CBC:
/* Order matters: auth context first, then cipher, for this direction. */
530 if (fill_sess_auth(a_xfrm, sess))
532 if (fill_sess_cipher(c_xfrm, sess))
/*
 * Precompute CPT instruction word 7 for a symmetric session: the context
 * pointer plus the engine group.  ZUC/SNOW (zsk) and ChaCha-Poly ops go to
 * the SE group; everything else to the IE group.  (The return of inst_w7
 * is elided in this view.)
 */
539 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
541 union cpt_inst_w7 inst_w7;
/* Hardware dereferences this context pointer at submission time; the
 * session must therefore stay resident while in use. */
543 inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
545 /* Set the engine group */
546 if (sess->zsk_flag || sess->chacha_poly)
547 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
549 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
/*
 * Allocate PMD-private symmetric session data from the pool, populate it
 * from the xform chain, validate the HMAC digest length, precompute
 * instruction word 7, and attach the private data to the generic session.
 * On any failure the private object is returned to the pool (error labels
 * and some intermediate lines are elided in this view).
 */
555 sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
556 struct rte_crypto_sym_xform *xform,
557 struct rte_cryptodev_sym_session *sess,
558 struct rte_mempool *pool)
560 struct cnxk_se_sess *sess_priv;
564 if (unlikely(rte_mempool_get(pool, &priv))) {
565 plt_dp_err("Could not allocate session private data");
/* Pool objects are recycled; wipe any stale session state first. */
569 memset(priv, 0, sizeof(struct cnxk_se_sess));
573 ret = cnxk_sess_fill(xform, sess_priv);
/* HMAC-only sessions must use a digest length the hardware supports;
 * free the auth key copied by cnxk_sess_fill before bailing out. */
577 if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
578 cpt_mac_len_verify(&xform->auth)) {
579 plt_dp_err("MAC length is not supported");
580 if (sess_priv->roc_se_ctx.auth_key != NULL) {
581 plt_free(sess_priv->roc_se_ctx.auth_key);
582 sess_priv->roc_se_ctx.auth_key = NULL;
589 sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
591 set_sym_session_private_data(sess, driver_id, sess_priv);
/* Error unwind: return the unused private object to the pool. */
596 rte_mempool_put(pool, priv);
/*
 * sym_session_configure op: thin wrapper that resolves the per-device ROC
 * handle and driver id, then delegates to sym_session_configure().
 */
602 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
603 struct rte_crypto_sym_xform *xform,
604 struct rte_cryptodev_sym_session *sess,
605 struct rte_mempool *pool)
607 struct cnxk_cpt_vf *vf = dev->data->dev_private;
608 struct roc_cpt *roc_cpt = &vf->cpt;
611 driver_id = dev->driver_id;
613 return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
/*
 * Release a symmetric session's PMD-private data: free the heap-allocated
 * auth key, zero the object (so stale key material does not leak to the
 * next pool user), detach it from the generic session and return it to its
 * originating mempool.  (The NULL-priv early return is elided in this view.)
 */
617 sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
619 void *priv = get_sym_session_private_data(sess, driver_id);
620 struct cnxk_se_sess *sess_priv;
621 struct rte_mempool *pool;
628 if (sess_priv->roc_se_ctx.auth_key != NULL)
629 plt_free(sess_priv->roc_se_ctx.auth_key);
/* NOTE(review): plain memset of key material may be optimized away in
 * general; here the object is immediately reused via the pool — confirm
 * this is acceptable for the threat model. */
631 memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
633 pool = rte_mempool_from_obj(priv);
635 set_sym_session_private_data(sess, driver_id, NULL);
637 rte_mempool_put(pool, priv);
/*
 * sym_session_clear op: wrapper delegating to sym_session_clear() with
 * this device's driver id.
 */
641 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
642 struct rte_cryptodev_sym_session *sess)
644 return sym_session_clear(dev->driver_id, sess);
/*
 * asym_session_get_size op: size of the PMD-private asymmetric session
 * structure.
 */
648 cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
650 return sizeof(struct cnxk_ae_sess);
/*
 * asym_session_clear op: free the per-session asymmetric parameters, zero
 * the private object, detach it and return it to its mempool.  (The
 * NULL-priv early return is elided in this view.)
 */
654 cnxk_ae_session_clear(struct rte_cryptodev *dev,
655 struct rte_cryptodev_asym_session *sess)
657 struct rte_mempool *sess_mp;
658 struct cnxk_ae_sess *priv;
660 priv = get_asym_session_private_data(sess, dev->driver_id);
664 /* Free resources allocated in session_cfg */
665 cnxk_ae_free_session_parameters(priv);
667 /* Reset and free object back to pool */
668 memset(priv, 0, cnxk_ae_session_size_get(dev));
669 sess_mp = rte_mempool_from_obj(priv);
670 set_asym_session_private_data(sess, dev->driver_id, NULL);
671 rte_mempool_put(sess_mp, priv);
/*
 * asym_session_configure op: allocate private session data from the pool,
 * fill it from the asymmetric xform, select the AE engine group for
 * instruction word 7, cache the device's FPM table IOVA and EC-group
 * pointers in the session, and attach it.  (Return statements are elided
 * in this view.)
 */
675 cnxk_ae_session_cfg(struct rte_cryptodev *dev,
676 struct rte_crypto_asym_xform *xform,
677 struct rte_cryptodev_asym_session *sess,
678 struct rte_mempool *pool)
680 struct cnxk_cpt_vf *vf = dev->data->dev_private;
681 struct roc_cpt *roc_cpt = &vf->cpt;
682 struct cnxk_ae_sess *priv;
683 union cpt_inst_w7 w7;
686 if (rte_mempool_get(pool, (void **)&priv))
/* Wipe recycled pool object before populating. */
689 memset(priv, 0, sizeof(struct cnxk_ae_sess));
691 ret = cnxk_ae_fill_session_parameters(priv, xform);
/* Unwind: parameters failed to fill; return the object untouched. */
693 rte_mempool_put(pool, priv);
698 w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
699 priv->cpt_inst_w7 = w7.u64;
/* Per-device shared tables cached in the session for fast-path use. */
700 priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
701 priv->ec_grp = vf->ec_grp;
702 set_asym_session_private_data(sess, dev->driver_id, priv);
/*
 * Diagnostic dump on error: print the software pending-queue state, then
 * read the LF's INPROG and Q_INST_PTR registers to report in-flight and
 * queued instruction counts, and finally dump the AF registers.  Debug
 * only — performs MMIO reads.  (The tail of the function is outside this
 * view.)
 */
708 cnxk_cpt_dump_on_err(struct cnxk_cpt_qp *qp)
710 struct pending_queue *pend_q = &qp->pend_q;
711 uint64_t inflight, enq_ptr, deq_ptr, insts;
712 union cpt_lf_q_inst_ptr inst_ptr;
713 union cpt_lf_inprog lf_inprog;
715 plt_print("Lcore ID: %d, LF/QP ID: %d", rte_lcore_id(), qp->lf.lf_id);
717 plt_print("S/w pending queue:");
718 plt_print("\tHead: %"PRIu64"", pend_q->head);
719 plt_print("\tTail: %"PRIu64"", pend_q->tail);
720 plt_print("\tMask: 0x%"PRIx64"", pend_q->pq_mask);
721 plt_print("\tInflight count: %"PRIu64"",
722 pending_queue_infl_cnt(pend_q->head, pend_q->tail,
726 plt_print("H/w pending queue:");
728 lf_inprog.u = plt_read64(qp->lf.rbase + CPT_LF_INPROG);
729 inflight = lf_inprog.s.inflight;
730 plt_print("\tInflight in engines: %"PRIu64"", inflight);
732 inst_ptr.u = plt_read64(qp->lf.rbase + CPT_LF_Q_INST_PTR);
734 enq_ptr = inst_ptr.s.nq_ptr;
735 deq_ptr = inst_ptr.s.dq_ptr;
/* Outstanding hardware-queue instructions; when the NQ pointer has
 * wrapped past DQ, add the ring size plus a fixed slack (320 + 40) —
 * presumably extra queue regions defined by the CPT hardware spec;
 * NOTE(review): confirm these magic constants against the HRM. */
737 if (enq_ptr >= deq_ptr)
738 insts = enq_ptr - deq_ptr;
740 insts = (enq_ptr + pend_q->pq_mask + 1 + 320 + 40) - deq_ptr;
742 plt_print("\tNQ ptr: 0x%"PRIx64"", enq_ptr);
743 plt_print("\tDQ ptr: 0x%"PRIx64"", deq_ptr);
744 plt_print("Insts waiting in CPT: %"PRIu64"", insts);
747 roc_cpt_afs_print(qp->lf.roc_cpt);