1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <cryptodev_pmd.h>
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
14 #include "cnxk_cryptodev_capabilities.h"
/* Upper bounds used to size per-op metadata for asymmetric operations */
17 #define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
18 #define CNXK_CPT_MAX_ASYM_OP_MOD_LEN 1024
/*
 * Compute the per-operation metadata buffer length (bytes) needed for
 * symmetric crypto processing.
 * NOTE(review): this extract is missing lines (return type, braces, final
 * return); comments below describe only the visible accumulation.
 */
21 cnxk_cpt_get_mlen(void)
26 len = 2 * sizeof(uint64_t); /* two 64-bit words of fixed overhead — exact use not visible here */
27 len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t); /* room for the widest MAC */
29 len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN; /* offset-control word + IV area */
/* SG list: header plus one SG entry per group of 4 in/out pointers (hence >> 2) */
30 len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
31 (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
32 2) * ROC_SE_SG_ENTRY_SIZE),
/*
 * Compute the per-operation metadata buffer length (bytes) needed for
 * asymmetric crypto processing.
 * NOTE(review): return type, braces and final return are missing from this
 * extract.
 */
39 cnxk_cpt_asym_get_mlen(void)
44 len = sizeof(uint64_t); /* one 64-bit word of fixed overhead */
46 /* Get meta len for asymmetric operations */
47 len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
/*
 * dev_configure callback: mask feature flags per application request,
 * validate the requested queue-pair count against available LFs, configure
 * the ROC CPT device, and set up asym tables (FPM, EC group) when
 * asymmetric crypto is enabled.
 * NOTE(review): error-handling/return lines are missing from this extract;
 * the visible plt_err() calls presumably sit inside failure branches.
 */
53 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
54 struct rte_cryptodev_config *conf)
56 struct cnxk_cpt_vf *vf = dev->data->dev_private;
57 struct roc_cpt *roc_cpt = &vf->cpt;
58 uint16_t nb_lf_avail, nb_lf;
/* Advertise the default features minus whatever the app disabled */
61 dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
63 nb_lf_avail = roc_cpt->nb_lf_avail;
64 nb_lf = conf->nb_queue_pairs;
/* One LF backs one queue pair; cannot exceed what the hardware exposes */
66 if (nb_lf > nb_lf_avail)
69 ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
71 plt_err("Could not configure device");
75 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
76 /* Initialize shared FPM table */
77 ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
79 plt_err("Could not get FPM table");
83 /* Init EC grp table */
84 ret = roc_ae_ec_grp_get(vf->ec_grp);
86 plt_err("Could not get EC grp table");
/*
 * dev_start callback: enable the instruction queue of every configured LF
 * so that enqueued requests begin to be processed.
 */
96 cnxk_cpt_dev_start(struct rte_cryptodev *dev)
98 struct cnxk_cpt_vf *vf = dev->data->dev_private;
99 struct roc_cpt *roc_cpt = &vf->cpt;
100 uint16_t nb_lf = roc_cpt->nb_lf;
103 for (qp_id = 0; qp_id < nb_lf; qp_id++)
104 roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
/*
 * dev_stop callback: mirror of dev_start — disable the instruction queue
 * of every configured LF.
 */
110 cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
112 struct cnxk_cpt_vf *vf = dev->data->dev_private;
113 struct roc_cpt *roc_cpt = &vf->cpt;
114 uint16_t nb_lf = roc_cpt->nb_lf;
117 for (qp_id = 0; qp_id < nb_lf; qp_id++)
118 roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
/*
 * dev_close callback: release every queue pair, tear down asym state when
 * asymmetric crypto was enabled (the teardown statements are not visible in
 * this extract), then clear the underlying ROC CPT device.
 */
122 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
124 struct cnxk_cpt_vf *vf = dev->data->dev_private;
128 for (i = 0; i < dev->data->nb_queue_pairs; i++) {
129 ret = cnxk_cpt_queue_pair_release(dev, i);
/* Log and presumably continue/propagate — error path lines not visible */
131 plt_err("Could not release queue pair %u", i);
136 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
141 roc_cpt_dev_clear(&vf->cpt);
/*
 * dev_infos_get callback: report device limits, capabilities and feature
 * flags to the application.
 */
147 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
148 struct rte_cryptodev_info *info)
150 struct cnxk_cpt_vf *vf = dev->data->dev_private;
151 struct roc_cpt *roc_cpt = &vf->cpt;
/* Queue pairs are capped both by available LFs and a devarg-style limit */
153 info->max_nb_queue_pairs =
154 RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
155 plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
157 info->feature_flags = cnxk_cpt_default_ff_get();
158 info->capabilities = cnxk_crypto_capabilities_get(vf);
159 info->sym.max_nb_sessions = 0; /* 0 == no PMD-imposed session limit */
160 info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
161 info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
/*
 * Build the canonical memzone name for a queue pair's pending queue;
 * used by both the create (reserve) and destroy (lookup/free) paths.
 */
165 qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
167 snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
/*
 * Create the per-queue-pair mempool that supplies metadata buffers for
 * in-flight requests. Element size is the max of the sym/asym metadata
 * requirements; pool size is padded so per-lcore caches cannot starve the
 * queue.
 * NOTE(review): error/return lines are missing from this extract.
 */
171 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
172 struct cnxk_cpt_qp *qp, uint8_t qp_id,
173 uint32_t nb_elements)
175 char mempool_name[RTE_MEMPOOL_NAMESIZE];
176 struct cpt_qp_meta_info *meta_info;
177 int lcore_cnt = rte_lcore_count();
178 struct rte_mempool *pool;
179 int mb_pool_sz, mlen = 8; /* 8-byte minimum when neither sym nor asym is enabled */
182 if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
184 mlen = cnxk_cpt_get_mlen();
187 if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
189 /* Get meta len required for asymmetric operations */
190 mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
193 mb_pool_sz = nb_elements;
194 cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
196 /* For poll mode, core that enqueues and core that dequeues can be
197 * different. For event mode, all cores are allowed to use same crypto
/* Pad the pool so objects held in per-lcore caches don't exhaust it */
201 mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
203 /* Allocate mempool */
205 snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
206 dev->data->dev_id, qp_id);
208 pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
209 NULL, NULL, NULL, NULL, rte_socket_id(), 0);
212 plt_err("Could not create mempool for metabuf");
/* Publish pool and element length for the datapath to use */
216 meta_info = &qp->meta_info;
218 meta_info->pool = pool;
219 meta_info->mlen = mlen;
/*
 * Free the queue pair's metadata mempool and clear the stale pointer so a
 * later destroy/create cycle cannot double-free or reuse it.
 */
225 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
227 struct cpt_qp_meta_info *meta_info = &qp->meta_info;
229 rte_mempool_free(meta_info->pool);
231 meta_info->pool = NULL;
/*
 * Allocate and initialize a queue pair: the qp struct itself, a memzone
 * for the pending (in-flight request) queue, and the metadata mempool.
 * Returns the new qp, or presumably NULL on failure (return statements are
 * missing from this extract). The visible rte_memzone_free() at the end is
 * presumably an error-unwind label path — confirm against full source.
 */
235 static struct cnxk_cpt_qp *
236 cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
239 const struct rte_memzone *pq_mem;
240 char name[RTE_MEMZONE_NAMESIZE];
241 struct cnxk_cpt_qp *qp;
246 /* Allocate queue pair */
247 qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
250 plt_err("Could not allocate queue pair");
254 /* For pending queue */
255 len = iq_len * sizeof(struct cpt_inflight_req); /* one slot per IQ descriptor */
257 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
260 pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
261 RTE_MEMZONE_SIZE_HINT_ONLY |
263 RTE_CACHE_LINE_SIZE);
264 if (pq_mem == NULL) {
265 plt_err("Could not allocate reserved memzone");
273 ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
275 plt_err("Could not create mempool for metabuf");
279 /* Initialize pending queue */
280 qp->pend_q.req_queue = pq_mem->addr;
287 rte_memzone_free(pq_mem);
/*
 * Tear down a queue pair created by cnxk_cpt_qp_create(): destroy the
 * metadata mempool, then look up the pending-queue memzone by its canonical
 * name and free it. (qp struct free / return lines not visible in this
 * extract.)
 */
294 cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
296 const struct rte_memzone *pq_mem;
297 char name[RTE_MEMZONE_NAMESIZE];
300 cnxk_cpt_metabuf_mempool_destroy(qp);
302 qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
305 pq_mem = rte_memzone_lookup(name);
307 ret = rte_memzone_free(pq_mem);
/*
 * queue_pair_release callback: finalize the LF (fini statements not visible
 * in this extract), destroy the qp's resources, and clear both the ROC and
 * cryptodev bookkeeping slots so the id can be reused.
 */
317 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
319 struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
320 struct cnxk_cpt_vf *vf = dev->data->dev_private;
321 struct roc_cpt *roc_cpt = &vf->cpt;
322 struct roc_cpt_lf *lf;
328 lf = roc_cpt->lf[qp_id];
334 ret = cnxk_cpt_qp_destroy(dev, qp);
336 plt_err("Could not destroy queue pair %d", qp_id);
/* Unpublish the qp from both ROC and rte_cryptodev views */
340 roc_cpt->lf[qp_id] = NULL;
341 dev->data->queue_pairs[qp_id] = NULL;
/*
 * queue_pair_setup callback: (re)create a queue pair, bind it to an LF,
 * initialize the LMT line used for instruction submission, and publish the
 * qp. An existing qp at this id is released first, making setup idempotent.
 * NOTE(review): error/return lines are missing from this extract; the
 * trailing cnxk_cpt_qp_destroy() is presumably an error-unwind path.
 */
347 cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
348 const struct rte_cryptodev_qp_conf *conf,
349 int socket_id __rte_unused)
351 struct cnxk_cpt_vf *vf = dev->data->dev_private;
352 struct roc_cpt *roc_cpt = &vf->cpt;
353 struct rte_pci_device *pci_dev;
354 struct cnxk_cpt_qp *qp;
/* Re-setup: tear down any qp already occupying this slot */
357 if (dev->data->queue_pairs[qp_id] != NULL)
358 cnxk_cpt_queue_pair_release(dev, qp_id);
360 pci_dev = RTE_DEV_TO_PCI(dev->device);
/* BAR 2 must be mapped — assumed to hold the LF registers; TODO confirm */
362 if (pci_dev->mem_resource[2].addr == NULL) {
363 plt_err("Invalid PCI mem address");
367 qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
369 plt_err("Could not create queue pair %d", qp_id);
373 qp->lf.lf_id = qp_id;
374 qp->lf.nb_desc = conf->nb_descriptors;
376 ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
378 plt_err("Could not initialize queue pair %d", qp_id);
/* Power-of-two descriptor count assumed: mask = nb_desc - 1 */
383 qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
385 roc_cpt->lf[qp_id] = &qp->lf;
387 ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
389 roc_cpt->lf[qp_id] = NULL; /* undo the publish done above on failure */
390 plt_err("Could not init lmtline for queue pair %d", qp_id);
/* Record the session mempools the datapath will allocate from */
394 qp->sess_mp = conf->mp_session;
395 qp->sess_mp_priv = conf->mp_session_private;
396 dev->data->queue_pairs[qp_id] = qp;
401 cnxk_cpt_qp_destroy(dev, qp);
/* sym_session_get_size callback: size of the PMD-private session struct. */
406 cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
408 return sizeof(struct cnxk_se_sess);
/*
 * Classify a symmetric transform chain into a CNXK_CPT_* category (or
 * reject it — the rejection return lines are missing from this extract).
 * Order matters: single-xform cases are handled first, then two-xform
 * chains by (type, op, algo) combination.
 */
412 sym_xform_verify(struct rte_crypto_sym_xform *xform)
/* NULL-auth verify alone is not supported — visible branch body missing */
414 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
415 xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
416 xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
/* Single-xform chains */
419 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
420 return CNXK_CPT_CIPHER;
422 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
423 return CNXK_CPT_AUTH;
425 if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
426 return CNXK_CPT_AEAD;
/* Past this point a second xform is required */
428 if (xform->next == NULL)
/* 3DES-CBC + SHA1 combinations — outcome lines not visible in extract */
431 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
432 xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
433 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
434 xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
437 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
438 xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
439 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
440 xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
/* Standard encrypt-then-MAC / MAC-then-decrypt orderings */
443 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
444 xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
445 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
446 xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
447 return CNXK_CPT_CIPHER_ENC_AUTH_GEN;
449 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
450 xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
451 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
452 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
453 return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;
/* auth-gen then encrypt: only specific algo pairs are accepted */
455 if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
456 xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
457 xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
458 xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
459 switch (xform->auth.algo) {
460 case RTE_CRYPTO_AUTH_SHA1_HMAC:
461 switch (xform->next->cipher.algo) {
462 case RTE_CRYPTO_CIPHER_AES_CBC:
463 return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
/* decrypt then auth-verify: only specific algo pairs are accepted */
472 if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
473 xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
474 xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
475 xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
476 switch (xform->cipher.algo) {
477 case RTE_CRYPTO_CIPHER_AES_CBC:
478 switch (xform->next->auth.algo) {
479 case RTE_CRYPTO_AUTH_SHA1_HMAC:
480 return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
/*
 * Build the CPT instruction word 7 for a session: context pointer plus
 * engine-group selection. ZUC/SNOW (zsk) and ChaCha-Poly go to the SE
 * group; everything else to the IE group. (Return statement not visible
 * in this extract.)
 */
493 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
495 union cpt_inst_w7 inst_w7;
497 inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
499 /* Set the engine group */
500 if (sess->zsk_flag || sess->chacha_poly)
501 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
503 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
/*
 * Allocate and fill a PMD-private symmetric session: verify the xform
 * chain, get a private object from the session mempool, populate it per
 * chain type, validate HMAC MAC length, compute instruction word 7 and
 * attach the private data to the generic session.
 * NOTE(review): several lines (switch labels, error returns, gotos) are
 * missing from this extract; the trailing rte_mempool_put() is presumably
 * the error-unwind path.
 */
509 sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
510 struct rte_crypto_sym_xform *xform,
511 struct rte_cryptodev_sym_session *sess,
512 struct rte_mempool *pool)
514 struct cnxk_se_sess *sess_priv;
518 ret = sym_xform_verify(xform);
519 if (unlikely(ret < 0))
522 if (unlikely(rte_mempool_get(pool, &priv))) {
523 plt_dp_err("Could not allocate session private data");
527 memset(priv, 0, sizeof(struct cnxk_se_sess));
/* Dispatch on the chain type returned by sym_xform_verify() */
532 case CNXK_CPT_CIPHER:
533 ret = fill_sess_cipher(xform, sess_priv);
/* AES-GMAC arrives as an auth xform but needs GMAC-specific setup */
536 if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
537 ret = fill_sess_gmac(xform, sess_priv);
539 ret = fill_sess_auth(xform, sess_priv);
542 ret = fill_sess_aead(xform, sess_priv);
544 case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
545 case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
546 ret = fill_sess_cipher(xform, sess_priv);
549 ret = fill_sess_auth(xform->next, sess_priv);
551 case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
552 case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
553 ret = fill_sess_auth(xform, sess_priv);
556 ret = fill_sess_cipher(xform->next, sess_priv);
/* Reject unsupported HMAC digest lengths; free the auth key on failure */
565 if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
566 cpt_mac_len_verify(&xform->auth)) {
567 plt_dp_err("MAC length is not supported");
568 if (sess_priv->roc_se_ctx.auth_key != NULL) {
569 plt_free(sess_priv->roc_se_ctx.auth_key);
570 sess_priv->roc_se_ctx.auth_key = NULL;
577 sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);
579 set_sym_session_private_data(sess, driver_id, sess_priv);
584 rte_mempool_put(pool, priv);
/*
 * sym_session_configure callback: thin wrapper that resolves the ROC CPT
 * handle and driver id, then delegates to sym_session_configure().
 */
590 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
591 struct rte_crypto_sym_xform *xform,
592 struct rte_cryptodev_sym_session *sess,
593 struct rte_mempool *pool)
595 struct cnxk_cpt_vf *vf = dev->data->dev_private;
596 struct roc_cpt *roc_cpt = &vf->cpt;
599 driver_id = dev->driver_id;
601 return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
/*
 * Release a PMD-private symmetric session: free the copied auth key, zero
 * the private object (plain memset — key material scrubbing not guaranteed
 * against optimization), detach it from the generic session and return it
 * to its originating mempool.
 */
605 sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
607 void *priv = get_sym_session_private_data(sess, driver_id);
608 struct cnxk_se_sess *sess_priv;
609 struct rte_mempool *pool;
616 if (sess_priv->roc_se_ctx.auth_key != NULL)
617 plt_free(sess_priv->roc_se_ctx.auth_key);
619 memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
/* The object knows which mempool it came from */
621 pool = rte_mempool_from_obj(priv);
623 set_sym_session_private_data(sess, driver_id, NULL);
625 rte_mempool_put(pool, priv);
/* sym_session_clear callback: delegate with this device's driver id. */
629 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
630 struct rte_cryptodev_sym_session *sess)
632 return sym_session_clear(dev->driver_id, sess);
/* asym_session_get_size callback: size of the PMD-private asym session. */
636 cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
638 return sizeof(struct cnxk_ae_sess);
/*
 * asym_session_clear callback: free per-session asym parameters, zero the
 * private object, detach it and return it to its mempool.
 */
642 cnxk_ae_session_clear(struct rte_cryptodev *dev,
643 struct rte_cryptodev_asym_session *sess)
645 struct rte_mempool *sess_mp;
646 struct cnxk_ae_sess *priv;
648 priv = get_asym_session_private_data(sess, dev->driver_id);
652 /* Free resources allocated in session_cfg */
653 cnxk_ae_free_session_parameters(priv);
655 /* Reset and free object back to pool */
656 memset(priv, 0, cnxk_ae_session_size_get(dev));
657 sess_mp = rte_mempool_from_obj(priv);
658 set_asym_session_private_data(sess, dev->driver_id, NULL);
659 rte_mempool_put(sess_mp, priv);
/*
 * asym_session_cfg callback: allocate a private asym session object from
 * the mempool, fill it from the xform, select the AE engine group in
 * instruction word 7, cache FPM/EC-group pointers and attach the private
 * data to the generic session.
 * NOTE(review): this function is truncated at the end of the extract (no
 * final return visible); comments cover only the visible statements.
 */
663 cnxk_ae_session_cfg(struct rte_cryptodev *dev,
664 struct rte_crypto_asym_xform *xform,
665 struct rte_cryptodev_asym_session *sess,
666 struct rte_mempool *pool)
668 struct cnxk_cpt_vf *vf = dev->data->dev_private;
669 struct roc_cpt *roc_cpt = &vf->cpt;
670 struct cnxk_ae_sess *priv;
671 union cpt_inst_w7 w7;
674 if (rte_mempool_get(pool, (void **)&priv))
677 memset(priv, 0, sizeof(struct cnxk_ae_sess));
679 ret = cnxk_ae_fill_session_parameters(priv, xform);
/* On fill failure, return the object to the pool (error return not visible) */
681 rte_mempool_put(pool, priv);
/* Asym ops always use the AE engine group */
686 w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
687 priv->cpt_inst_w7 = w7.u64;
/* Cache per-VF FPM/EC tables so the datapath avoids the vf indirection */
688 priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
689 priv->ec_grp = vf->ec_grp;
690 set_asym_session_private_data(sess, dev->driver_id, priv);