/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#define METABUF_POOL_CACHE_SIZE	512

static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}

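/*
 * Meta buffers hold the transient, per-op state of an in-flight request:
 * the cpt_request_info, gather/scatter lists and similar scratch memory.
 * One pool is created per queue pair, with elements sized for the worst
 * case across the op types this device supports (see max_mlen below).
 */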
static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
				struct otx2_cpt_qp *qp, uint8_t qp_id,
				int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	struct rte_mempool *pool;
	int max_mlen = 0;
	int asym_mlen = 0;
	int lb_mlen = 0;
	int sg_mlen = 0;
	int ret;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

		/* Get meta len for scatter gather mode */
		sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

		/* Extra 32B saved for future considerations */
		sg_mlen += 4 * sizeof(uint64_t);

		/* Get meta len for linear buffer (direct) mode */
		lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

		/* Extra 32B saved for future considerations */
		lb_mlen += 4 * sizeof(uint64_t);
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

		/* Get meta len required for asymmetric operations */
		asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
	}

	/*
	 * Check max requirement for meta buffer to
	 * support crypto op of any type (sym/asym).
	 */
	max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

	/* Allocate mempool */

	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);

	if (pool == NULL) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		return rte_errno;
	}

	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
					 NULL);
	if (ret) {
		CPT_LOG_ERR("Could not set mempool ops");
		goto mempool_free;
	}

	ret = rte_mempool_populate_default(pool);
	if (ret <= 0) {
		CPT_LOG_ERR("Could not populate metabuf pool");
		goto mempool_free;
	}

	meta_info = &qp->meta_info;

	meta_info->pool = pool;
	meta_info->lb_mlen = lb_mlen;
	meta_info->sg_mlen = sg_mlen;

	return 0;

mempool_free:
	rte_mempool_free(pool);
	return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
	struct cpt_qp_meta_info *meta_info = &qp->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->lb_mlen = 0;
	meta_info->sg_mlen = 0;
}

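/*
 * Bind the queue pair to an ethdev for inline IPsec outbound processing.
 * Security-capable ports are picked round-robin; the shared port_offset
 * counter spreads queue pairs across the available ports.
 */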
static int
otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
	uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
	int i, ret;

	for (i = 0; i < nb_ethport; i++) {
		port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
		if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
			break;
	}

	if (i >= nb_ethport)
		return 0;

	ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
	if (ret)
		return ret;

	/* Publish inline Tx QP to eth dev security */
	ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
	if (ret)
		return ret;

	return 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
		   uint8_t group)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div40;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct otx2_cpt_qp *qp;
	uint64_t lmtline;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
				OTX2_ALIGN, 0);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not allocate queue pair");
		return NULL;
	}

	iq_len = OTX2_CPT_IQ_LEN;

	/*
	 * Queue size must be a multiple of 40 and effective queue size to
	 * software is (size_div40 - 1) * 40
	 */
	size_div40 = (iq_len + 40 - 1) / 40 + 1;

	/* For pending queue */
	len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);

	/* Space for instruction group memory */
	len += size_div40 * 16;

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
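
	/*
	 * Resulting memzone layout, as a rough sketch (regions follow the
	 * running 'len' computed above):
	 *
	 *   pending queue rids    : iq_len * RTE_ALIGN(sizeof(struct rid), 8)
	 *   instruction group mem : size_div40 * 16
	 *   pad up to pg_sz boundary
	 *   instruction queue     : iq_len * sizeof(union cpt_inst_s)
	 *   pad up to pg_sz boundary
	 */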
	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		CPT_LOG_ERR("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
	if (ret) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		goto lf_mem_free;
	}

	/* Initialize pending queue */
	qp->pend_q.rid_queue = (struct rid *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
	used_len += size_div40 * 16;
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);

	lmtline = vf->otx2_dev.bar2 +
		  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
		  OTX2_LMT_LF_LMTLINE(0);

	qp->lmtline = (void *)lmtline;

	qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		goto mempool_destroy;
	}

	otx2_cpt_iq_disable(qp);

	ret = otx2_cpt_qp_inline_cfg(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not configure queue for inline IPsec");
		goto mempool_destroy;
	}

	ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
				 size_div40);
	if (ret) {
		CPT_LOG_ERR("Could not enable instruction queue");
		goto mempool_destroy;
	}

	return qp;

mempool_destroy:
	otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
	rte_memzone_free(lf_mem);
qp_free:
	rte_free(qp);
	return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		return ret;
	}

	otx2_cpt_iq_disable(qp);

	otx2_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);

	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}

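/*
 * Reject xform chains the hardware cannot handle: auth followed by
 * cipher-encrypt, cipher-decrypt followed by auth, 3DES-CBC chained with
 * SHA1 in either order, and a standalone NULL-auth verify.
 */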
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->next) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
			return -ENOTSUP;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct rte_crypto_sym_xform *temp_xform = xform;
	struct cpt_sess_misc *misc;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_sess_misc) +
			offsetof(struct cpt_ctx, fctx));

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}

		if (unlikely(ret))
			goto priv_put;
	}

	if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
	    cpt_mac_len_verify(&temp_xform->auth)) {
		CPT_LOG_ERR("MAC length is not supported");
		ret = -ENOTSUP;
		goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, misc);

	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	/*
	 * IE engines support IPsec operations
	 * SE engines support IPsec operations, Chacha-Poly and
	 * Air-Crypto operations
	 */
	if (misc->zsk_flag || misc->chacha_poly)
		misc->egrp = OTX2_CPT_EGRP_SE;
	else
		misc->egrp = OTX2_CPT_EGRP_SE_IE;

	return 0;

priv_put:
	rte_mempool_put(pool, priv);
	return -ENOTSUP;
}

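/*
 * The doorbell is rung by copying the instruction to the LMT line and
 * issuing an LDEOR via otx2_lmt_submit(). A zero status means the LMTST
 * did not take effect and the copy + submit sequence has to be retried,
 * hence the do/while loop below.
 */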
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
		     struct pending_queue *pend_q,
		     struct cpt_request_info *req)
{
	void *lmtline = qp->lmtline;
	union cpt_inst_s inst;
	uint64_t lmt_status;

	if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
		return -EAGAIN;

	inst.u[0] = 0;
	inst.s9x.res_addr = req->comp_baddr;
	inst.u[2] = 0;
	inst.u[3] = 0;

	inst.s9x.ei0 = req->ist.ei0;
	inst.s9x.ei1 = req->ist.ei1;
	inst.s9x.ei2 = req->ist.ei2;
	inst.s9x.ei3 = req->ist.ei3;

	req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	do {
		/* Copy CPT command to LMTLINE */
		memcpy(lmtline, &inst, sizeof(inst));

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);

	pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;

	return 0;
}

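/*
 * The first words of every meta buffer form a small response header that
 * the dequeue path relies on (see otx2_cpt_dequeue_burst()):
 *
 *   word 0: meta buffer address, freed back to the pool after dequeue
 *   word 1: rte_crypto_op pointer handed back to the application
 *   word 2: op specific - auth data to verify (sym), cpt_request_info
 *           (security sessions); zeroed for asym
 *   word 3: op specific length field
 *
 * Security sessions additionally stash the tunnel type in word 4; for
 * asym ops the cpt_request_info itself follows the first four words.
 */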
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
		      struct rte_crypto_op *op,
		      struct pending_queue *pend_q)
{
	struct cpt_qp_meta_info *minfo = &qp->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	vq_cmd_word3_t *w3;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx2_cryptodev_driver_id);

	/* Store IO address of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf to point to end of cpt_request_info structure */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			    sizeof(struct cpt_request_info);
	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	/* Set engine group of AE */
	w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
	w3->s.grp = OTX2_CPT_EGRP_AE;

	ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cpt_request_info *req;
	struct cpt_sess_misc *sess;
	vq_cmd_word3_t *w3;
	uint64_t cpt_op;
	void *mdata;
	int ret;

	sess = get_sym_session_private_data(sym_op->session,
					    otx2_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (cpt_op & CPT_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
				     (void **)&req);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
					 (void **)&req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
			       op, (unsigned int)cpt_op, ret);
		return ret;
	}

	w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
	w3->s.grp = sess->egrp;

	ret = otx2_cpt_enqueue_req(qp, pend_q, req);

	if (unlikely(ret)) {
		/* Free buffer allocated by fill params routines */
		free_op_meta(mdata, qp->meta_info.pool);
	}

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q)
{
	struct otx2_sec_session_ipsec_lp *sess;
	struct otx2_ipsec_po_sa_ctl *ctl_wrd;
	struct otx2_sec_session *priv;
	struct cpt_request_info *req;
	int ret;

	priv = get_sec_session_private_data(op->sym->sec_session);
	sess = &priv->ipsec.lp;

	ctl_wrd = &sess->in_sa.ctl;

	if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
		ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
	else
		ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);

	if (unlikely(ret)) {
		otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
		return ret;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, req);

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
			      struct pending_queue *pend_q)
{
	const int driver_id = otx2_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return -ENOMEM;

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	sym_op->session = sess;

	ret = otx2_cpt_enqueue_sym(qp, op, pend_q);

	if (unlikely(ret))
		goto priv_put;

	return 0;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return ret;
}

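/*
 * Burst enqueue entry point: ops are dispatched on op type and session
 * type. The loop stops at the first op that cannot be enqueued and the
 * number of ops actually accepted is returned.
 */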
static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_allowed, count = 0;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct rte_crypto_op *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
				ret = otx2_cpt_enqueue_sec(qp, op, pend_q);
			else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
			else
				ret = otx2_cpt_enqueue_sym_sessless(qp, op,
								    pend_q);
		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
			else
				break;
		} else
			break;

		if (unlikely(ret))
			break;
	}

	return count;
}

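/*
 * Copy an RSA result back into the crypto op. With
 * RTE_CRYPTO_RSA_PADDING_NONE the engine output is simply n.length
 * bytes; otherwise the first two bytes of the output carry the length
 * of the unpadded data, which follows at offset 2.
 */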
static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		     struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->message.length = rsa_ctx->n.length;
			memcpy(rsa->message.data, req->rptr,
			       rsa->message.length);
		} else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					      (*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy decrypted data.
			 */
			memcpy(rsa->message.data, req->rptr + 2,
			       rsa->message.length);
		}
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->sign.length = rsa_ctx->n.length;
			memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		} else {
			/* Get length of signed output */
			rsa->sign.length = rte_cpu_to_be_16
					   (*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy signed data.
			 */
			memcpy(rsa->sign.data, req->rptr + 2,
			       rsa->sign.length);
		}
		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

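/*
 * EC results come back as two prime-length components, the second one
 * starting at the next 8-byte aligned offset (hence ROUNDUP8): r and s
 * for ECDSA sign, the x and y coordinates for ECPM.
 */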
static __rte_always_inline void
otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			       struct cpt_request_info *req,
			       struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
			   struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx2_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static void
otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
	struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
	vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m = sym_op->m_src;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	uint16_t m_len;
	int mdata_len;
	char *data;

	mdata_len = (int)rsp[3];
	rte_pktmbuf_trim(m, mdata_len);

	if ((word0->s.opcode & 0xff) == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
		data = rte_pktmbuf_mtod(m, char *);

		if (rsp[4] == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			ip = (struct rte_ipv4_hdr *)(data +
				OTX2_IPSEC_PO_INB_RPTR_HDR);
			m_len = rte_be_to_cpu_16(ip->total_length);
		} else {
			ip6 = (struct rte_ipv6_hdr *)(data +
				OTX2_IPSEC_PO_INB_RPTR_HDR);
			m_len = rte_be_to_cpu_16(ip6->payload_len) +
				sizeof(struct rte_ipv6_hdr);
		}

		m->data_len = m_len;
		m->pkt_len = m_len;
		m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
	}
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
			      uintptr_t *rsp, uint8_t cc)
{
	unsigned int sz;

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
				otx2_cpt_sec_post_process(cop, rsp);
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			} else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			return;
		}

		if (likely(cc == NO_ERR)) {
			/* Verify authentication data if required */
			if (unlikely(rsp[2]))
				compl_auth_verify(cop, (uint8_t *)rsp[2],
						  rsp[3]);
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			if (cc == ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			sym_session_clear(otx2_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
					cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}

	if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (likely(cc == NO_ERR)) {
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			/*
			 * Pass cpt_req_info stored in metabuf during
			 * enqueue.
			 */
			rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
			otx2_cpt_asym_post_process(cop,
					(struct cpt_request_info *)rsp);
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

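/*
 * Completions are detected by polling the completion code of each
 * pending request, oldest first. The scan stops at the first request
 * still marked ERR_REQ_PENDING, so ops are returned in submission order.
 */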
static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i, nb_pending, nb_completed;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_request_info *req;
	struct rte_crypto_op *cop;
	uint8_t cc[nb_ops];
	struct rid *rid;
	uintptr_t *rsp;
	void *metabuf;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;

	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		rid = &pend_q->rid_queue[pend_q->deq_head];
		req = (struct cpt_request_info *)(rid->rid);

		cc[i] = otx2_cpt_compcode_get(req);

		if (unlikely(cc[i] == ERR_REQ_PENDING))
			break;

		ops[i] = req->op;

		MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		rsp = (void *)ops[i];

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

		free_op_meta(metabuf, qp->meta_info.pool);
	}

	return nb_completed;
}

/* PMD ops */

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
		    struct rte_cryptodev_config *conf)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int ret;

	if (conf->nb_queue_pairs > vf->max_queues) {
		CPT_LOG_ERR("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	dev->feature_flags &= ~conf->ff_disable;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx2_fpm_iova);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret) {
			CPT_LOG_ERR("Could not detach CPT queues");
			return ret;
		}
	}

	ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
	if (ret) {
		CPT_LOG_ERR("Could not attach CPT queues");
		return -ENODEV;
	}

	ret = otx2_cpt_msix_offsets_get(dev);
	if (ret) {
		CPT_LOG_ERR("Could not get MSI-X offsets");
		goto queues_detach;
	}

	/* Register error interrupts */
	ret = otx2_cpt_err_intr_register(dev);
	if (ret) {
		CPT_LOG_ERR("Could not register error interrupts");
		goto queues_detach;
	}

	ret = otx2_cpt_inline_init(dev);
	if (ret) {
		CPT_LOG_ERR("Could not enable inline IPsec");
		goto intr_unregister;
	}

	dev->enqueue_burst = otx2_cpt_enqueue_burst;
	dev->dequeue_burst = otx2_cpt_dequeue_burst;

	return 0;

intr_unregister:
	otx2_cpt_err_intr_unregister(dev);
queues_detach:
	otx2_cpt_queues_detach(dev);
	return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);

	CPT_PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = otx2_cpt_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret)
			CPT_LOG_ERR("Could not detach CPT queues");
	}

	return 0;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;

	if (info != NULL) {
		info->max_nb_queue_pairs = vf->max_queues;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx2_cpt_capabilities_get();
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx2_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
	}
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
{
	uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
	struct rte_pci_device *pci_dev;
	struct otx2_cpt_qp *qp;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[qp_id] != NULL)
		otx2_cpt_queue_pair_release(dev, qp_id);

	if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
		CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
			    conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[2].addr == NULL) {
		CPT_LOG_ERR("Invalid PCI mem address");
		return -EIO;
	}

	qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (qp == NULL)
		return -EINVAL;

	CPT_LOG_INFO("Releasing queue pair %d", qp_id);

	ret = otx2_cpt_qp_destroy(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			  struct rte_crypto_asym_xform *xform,
			  struct rte_cryptodev_asym_session *sess,
			  struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session_private_data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	set_asym_session_private_data(sess, dev->driver_id, priv);

	return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
			    struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);
	if (priv == NULL)
		return;

	/* Free resources allocated in session_cfg */
	cpt_free_asym_session_parameters(priv);

	/* Reset and free object back to pool */
	memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
	/* Device control ops */
	.dev_configure = otx2_cpt_dev_config,
	.dev_start = otx2_cpt_dev_start,
	.dev_stop = otx2_cpt_dev_stop,
	.dev_close = otx2_cpt_dev_close,
	.dev_infos_get = otx2_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = otx2_cpt_queue_pair_setup,
	.queue_pair_release = otx2_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = otx2_cpt_sym_session_get_size,
	.sym_session_configure = otx2_cpt_sym_session_configure,
	.sym_session_clear = otx2_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = otx2_cpt_asym_session_size_get,
	.asym_session_configure = otx2_cpt_asym_session_cfg,
	.asym_session_clear = otx2_cpt_asym_session_clear,
};