/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_event_crypto_adapter.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_ipsec_anti_replay.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#define METABUF_POOL_CACHE_SIZE	512

static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}

static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
				struct otx2_cpt_qp *qp, uint8_t qp_id,
				int nb_elements)
{
	char mempool_name[RTE_MEMPOOL_NAMESIZE];
	struct cpt_qp_meta_info *meta_info;
	int lb_mlen = 0, sg_mlen = 0;
	int max_mlen = 0, asym_mlen = 0;
	struct rte_mempool *pool;
	int ret;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		/* Get meta len for scatter gather mode */
		sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

		/* Extra 32B saved for future considerations */
		sg_mlen += 4 * sizeof(uint64_t);

		/* Get meta len for linear buffer (direct) mode */
		lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

		/* Extra 32B saved for future considerations */
		lb_mlen += 4 * sizeof(uint64_t);
	}

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Get meta len required for asymmetric operations */
		asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
	}

	/*
	 * Check max requirement for meta buffer to
	 * support crypto op of any type (sym/asym).
	 */
	max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

	/* Allocate mempool */
	snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
		 dev->data->dev_id, qp_id);

	pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
					METABUF_POOL_CACHE_SIZE, 0,
					rte_socket_id(), 0);
	if (pool == NULL) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		return rte_errno;
	}

	ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
					 NULL);
	if (ret) {
		CPT_LOG_ERR("Could not set mempool ops");
		goto mempool_free;
	}

	ret = rte_mempool_populate_default(pool);
	if (ret <= 0) {
		CPT_LOG_ERR("Could not populate metabuf pool");
		goto mempool_free;
	}

	meta_info = &qp->meta_info;
	meta_info->pool = pool;
	meta_info->lb_mlen = lb_mlen;
	meta_info->sg_mlen = sg_mlen;

	return 0;

mempool_free:
	rte_mempool_free(pool);
	return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
	struct cpt_qp_meta_info *meta_info = &qp->meta_info;

	rte_mempool_free(meta_info->pool);

	meta_info->pool = NULL;
	meta_info->lb_mlen = 0;
	meta_info->sg_mlen = 0;
}

static int
otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
	uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
	int i, ret;

	/* Pick the next security capable ethdev in round-robin order */
	for (i = 0; i < nb_ethport; i++) {
		port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
		if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
			break;
	}

	if (i >= nb_ethport)
		return 0;

	ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
	if (ret)
		return ret;

	/* Publish inline Tx QP to eth dev security */
	ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
	if (ret)
		return ret;

	return 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
		   uint8_t group)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	uint64_t pg_sz = sysconf(_SC_PAGESIZE);
	const struct rte_memzone *lf_mem;
	uint32_t len, iq_len, size_div40;
	char name[RTE_MEMZONE_NAMESIZE];
	uint64_t used_len, iova;
	struct otx2_cpt_qp *qp;
	uint64_t lmtline;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
				OTX2_ALIGN, 0);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not allocate queue pair");
		return NULL;
	}

	iq_len = OTX2_CPT_IQ_LEN;

	/*
	 * Queue size must be a multiple of 40 and effective queue size to
	 * software is (size_div40 - 1) * 40
	 */
	size_div40 = (iq_len + 40 - 1) / 40 + 1;
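
	/*
	 * For example, assuming iq_len is 8200: size_div40 becomes
	 * (8200 + 39) / 40 + 1 = 206, giving an effective queue size of
	 * (206 - 1) * 40 = 8200 entries.
	 */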

	/* For pending queue */
	len = iq_len * sizeof(uintptr_t);

	/* Space for instruction group memory */
	len += size_div40 * 16;

	/* So that instruction queues start as pg size aligned */
	len = RTE_ALIGN(len, pg_sz);

	/* For instruction queues */
	len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);

	/* Wastage after instruction queues */
	len = RTE_ALIGN(len, pg_sz);
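
	/*
	 * Resulting memzone layout (page size aligned sections):
	 *
	 *   [ pending queue: iq_len * sizeof(uintptr_t) ]
	 *   [ instruction group mem: size_div40 * 16    ]
	 *   [ pad up to pg_sz                           ]
	 *   [ instruction queue: iq_len * cpt_inst_s    ]
	 *   [ pad up to pg_sz                           ]
	 */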

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
			RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
			RTE_CACHE_LINE_SIZE);
	if (lf_mem == NULL) {
		CPT_LOG_ERR("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = lf_mem->addr;
	iova = lf_mem->iova;

	memset(va, 0, len);

	ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
	if (ret) {
		CPT_LOG_ERR("Could not create mempool for metabuf");
		goto lf_mem_free;
	}

	/* Initialize pending queue */
	qp->pend_q.req_queue = (uintptr_t *)va;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	used_len = iq_len * sizeof(uintptr_t);
	used_len += size_div40 * 16;
	used_len = RTE_ALIGN(used_len, pg_sz);
	iova += used_len;

	qp->iq_dma_addr = iova;
	qp->id = qp_id;
	qp->blkaddr = vf->lf_blkaddr[qp_id];
	qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);

	lmtline = vf->otx2_dev.bar2 +
		  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
		  OTX2_LMT_LF_LMTLINE(0);
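
	/*
	 * The LMT line is picked from BAR2: RVU_BLOCK_ADDR_LMT selects the
	 * LMT block (bits 20+), the LF slot sits in bits 12+, and
	 * OTX2_LMT_LF_LMTLINE(0) is the offset of the LF's first LMT line.
	 */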

	qp->lmtline = (void *)lmtline;

	qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		goto mempool_destroy;
	}

	otx2_cpt_iq_disable(qp);

	ret = otx2_cpt_qp_inline_cfg(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not configure queue for inline IPsec");
		goto mempool_destroy;
	}

	ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
				 size_div40);
	if (ret) {
		CPT_LOG_ERR("Could not enable instruction queue");
		goto mempool_destroy;
	}

	return qp;

mempool_destroy:
	otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
	rte_memzone_free(lf_mem);
qp_free:
	rte_free(qp);
	return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
	const struct rte_memzone *lf_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
	if (ret && (ret != -ENOENT)) {
		CPT_LOG_ERR("Could not delete inline configuration");
		return ret;
	}

	otx2_cpt_iq_disable(qp);

	otx2_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->id);
	lf_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(lf_mem);
	if (ret)
		return ret;

	rte_free(qp);
	return 0;
}

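/*
 * Reject transform chains the CPT microcode cannot run: chained cipher/auth
 * is supported only as AES-CBC with SHA1-HMAC, 3DES-CBC cannot be chained
 * with plain SHA1 in either order, and NULL auth cannot be used with the
 * verify operation.
 */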
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->next) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
		    (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
		     xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
		     xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
			return -ENOTSUP;
	} else {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
		    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
			return -ENOTSUP;
	}

	return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct rte_crypto_sym_xform *temp_xform = xform;
	struct cpt_sess_misc *misc;
	vq_cmd_word3_t vq_cmd_w3;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_sess_misc) +
			offsetof(struct cpt_ctx, mc_ctx));

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}

		if (ret)
			goto priv_put;
	}

	if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
	    cpt_mac_len_verify(&temp_xform->auth)) {
		CPT_LOG_ERR("MAC length is not supported");
		ret = -ENOTSUP;
		goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, misc);

	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
							 mc_ctx);

	/*
	 * IE engines support IPsec operations
	 * SE engines support IPsec operations, Chacha-Poly and
	 * Air-Crypto operations
	 */
	if (misc->zsk_flag || misc->chacha_poly)
		vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
	else
		vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;

	misc->cpt_inst_w7 = vq_cmd_w3.u64;

	return 0;

priv_put:
	rte_mempool_put(pool, priv);
	return -ENOTSUP;
}

static __rte_always_inline void __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
		    struct cpt_request_info *req,
		    void *lmtline,
		    uint64_t cpt_inst_w7)
{
	union cpt_inst_s inst;
	uint64_t lmt_status;

	inst.u[0] = 0;
	inst.s9x.res_addr = req->comp_baddr;
	inst.u[2] = 0;
	inst.u[3] = 0;

	inst.s9x.ei0 = req->ist.ei0;
	inst.s9x.ei1 = req->ist.ei1;
	inst.s9x.ei2 = req->ist.ei2;
	inst.s9x.ei3 = cpt_inst_w7;

	inst.s9x.qord = 1;
	inst.s9x.grp = qp->ev.queue_id;
	inst.s9x.tt = qp->ev.sched_type;
	inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
			qp->ev.flow_id;
	inst.s9x.wq_ptr = (uint64_t)req >> 3;
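
	/*
	 * req is at least 8-byte aligned, so the low three bits of its
	 * address are implicit; wq_ptr stores the address shifted right
	 * by 3 for delivery back through the event device.
	 */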

	req->qp = qp;

	do {
		/* Copy CPT command to LMTLINE */
		memcpy(lmtline, &inst, sizeof(inst));

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
		     struct pending_queue *pend_q,
		     struct cpt_request_info *req,
		     uint64_t cpt_inst_w7)
{
	void *lmtline = qp->lmtline;
	union cpt_inst_s inst;
	uint64_t lmt_status;

	if (qp->ca_enable) {
		otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
		return 0;
	}

	if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
		return -EAGAIN;

	inst.u[0] = 0;
	inst.s9x.res_addr = req->comp_baddr;
	inst.u[2] = 0;
	inst.u[3] = 0;

	inst.s9x.ei0 = req->ist.ei0;
	inst.s9x.ei1 = req->ist.ei1;
	inst.s9x.ei2 = req->ist.ei2;
	inst.s9x.ei3 = cpt_inst_w7;

	req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	do {
		/* Copy CPT command to LMTLINE */
		memcpy(lmtline, &inst, sizeof(inst));

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
	} while (lmt_status == 0);
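
	/*
	 * Record the request in the software pending queue; completions
	 * are later harvested from here, in order, by
	 * otx2_cpt_dequeue_burst().
	 */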
	pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
	pend_q->pending_count += 1;

	return 0;
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
		      struct rte_crypto_op *op,
		      struct pending_queue *pend_q)
{
	struct cpt_qp_meta_info *minfo = &qp->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx2_cryptodev_driver_id);

	/* Store IO address of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf to point to end of cpt_request_info structure */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			    sizeof(struct cpt_request_info);
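
	/*
	 * Meta buffer layout at this point (a sketch):
	 *
	 *   [ cop[0..3]: mdata va, op va, 0, 0 ]
	 *   [ struct cpt_request_info          ]
	 *   [ remaining space for HW metadata  ] <- params.meta_buf (iova)
	 */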

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);
	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);
	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q)
{
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cpt_request_info *req;
	struct cpt_sess_misc *sess;
	uint64_t cpt_op;
	void *mdata;
	int ret;

	sess = get_sym_session_private_data(sym_op->session,
					    otx2_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (cpt_op & CPT_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
				     (void **)&req);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
					 (void **)&req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
			       op, (unsigned int)cpt_op, ret);
		return ret;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);
	if (unlikely(ret)) {
		/* Free buffer allocated by fill params routines */
		free_op_meta(mdata, qp->meta_info.pool);
	}

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
		     struct pending_queue *pend_q)
{
	uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
	struct rte_mbuf *m_src = op->sym->m_src;
	struct otx2_sec_session_ipsec_lp *sess;
	struct otx2_ipsec_po_sa_ctl *ctl_wrd;
	struct otx2_ipsec_po_in_sa *sa;
	struct otx2_sec_session *priv;
	struct cpt_request_info *req;
	uint64_t seq_in_sa, seq = 0;
	uint8_t esn;
	int ret;

	priv = get_sec_session_private_data(op->sym->sec_session);
	sess = &priv->ipsec.lp;
	sa = &sess->in_sa;

	ctl_wrd = &sa->ctl;
	esn = ctl_wrd->esn_en;
	winsz = sa->replay_win_sz;

	if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
		ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
	else {
		if (winsz) {
			esn_low = rte_be_to_cpu_32(sa->esn_low);
			esn_hi = rte_be_to_cpu_32(sa->esn_hi);
			seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
				sizeof(struct rte_ipv4_hdr) + 4);
			seql = rte_be_to_cpu_32(seql);
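
			/*
			 * Only the low 32 bits of the sequence number
			 * travel in the ESP header; with ESN enabled the
			 * high word is inferred from the replay window
			 * before the 64-bit anti-replay check below.
			 */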
			if (!esn)
				seq = (uint64_t)seql;
			else {
				seqh = anti_replay_get_seqh(winsz, seql,
							    esn_hi, esn_low);
				seq = ((uint64_t)seqh << 32) | seql;
			}

			if (unlikely(seq == 0))
				return IPSEC_ANTI_REPLAY_FAILED;

			ret = anti_replay_check(sa->replay, seq, winsz);
			if (unlikely(ret)) {
				otx2_err("Anti replay check failed");
				return IPSEC_ANTI_REPLAY_FAILED;
			}
		}

		ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
	}

	if (unlikely(ret)) {
		otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
		return ret;
	}

	ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);

	if (winsz && esn) {
		seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
		if (seq > seq_in_sa) {
			sa->esn_low = rte_cpu_to_be_32(seql);
			sa->esn_hi = rte_cpu_to_be_32(seqh);
		}
	}

	return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
			      struct pending_queue *pend_q)
{
	const int driver_id = otx2_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return -ENOMEM;

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	sym_op->session = sess;

	ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
	if (unlikely(ret))
		goto priv_put;

	return 0;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return ret;
}

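/*
 * Burst enqueue: symmetric ops are dispatched on session type (security
 * session, regular session or sessionless); asymmetric ops require a
 * session. The loop stops at the first op that cannot be enqueued and
 * returns the number accepted.
 */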
static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	uint16_t nb_allowed, count = 0;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct rte_crypto_op *op;
	int ret;

	pend_q = &qp->pend_q;

	nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count;
	if (nb_ops > nb_allowed)
		nb_ops = nb_allowed;

	for (count = 0; count < nb_ops; count++) {
		op = ops[count];
		if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
				ret = otx2_cpt_enqueue_sec(qp, op, pend_q);
			else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
			else
				ret = otx2_cpt_enqueue_sym_sessless(qp, op,
								    pend_q);
		} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
				ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
			else
				break;
		} else
			break;

		if (unlikely(ret))
			break;
	}

	return count;
}

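/*
 * When padding is in use, the RSA output written by the hardware appears
 * to start with a 2-byte length word followed by the recovered data, which
 * is what the rptr + 2 copies below unpack; with RTE_CRYPTO_RSA_PADDING_NONE
 * the output is always n.length bytes.
 */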
static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		     struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->message.length = rsa_ctx->n.length;
			memcpy(rsa->message.data, req->rptr,
			       rsa->message.length);
		} else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					     (*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy decrypted data.
			 */
			memcpy(rsa->message.data, req->rptr + 2,
			       rsa->message.length);
		}
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->sign.length = rsa_ctx->n.length;
			memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		} else {
			/* Get length of signed output */
			rsa->sign.length = rte_cpu_to_be_16
					  (*((uint16_t *)req->rptr));
			/*
			 * Offset output data pointer by length field
			 * (2 bytes) and copy signed data.
			 */
			memcpy(rsa->sign.data, req->rptr + 2,
			       rsa->sign.length);
		}
		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

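/*
 * ECDSA sign completions carry r followed by s, each starting on an
 * 8-byte boundary (hence the RTE_ALIGN_CEIL offset); both components are
 * prime_len bytes long.
 */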
static __rte_always_inline void
otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			       struct cpt_request_info *req,
			       struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
			   struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx2_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

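/*
 * rsp[] points at the completion words laid down at enqueue time:
 * rsp[0] is the metabuf, rsp[1] the crypto op; for security sessions
 * rsp[2] carries the request, rsp[3] the metadata length and rsp[4] the
 * IPsec transport/tunnel mode.
 */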
static void
otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
	struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
	vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
	struct rte_crypto_sym_op *sym_op = cop->sym;
	struct rte_mbuf *m = sym_op->m_src;
	struct rte_ipv6_hdr *ip6;
	struct rte_ipv4_hdr *ip;
	uint16_t m_len = 0;
	int mdata_len;
	char *data;

	mdata_len = (int)rsp[3];
	rte_pktmbuf_trim(m, mdata_len);

	if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
		data = rte_pktmbuf_mtod(m, char *);

		if (rsp[4] == OTX2_IPSEC_PO_TRANSPORT ||
		    rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV4) {
			ip = (struct rte_ipv4_hdr *)(data +
				OTX2_IPSEC_PO_INB_RPTR_HDR);
			m_len = rte_be_to_cpu_16(ip->total_length);
		} else if (rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV6) {
			ip6 = (struct rte_ipv6_hdr *)(data +
				OTX2_IPSEC_PO_INB_RPTR_HDR);
			m_len = rte_be_to_cpu_16(ip6->payload_len) +
				sizeof(struct rte_ipv6_hdr);
		}

		m->data_len = m_len;
		m->pkt_len = m_len;
		m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
	}
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
			      uintptr_t *rsp, uint8_t cc)
{
	unsigned int sz;

	if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
			if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
				otx2_cpt_sec_post_process(cop, rsp);
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			} else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			return;
		}

		if (likely(cc == NO_ERR)) {
			/* Verify authentication data if required */
			if (unlikely(rsp[2]))
				compl_auth_verify(cop, (uint8_t *)rsp[2],
						  rsp[3]);
			else
				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		} else {
			if (cc == ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			sym_session_clear(otx2_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
					cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}

	if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		if (likely(cc == NO_ERR)) {
			cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
			/*
			 * Pass cpt_req_info stored in metabuf during
			 * enqueue.
			 */
			rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
			otx2_cpt_asym_post_process(cop,
					(struct cpt_request_info *)rsp);
		} else
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
}

static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	int i, nb_pending, nb_completed;
	struct otx2_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_request_info *req;
	struct rte_crypto_op *cop;
	uint8_t cc[nb_ops];
	uintptr_t *rsp;
	void *metabuf;

	pend_q = &qp->pend_q;

	nb_pending = pend_q->pending_count;
	if (nb_ops > nb_pending)
		nb_ops = nb_pending;

	for (i = 0; i < nb_ops; i++) {
		req = (struct cpt_request_info *)
				pend_q->req_queue[pend_q->deq_head];

		cc[i] = otx2_cpt_compcode_get(req);
		if (unlikely(cc[i] == ERR_REQ_PENDING))
			break;

		ops[i] = req->op;

		MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
		pend_q->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		rsp = (void *)ops[i];

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

		free_op_meta(metabuf, qp->meta_info.pool);
	}

	return nb_completed;
}

void
otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = otx2_cpt_enqueue_burst;
	dev->dequeue_burst = otx2_cpt_dequeue_burst;

	rte_mb();
}

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
		    struct rte_cryptodev_config *conf)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int ret;

	if (conf->nb_queue_pairs > vf->max_queues) {
		CPT_LOG_ERR("Invalid number of queue pairs requested");
		return -EINVAL;
	}

	dev->feature_flags &= ~conf->ff_disable;

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx2_fpm_iova);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret) {
			CPT_LOG_ERR("Could not detach CPT queues");
			return ret;
		}
	}

	ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
	if (ret) {
		CPT_LOG_ERR("Could not attach CPT queues");
		return -ENODEV;
	}

	ret = otx2_cpt_msix_offsets_get(dev);
	if (ret) {
		CPT_LOG_ERR("Could not get MSI-X offsets");
		goto queues_detach;
	}

	/* Register error interrupts */
	ret = otx2_cpt_err_intr_register(dev);
	if (ret) {
		CPT_LOG_ERR("Could not register error interrupts");
		goto queues_detach;
	}

	ret = otx2_cpt_inline_init(dev);
	if (ret) {
		CPT_LOG_ERR("Could not enable inline IPsec");
		goto intr_unregister;
	}

	otx2_cpt_set_enqdeq_fns(dev);

	return 0;

intr_unregister:
	otx2_cpt_err_intr_unregister(dev);
queues_detach:
	otx2_cpt_queues_detach(dev);
	return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);

	CPT_PMD_INIT_FUNC_TRACE();

	return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;
	int i, ret = 0;

	for (i = 0; i < dev->data->nb_queue_pairs; i++) {
		ret = otx2_cpt_queue_pair_release(dev, i);
		if (ret)
			return ret;
	}

	/* Unregister error interrupts */
	if (vf->err_intr_registered)
		otx2_cpt_err_intr_unregister(dev);

	/* Detach queues */
	if (vf->nb_queues) {
		ret = otx2_cpt_queues_detach(dev);
		if (ret)
			CPT_LOG_ERR("Could not detach CPT queues");
	}

	return ret;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	struct otx2_cpt_vf *vf = dev->data->dev_private;

	if (info != NULL) {
		info->max_nb_queue_pairs = vf->max_queues;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx2_cpt_capabilities_get();
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx2_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
	}
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
{
	uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
	struct rte_pci_device *pci_dev;
	struct otx2_cpt_qp *qp;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[qp_id] != NULL)
		otx2_cpt_queue_pair_release(dev, qp_id);

	if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
		CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
			    conf->nb_descriptors);
		return -EINVAL;
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[2].addr == NULL) {
		CPT_LOG_ERR("Invalid PCI mem address");
		return -EIO;
	}

	qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
	if (qp == NULL) {
		CPT_LOG_ERR("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
	struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (qp == NULL)
		return -EINVAL;

	CPT_LOG_INFO("Releasing queue pair %d", qp_id);

	ret = otx2_cpt_qp_destroy(dev, qp);
	if (ret) {
		CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
		return ret;
	}

	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
			       struct rte_crypto_sym_xform *xform,
			       struct rte_cryptodev_sym_session *sess,
			       struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			  struct rte_crypto_asym_xform *xform,
			  struct rte_cryptodev_asym_session *sess,
			  struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	vq_cmd_word3_t vq_cmd_w3;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session_private_data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
	priv->cpt_inst_w7 = vq_cmd_w3.u64;

	set_asym_session_private_data(sess, dev->driver_id, priv);

	return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
			    struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);
	if (priv == NULL)
		return;

	/* Free resources allocated in session_cfg */
	cpt_free_asym_session_parameters(priv);

	/* Reset and free object back to pool */
	memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
	/* Device control ops */
	.dev_configure = otx2_cpt_dev_config,
	.dev_start = otx2_cpt_dev_start,
	.dev_stop = otx2_cpt_dev_stop,
	.dev_close = otx2_cpt_dev_close,
	.dev_infos_get = otx2_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = otx2_cpt_queue_pair_setup,
	.queue_pair_release = otx2_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = otx2_cpt_sym_session_get_size,
	.sym_session_configure = otx2_cpt_sym_session_configure,
	.sym_session_clear = otx2_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = otx2_cpt_asym_session_size_get,
	.asym_session_configure = otx2_cpt_asym_session_cfg,
	.asym_session_clear = otx2_cpt_asym_session_clear,
};