X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fcrypto%2Focteontx2%2Fotx2_cryptodev_ops.c;h=7eebb4963618cbceeccdac74bbc0d07a50127a67;hb=refs%2Fheads%2Fdelay;hp=b06d8f3844e6476fcaf24b3b30d978ea20b806c6;hpb=3e72548830acc0c5e4bd996eb6daf6b59cfc51e5;p=dpdk.git diff --git a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c index b06d8f3844..7eebb49636 100644 --- a/drivers/crypto/octeontx2/otx2_cryptodev_ops.c +++ b/drivers/crypto/octeontx2/otx2_cryptodev_ops.c @@ -6,6 +6,7 @@ #include #include +#include #include "otx2_cryptodev.h" #include "otx2_cryptodev_capabilities.h" @@ -13,14 +14,18 @@ #include "otx2_cryptodev_mbox.h" #include "otx2_cryptodev_ops.h" #include "otx2_mbox.h" +#include "otx2_sec_idev.h" #include "cpt_hw_types.h" #include "cpt_pmd_logs.h" #include "cpt_pmd_ops_helper.h" #include "cpt_ucode.h" +#include "cpt_ucode_asym.h" #define METABUF_POOL_CACHE_SIZE 512 +static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX]; + /* Forward declarations */ static int @@ -38,24 +43,39 @@ otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev, int nb_elements) { char mempool_name[RTE_MEMPOOL_NAMESIZE]; - int sg_mlen, lb_mlen, max_mlen, ret; struct cpt_qp_meta_info *meta_info; struct rte_mempool *pool; + int ret, max_mlen; + int asym_mlen = 0; + int lb_mlen = 0; + int sg_mlen = 0; + + if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) { - /* Get meta len for scatter gather mode */ - sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode(); + /* Get meta len for scatter gather mode */ + sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode(); - /* Extra 32B saved for future considerations */ - sg_mlen += 4 * sizeof(uint64_t); + /* Extra 32B saved for future considerations */ + sg_mlen += 4 * sizeof(uint64_t); - /* Get meta len for linear buffer (direct) mode */ - lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode(); + /* Get meta len for linear buffer (direct) mode */ + lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode(); + + /* Extra 32B saved for future considerations */ + lb_mlen += 4 * sizeof(uint64_t); + } - /* Extra 32B saved for future considerations */ - lb_mlen += 4 * sizeof(uint64_t); + if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) { - /* Check max requirement for meta buffer */ - max_mlen = RTE_MAX(lb_mlen, sg_mlen); + /* Get meta len required for asymmetric operations */ + asym_mlen = cpt_pmd_ops_helper_asym_get_mlen(); + } + + /* + * Check max requirement for meta buffer to + * support crypto op of any type (sym/asym). 
+ */ + max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen); /* Allocate mempool */ @@ -109,6 +129,34 @@ otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp) meta_info->sg_mlen = 0; } +static int +otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp) +{ + static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1); + uint16_t port_id, nb_ethport = rte_eth_dev_count_avail(); + int i, ret; + + for (i = 0; i < nb_ethport; i++) { + port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport; + if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id])) + break; + } + + if (i >= nb_ethport) + return 0; + + ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id); + if (ret) + return ret; + + /* Publish inline Tx QP to eth dev security */ + ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp); + if (ret) + return ret; + + return 0; +} + static struct otx2_cpt_qp * otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id, uint8_t group) @@ -200,8 +248,20 @@ otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id, qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0); + ret = otx2_sec_idev_tx_cpt_qp_remove(qp); + if (ret && (ret != -ENOENT)) { + CPT_LOG_ERR("Could not delete inline configuration"); + goto mempool_destroy; + } + otx2_cpt_iq_disable(qp); + ret = otx2_cpt_qp_inline_cfg(dev, qp); + if (ret) { + CPT_LOG_ERR("Could not configure queue for inline IPsec"); + goto mempool_destroy; + } + ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO, size_div40); if (ret) { @@ -227,6 +287,12 @@ otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp) char name[RTE_MEMZONE_NAMESIZE]; int ret; + ret = otx2_sec_idev_tx_cpt_qp_remove(qp); + if (ret && (ret != -ENOENT)) { + CPT_LOG_ERR("Could not delete inline configuration"); + return ret; + } + otx2_cpt_iq_disable(qp); otx2_cpt_metabuf_mempool_destroy(qp); @@ -329,6 +395,516 @@ sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess) rte_mempool_put(pool, priv); } +static __rte_always_inline int32_t __hot +otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp, + struct pending_queue *pend_q, + struct cpt_request_info *req) +{ + void *lmtline = qp->lmtline; + union cpt_inst_s inst; + uint64_t lmt_status; + + if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN)) + return -EAGAIN; + + inst.u[0] = 0; + inst.s9x.res_addr = req->comp_baddr; + inst.u[2] = 0; + inst.u[3] = 0; + + inst.s9x.ei0 = req->ist.ei0; + inst.s9x.ei1 = req->ist.ei1; + inst.s9x.ei2 = req->ist.ei2; + inst.s9x.ei3 = req->ist.ei3; + + req->time_out = rte_get_timer_cycles() + + DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz(); + + do { + /* Copy CPT command to LMTLINE */ + memcpy(lmtline, &inst, sizeof(inst)); + + /* + * Make sure compiler does not reorder memcpy and ldeor. + * LMTST transactions are always flushed from the write + * buffer immediately, a DMB is not required to push out + * LMTSTs. 
+ */ + rte_cio_wmb(); + lmt_status = otx2_lmt_submit(qp->lf_nq_reg); + } while (lmt_status == 0); + + pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req; + + /* We will use soft queue length here to limit requests */ + MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN); + pend_q->pending_count += 1; + + return 0; +} + +static __rte_always_inline int32_t __hot +otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp, + struct rte_crypto_op *op, + struct pending_queue *pend_q) +{ + struct cpt_qp_meta_info *minfo = &qp->meta_info; + struct rte_crypto_asym_op *asym_op = op->asym; + struct asym_op_params params = {0}; + struct cpt_asym_sess_misc *sess; + vq_cmd_word3_t *w3; + uintptr_t *cop; + void *mdata; + int ret; + + if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) { + CPT_LOG_ERR("Could not allocate meta buffer for request"); + return -ENOMEM; + } + + sess = get_asym_session_private_data(asym_op->session, + otx2_cryptodev_driver_id); + + /* Store IO address of the mdata to meta_buf */ + params.meta_buf = rte_mempool_virt2iova(mdata); + + cop = mdata; + cop[0] = (uintptr_t)mdata; + cop[1] = (uintptr_t)op; + cop[2] = cop[3] = 0ULL; + + params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t)); + params.req->op = cop; + + /* Adjust meta_buf to point to end of cpt_request_info structure */ + params.meta_buf += (4 * sizeof(uintptr_t)) + + sizeof(struct cpt_request_info); + switch (sess->xfrm_type) { + case RTE_CRYPTO_ASYM_XFORM_MODEX: + ret = cpt_modex_prep(¶ms, &sess->mod_ctx); + if (unlikely(ret)) + goto req_fail; + break; + case RTE_CRYPTO_ASYM_XFORM_RSA: + ret = cpt_enqueue_rsa_op(op, ¶ms, sess); + if (unlikely(ret)) + goto req_fail; + break; + case RTE_CRYPTO_ASYM_XFORM_ECDSA: + ret = cpt_enqueue_ecdsa_op(op, ¶ms, sess, otx2_fpm_iova); + if (unlikely(ret)) + goto req_fail; + break; + case RTE_CRYPTO_ASYM_XFORM_ECPM: + ret = cpt_ecpm_prep(&asym_op->ecpm, ¶ms, + sess->ec_ctx.curveid); + if (unlikely(ret)) + goto req_fail; + break; + default: + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + ret = -EINVAL; + goto req_fail; + } + + /* Set engine group of AE */ + w3 = (vq_cmd_word3_t *)¶ms.req->ist.ei3; + w3->s.grp = OTX2_CPT_EGRP_AE; + + ret = otx2_cpt_enqueue_req(qp, pend_q, params.req); + + if (unlikely(ret)) { + CPT_LOG_DP_ERR("Could not enqueue crypto req"); + goto req_fail; + } + + return 0; + +req_fail: + free_op_meta(mdata, minfo->pool); + + return ret; +} + +static __rte_always_inline int __hot +otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op, + struct pending_queue *pend_q) +{ + struct rte_crypto_sym_op *sym_op = op->sym; + struct cpt_request_info *req; + struct cpt_sess_misc *sess; + vq_cmd_word3_t *w3; + uint64_t cpt_op; + void *mdata; + int ret; + + sess = get_sym_session_private_data(sym_op->session, + otx2_cryptodev_driver_id); + + cpt_op = sess->cpt_op; + + if (cpt_op & CPT_OP_CIPHER_MASK) + ret = fill_fc_params(op, sess, &qp->meta_info, &mdata, + (void **)&req); + else + ret = fill_digest_params(op, sess, &qp->meta_info, &mdata, + (void **)&req); + + if (unlikely(ret)) { + CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x", + op, (unsigned int)cpt_op, ret); + return ret; + } + + w3 = ((vq_cmd_word3_t *)(&req->ist.ei3)); + w3->s.grp = sess->egrp; + + ret = otx2_cpt_enqueue_req(qp, pend_q, req); + + if (unlikely(ret)) { + /* Free buffer allocated by fill params routines */ + free_op_meta(mdata, qp->meta_info.pool); + } + + return ret; +} + +static __rte_always_inline int __hot +otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct 
rte_crypto_op *op, + struct pending_queue *pend_q) +{ + const int driver_id = otx2_cryptodev_driver_id; + struct rte_crypto_sym_op *sym_op = op->sym; + struct rte_cryptodev_sym_session *sess; + int ret; + + /* Create temporary session */ + + if (rte_mempool_get(qp->sess_mp, (void **)&sess)) + return -ENOMEM; + + ret = sym_session_configure(driver_id, sym_op->xform, sess, + qp->sess_mp_priv); + if (ret) + goto sess_put; + + sym_op->session = sess; + + ret = otx2_cpt_enqueue_sym(qp, op, pend_q); + + if (unlikely(ret)) + goto priv_put; + + return 0; + +priv_put: + sym_session_clear(driver_id, sess); +sess_put: + rte_mempool_put(qp->sess_mp, sess); + return ret; +} + +static uint16_t +otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops) +{ + uint16_t nb_allowed, count = 0; + struct otx2_cpt_qp *qp = qptr; + struct pending_queue *pend_q; + struct rte_crypto_op *op; + int ret; + + pend_q = &qp->pend_q; + + nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count; + if (nb_ops > nb_allowed) + nb_ops = nb_allowed; + + for (count = 0; count < nb_ops; count++) { + op = ops[count]; + if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) + ret = otx2_cpt_enqueue_sym(qp, op, pend_q); + else + ret = otx2_cpt_enqueue_sym_sessless(qp, op, + pend_q); + } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { + if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) + ret = otx2_cpt_enqueue_asym(qp, op, pend_q); + else + break; + } else + break; + + if (unlikely(ret)) + break; + } + + return count; +} + +static __rte_always_inline void +otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req, + struct rte_crypto_rsa_xform *rsa_ctx) +{ + struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa; + + switch (rsa->op_type) { + case RTE_CRYPTO_ASYM_OP_ENCRYPT: + rsa->cipher.length = rsa_ctx->n.length; + memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length); + break; + case RTE_CRYPTO_ASYM_OP_DECRYPT: + if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) { + rsa->message.length = rsa_ctx->n.length; + memcpy(rsa->message.data, req->rptr, + rsa->message.length); + } else { + /* Get length of decrypted output */ + rsa->message.length = rte_cpu_to_be_16 + (*((uint16_t *)req->rptr)); + /* + * Offset output data pointer by length field + * (2 bytes) and copy decrypted data. + */ + memcpy(rsa->message.data, req->rptr + 2, + rsa->message.length); + } + break; + case RTE_CRYPTO_ASYM_OP_SIGN: + rsa->sign.length = rsa_ctx->n.length; + memcpy(rsa->sign.data, req->rptr, rsa->sign.length); + break; + case RTE_CRYPTO_ASYM_OP_VERIFY: + if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) { + rsa->sign.length = rsa_ctx->n.length; + memcpy(rsa->sign.data, req->rptr, rsa->sign.length); + } else { + /* Get length of signed output */ + rsa->sign.length = rte_cpu_to_be_16 + (*((uint16_t *)req->rptr)); + /* + * Offset output data pointer by length field + * (2 bytes) and copy signed data. 
+ */ + memcpy(rsa->sign.data, req->rptr + 2, + rsa->sign.length); + } + if (memcmp(rsa->sign.data, rsa->message.data, + rsa->message.length)) { + CPT_LOG_DP_ERR("RSA verification failed"); + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } + break; + default: + CPT_LOG_DP_DEBUG("Invalid RSA operation type"); + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } +} + +static __rte_always_inline void +otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa, + struct cpt_request_info *req, + struct cpt_asym_ec_ctx *ec) +{ + int prime_len = ec_grp[ec->curveid].prime.length; + + if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY) + return; + + /* Separate out sign r and s components */ + memcpy(ecdsa->r.data, req->rptr, prime_len); + memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len); + ecdsa->r.length = prime_len; + ecdsa->s.length = prime_len; +} + +static __rte_always_inline void +otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm, + struct cpt_request_info *req, + struct cpt_asym_ec_ctx *ec) +{ + int prime_len = ec_grp[ec->curveid].prime.length; + + memcpy(ecpm->r.x.data, req->rptr, prime_len); + memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len); + ecpm->r.x.length = prime_len; + ecpm->r.y.length = prime_len; +} + +static void +otx2_cpt_asym_post_process(struct rte_crypto_op *cop, + struct cpt_request_info *req) +{ + struct rte_crypto_asym_op *op = cop->asym; + struct cpt_asym_sess_misc *sess; + + sess = get_asym_session_private_data(op->session, + otx2_cryptodev_driver_id); + + switch (sess->xfrm_type) { + case RTE_CRYPTO_ASYM_XFORM_RSA: + otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx); + break; + case RTE_CRYPTO_ASYM_XFORM_MODEX: + op->modex.result.length = sess->mod_ctx.modulus.length; + memcpy(op->modex.result.data, req->rptr, + op->modex.result.length); + break; + case RTE_CRYPTO_ASYM_XFORM_ECDSA: + otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx); + break; + case RTE_CRYPTO_ASYM_XFORM_ECPM: + otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx); + break; + default: + CPT_LOG_DP_DEBUG("Invalid crypto xform type"); + cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + break; + } +} + +static inline void +otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop, + uintptr_t *rsp, uint8_t cc) +{ + if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) { + if (likely(cc == NO_ERR)) { + /* Verify authentication data if required */ + if (unlikely(rsp[2])) + compl_auth_verify(cop, (uint8_t *)rsp[2], + rsp[3]); + else + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + } else { + if (cc == ERR_GC_ICV_MISCOMPARE) + cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + else + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } + + if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) { + sym_session_clear(otx2_cryptodev_driver_id, + cop->sym->session); + rte_mempool_put(qp->sess_mp, cop->sym->session); + cop->sym->session = NULL; + } + } + + if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) { + if (likely(cc == NO_ERR)) { + cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + /* + * Pass cpt_req_info stored in metabuf during + * enqueue. 
+ */ + rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t)); + otx2_cpt_asym_post_process(cop, + (struct cpt_request_info *)rsp); + } else + cop->status = RTE_CRYPTO_OP_STATUS_ERROR; + } +} + +static __rte_always_inline uint8_t +otx2_cpt_compcode_get(struct cpt_request_info *req) +{ + volatile struct cpt_res_s_9s *res; + uint8_t ret; + + res = (volatile struct cpt_res_s_9s *)req->completion_addr; + + if (unlikely(res->compcode == CPT_9X_COMP_E_NOTDONE)) { + if (rte_get_timer_cycles() < req->time_out) + return ERR_REQ_PENDING; + + CPT_LOG_DP_ERR("Request timed out"); + return ERR_REQ_TIMEOUT; + } + + if (likely(res->compcode == CPT_9X_COMP_E_GOOD)) { + ret = NO_ERR; + if (unlikely(res->uc_compcode)) { + ret = res->uc_compcode; + CPT_LOG_DP_DEBUG("Request failed with microcode error"); + CPT_LOG_DP_DEBUG("MC completion code 0x%x", + res->uc_compcode); + } + } else { + CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode); + + ret = res->compcode; + switch (res->compcode) { + case CPT_9X_COMP_E_INSTERR: + CPT_LOG_DP_ERR("Request failed with instruction error"); + break; + case CPT_9X_COMP_E_FAULT: + CPT_LOG_DP_ERR("Request failed with DMA fault"); + break; + case CPT_9X_COMP_E_HWERR: + CPT_LOG_DP_ERR("Request failed with hardware error"); + break; + default: + CPT_LOG_DP_ERR("Request failed with unknown completion code"); + } + } + + return ret; +} + +static uint16_t +otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops) +{ + int i, nb_pending, nb_completed; + struct otx2_cpt_qp *qp = qptr; + struct pending_queue *pend_q; + struct cpt_request_info *req; + struct rte_crypto_op *cop; + uint8_t cc[nb_ops]; + struct rid *rid; + uintptr_t *rsp; + void *metabuf; + + pend_q = &qp->pend_q; + + nb_pending = pend_q->pending_count; + + if (nb_ops > nb_pending) + nb_ops = nb_pending; + + for (i = 0; i < nb_ops; i++) { + rid = &pend_q->rid_queue[pend_q->deq_head]; + req = (struct cpt_request_info *)(rid->rid); + + cc[i] = otx2_cpt_compcode_get(req); + + if (unlikely(cc[i] == ERR_REQ_PENDING)) + break; + + ops[i] = req->op; + + MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN); + pend_q->pending_count -= 1; + } + + nb_completed = i; + + for (i = 0; i < nb_completed; i++) { + rsp = (void *)ops[i]; + + metabuf = (void *)rsp[0]; + cop = (void *)rsp[1]; + + ops[i] = cop; + + otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]); + + free_op_meta(metabuf, qp->meta_info.pool); + } + + return nb_completed; +} + /* PMD ops */ static int @@ -345,6 +921,13 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev, dev->feature_flags &= ~conf->ff_disable; + if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) { + /* Initialize shared FPM table */ + ret = cpt_fpm_init(otx2_fpm_iova); + if (ret) + return ret; + } + /* Unregister error interrupts */ if (vf->err_intr_registered) otx2_cpt_err_intr_unregister(dev); @@ -378,9 +961,20 @@ otx2_cpt_dev_config(struct rte_cryptodev *dev, goto queues_detach; } + ret = otx2_cpt_inline_init(dev); + if (ret) { + CPT_LOG_ERR("Could not enable inline IPsec"); + goto intr_unregister; + } + + dev->enqueue_burst = otx2_cpt_enqueue_burst; + dev->dequeue_burst = otx2_cpt_dequeue_burst; + rte_mb(); return 0; +intr_unregister: + otx2_cpt_err_intr_unregister(dev); queues_detach: otx2_cpt_queues_detach(dev); return ret; @@ -399,9 +993,10 @@ otx2_cpt_dev_start(struct rte_cryptodev *dev) static void otx2_cpt_dev_stop(struct rte_cryptodev *dev) { - RTE_SET_USED(dev); - CPT_PMD_INIT_FUNC_TRACE(); + + if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) + 
cpt_fpm_clear(); } static int @@ -537,6 +1132,66 @@ otx2_cpt_sym_session_clear(struct rte_cryptodev *dev, return sym_session_clear(dev->driver_id, sess); } +static unsigned int +otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct cpt_asym_sess_misc); +} + +static int +otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev, + struct rte_crypto_asym_xform *xform, + struct rte_cryptodev_asym_session *sess, + struct rte_mempool *pool) +{ + struct cpt_asym_sess_misc *priv; + int ret; + + CPT_PMD_INIT_FUNC_TRACE(); + + if (rte_mempool_get(pool, (void **)&priv)) { + CPT_LOG_ERR("Could not allocate session_private_data"); + return -ENOMEM; + } + + memset(priv, 0, sizeof(struct cpt_asym_sess_misc)); + + ret = cpt_fill_asym_session_parameters(priv, xform); + if (ret) { + CPT_LOG_ERR("Could not configure session parameters"); + + /* Return session to mempool */ + rte_mempool_put(pool, priv); + return ret; + } + + set_asym_session_private_data(sess, dev->driver_id, priv); + return 0; +} + +static void +otx2_cpt_asym_session_clear(struct rte_cryptodev *dev, + struct rte_cryptodev_asym_session *sess) +{ + struct cpt_asym_sess_misc *priv; + struct rte_mempool *sess_mp; + + CPT_PMD_INIT_FUNC_TRACE(); + + priv = get_asym_session_private_data(sess, dev->driver_id); + if (priv == NULL) + return; + + /* Free resources allocated in session_cfg */ + cpt_free_asym_session_parameters(priv); + + /* Reset and free object back to pool */ + memset(priv, 0, otx2_cpt_asym_session_size_get(dev)); + sess_mp = rte_mempool_from_obj(priv); + set_asym_session_private_data(sess, dev->driver_id, NULL); + rte_mempool_put(sess_mp, priv); +} + struct rte_cryptodev_ops otx2_cpt_ops = { /* Device control ops */ .dev_configure = otx2_cpt_dev_config, @@ -555,4 +1210,10 @@ struct rte_cryptodev_ops otx2_cpt_ops = { .sym_session_get_size = otx2_cpt_sym_session_get_size, .sym_session_configure = otx2_cpt_sym_session_configure, .sym_session_clear = otx2_cpt_sym_session_clear, + + /* Asymmetric crypto ops */ + .asym_session_get_size = otx2_cpt_asym_session_size_get, + .asym_session_configure = otx2_cpt_asym_session_cfg, + .asym_session_clear = otx2_cpt_asym_session_clear, + };
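
A minimal usage sketch (not part of the patch above): it illustrates how an application could exercise the asymmetric session ops and enqueue/dequeue paths this diff registers, using the pre-21.11 DPDK asymmetric API (rte_cryptodev_asym_session_create/init) that this driver version targets. The device id, queue pair 0, and the op/session mempools are assumed to have been configured by the application beforehand; the modulus, exponent, base and result buffers are purely illustrative.

#include <errno.h>
#include <string.h>

#include <rte_crypto.h>
#include <rte_cryptodev.h>

/* Assumed: dev_id refers to an initialized octeontx2 cryptodev with
 * RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO enabled and queue pair 0 set up;
 * op_pool and asym_sess_pool were created by the application.
 */
static int
modex_example(uint8_t dev_id, struct rte_mempool *op_pool,
	      struct rte_mempool *asym_sess_pool,
	      uint8_t *mod, uint8_t *exp, uint16_t len,
	      uint8_t *base, uint16_t base_len, uint8_t *result)
{
	struct rte_cryptodev_asym_session *sess;
	struct rte_crypto_asym_xform xform;
	struct rte_crypto_op *op, *deq_op;
	uint16_t nb;

	memset(&xform, 0, sizeof(xform));
	xform.xform_type = RTE_CRYPTO_ASYM_XFORM_MODEX;
	xform.modex.modulus.data = mod;
	xform.modex.modulus.length = len;
	xform.modex.exponent.data = exp;
	xform.modex.exponent.length = len;

	sess = rte_cryptodev_asym_session_create(asym_sess_pool);
	if (sess == NULL)
		return -ENOMEM;

	/* Binds the session to this device; reaches
	 * otx2_cpt_asym_session_cfg() through the ops table above. */
	if (rte_cryptodev_asym_session_init(dev_id, sess, &xform,
					    asym_sess_pool))
		return -EINVAL;

	op = rte_crypto_op_alloc(op_pool, RTE_CRYPTO_OP_TYPE_ASYMMETRIC);
	if (op == NULL)
		return -ENOMEM;

	op->asym->modex.base.data = base;
	op->asym->modex.base.length = base_len;
	op->asym->modex.result.data = result;
	op->asym->modex.result.length = len;
	rte_crypto_op_attach_asym_session(op, sess);

	/* Enqueue/dequeue go through otx2_cpt_enqueue_burst() and
	 * otx2_cpt_dequeue_burst() added in this patch. */
	nb = rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1);
	if (nb != 1)
		return -EBUSY;

	do {
		nb = rte_cryptodev_dequeue_burst(dev_id, 0, &deq_op, 1);
	} while (nb == 0);

	return deq_op->status == RTE_CRYPTO_OP_STATUS_SUCCESS ? 0 : -EIO;
}

Note that a session is mandatory here: otx2_cpt_enqueue_burst() only accepts sessionless operation for the symmetric path and stops the burst on a sessionless asymmetric op.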