/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_eventdev.h>
#include <rte_event_crypto_adapter.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"
#include "otx_cryptodev_ops.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#include "ssovf_worker.h"
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
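
/*
 * The VF services misc (mailbox/error) interrupts by polling: the alarm
 * callback below re-arms itself every CPT_INTR_POLL_INTERVAL_MS.
 */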
static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;
	otx_cpt_poll_misc(cptvf);
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}

static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_dev_config(struct rte_cryptodev *dev,
		   struct rte_cryptodev_config *config __rte_unused)
{
	int ret = 0;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx_fpm_iova);

	return ret;
}

static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}

static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();

	otx_cpt_stop_device(cptvf);
}

static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}

static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities(info->feature_flags);
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}

static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too big %d, using default "
			     "queue length of %d", qp_conf->nb_descriptors,
			     DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}

static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}
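
/*
 * Reject xform chains the hardware cannot handle: auth-then-encrypt is
 * only supported as SHA1-HMAC + AES-CBC, decrypt-then-auth only as
 * AES-CBC + SHA1-HMAC, 3DES-CBC combined with plain SHA1 is unsupported
 * in either order, and NULL-auth verify-only is unsupported.
 */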
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
	    (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
	     xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
	    xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
	     xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
	    xform->next != NULL &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	return 0;
}
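
/*
 * Fill session private data from the xform chain and cache the IOVA of
 * the microcode context in CPT instruction word 7 for the enqueue path.
 */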
static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct rte_crypto_sym_xform *temp_xform = xform;
	struct cpt_sess_misc *misc;
	vq_cmd_word3_t vq_cmd_w3;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_sess_misc) +
			offsetof(struct cpt_ctx, mc_ctx));

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}

		if (ret)
			goto priv_put;
	}

	if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
	    cpt_mac_len_verify(&temp_xform->auth)) {
		struct cpt_ctx *ctx = SESS_PRIV(misc);

		CPT_LOG_ERR("MAC length is not supported");
		if (ctx->auth_key != NULL) {
			rte_free(ctx->auth_key);
			ctx->auth_key = NULL;
		}
		ret = -ENOTSUP;
		goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, priv);

	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	vq_cmd_w3.u64 = 0;
	vq_cmd_w3.s.grp = 0;
	vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
							 mc_ctx);

	misc->cpt_inst_w7 = vq_cmd_w3.u64;

	return 0;

priv_put:
	rte_mempool_put(pool, priv);
	return -ENOTSUP;
}

static void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_sym_session_private_data(sess, driver_id);
	struct cpt_sess_misc *misc;
	struct rte_mempool *pool;
	struct cpt_ctx *ctx;

	if (priv == NULL)
		return;

	misc = priv;
	ctx = SESS_PRIV(misc);

	if (ctx->auth_key != NULL)
		rte_free(ctx->auth_key);

	memset(priv, 0, cpt_get_session_size());

	pool = rte_mempool_from_obj(priv);

	set_sym_session_private_data(sess, driver_id, NULL);

	rte_mempool_put(pool, priv);
}

static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx_cpt_asym_session_cfg(struct rte_cryptodev *dev __rte_unused,
			 struct rte_crypto_asym_xform *xform,
			 struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv = (struct cpt_asym_sess_misc *)
			sess->sess_private_data;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");
		return ret;
	}

	priv->cpt_inst_w7 = 0;

	return 0;
}

static void
otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = (struct cpt_asym_sess_misc *)sess->sess_private_data;

	if (priv == NULL)
		return;

	/* Free resources allocated during session configure */
	cpt_free_asym_session_parameters(priv);
	memset(priv, 0, otx_cpt_asym_session_size_get(dev));
}
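
/*
 * Common enqueue tail: fill the CPT instruction for the prepared request,
 * stamp its timeout, and mark it in the software queue. The doorbell is
 * rung by the caller, once per burst.
 */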
static __rte_always_inline void * __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			void *req, uint64_t cpt_inst_w7)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	fill_cpt_inst(instance, req, cpt_inst_w7);

	CPT_LOG_DP_DEBUG("req: %p op: %p", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			     DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);

	return req;
}

static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
			struct rte_crypto_op *op)
{
	struct cpt_qp_meta_info *minfo = &instance->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	void *req;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
		return NULL;
	}

	sess = (struct cpt_asym_sess_misc *)
			asym_op->session->sess_private_data;

	/* Store phys_addr of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf by crypto_op data and request_info struct */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			   sizeof(struct cpt_request_info);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		goto req_fail;
	}

	req = otx_cpt_request_enqueue(instance, params.req, sess->cpt_inst_w7);
	if (unlikely(req == NULL)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return req;

req_fail:
	free_op_meta(mdata, minfo->pool);

	return NULL;
}

static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct cpt_request_info *prep_req;
	void *mdata = NULL;
	uint64_t cpt_op;
	void *req;
	int ret;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
				     (void **)&prep_req);
	else
		ret = fill_digest_params(op, sess, &instance->meta_info,
					 &mdata, (void **)&prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return NULL;
	}

	/* Enqueue prepared instruction to h/w */
	req = otx_cpt_request_enqueue(instance, prep_req, sess->cpt_inst_w7);
	if (unlikely(req == NULL))
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, instance->meta_info.pool);

	return req;
}
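
/*
 * Session-less path: create a temporary session from the queue pair's
 * session mempools for this single op; it is torn down again on the
 * dequeue side (see free_sym_session_data()).
 */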
static __rte_always_inline void * __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op)
{
	const int driver_id = otx_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	void *req;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(instance->sess_mp);
	if (sess == NULL) {
		CPT_LOG_DP_ERR("Could not create temp session");
		return NULL;
	}

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    instance->sess_mp_priv);
	if (unlikely(ret != 0))
		goto sess_put;

	sym_op->session = sess;

	/* Enqueue op with the tmp session set */
	req = otx_cpt_enq_single_sym(instance, op);
	if (unlikely(req == NULL))
		goto priv_put;

	return req;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(instance->sess_mp, sess);
	return NULL;
}

#define OP_TYPE_SYM	0
#define OP_TYPE_ASYM	1

static __rte_always_inline void *__rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   const uint8_t op_type)
{
	/* Check for the type */

	if (op_type == OP_TYPE_SYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_sym(inst, op);
		else
			return otx_cpt_enq_single_sym_sessless(inst, op);
	}

	if (op_type == OP_TYPE_ASYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_asym(inst, op);
	}

	/* Should not reach here */
	return NULL;
}

static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count, free_slots;
	void *req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	free_slots = pending_queue_free_slots(pqueue, DEFAULT_CMD_QLEN,
					      DEFAULT_CMD_QRSVD_SLOTS);
	if (nb_ops > free_slots)
		nb_ops = free_slots;

	count = 0;
	while (likely(count < nb_ops)) {

		/* Enqueue single op */
		req = otx_cpt_enq_single(instance, ops[count], op_type);

		if (unlikely(req == NULL))
			break;

		pending_queue_push(pqueue, req, count, DEFAULT_CMD_QLEN);
		count++;
	}

	if (likely(count)) {
		pending_queue_commit(pqueue, count, DEFAULT_CMD_QLEN);
		otx_cpt_ring_dbell(instance, count);
	}

	return count;
}

static uint16_t
otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
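
/*
 * Hand a submitted request to the SSO event device. add_work encodes the
 * flow id, RTE_EVENT_TYPE_CRYPTODEV and the sched type; for ordered
 * scheduling (sched_type == 0) the head wait keeps submissions in order.
 */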
static __rte_always_inline void
submit_request_to_sso(struct ssows *ws, uintptr_t req,
		      struct rte_event *rsp_info)
{
	uint64_t add_work;

	add_work = rsp_info->flow_id | (RTE_EVENT_TYPE_CRYPTODEV << 28) |
		   ((uint64_t)(rsp_info->sched_type) << 32);

	if (!rsp_info->sched_type)
		ssows_head_wait(ws);

	rte_atomic_thread_fence(__ATOMIC_RELEASE);
	ssovf_store_pair(add_work, req, ws->grps[rsp_info->queue_id]);
}

uint16_t __rte_hot
otx_crypto_adapter_enqueue(void *port, struct rte_crypto_op *op)
{
	union rte_event_crypto_metadata *ec_mdata;
	struct cpt_instance *instance;
	struct cpt_request_info *req;
	struct rte_event *rsp_info;
	uint8_t op_type, cdev_id;
	uint16_t qp_id;

	ec_mdata = rte_cryptodev_session_event_mdata_get(op);
	if (unlikely(ec_mdata == NULL)) {
		rte_errno = EINVAL;
		return 0;
	}

	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	rsp_info = &ec_mdata->response_info;
	instance = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];

	if (unlikely(!instance->ca_enabled)) {
		rte_errno = EINVAL;
		return 0;
	}

	op_type = op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
							     OP_TYPE_ASYM;
	req = otx_cpt_enq_single(instance, op, op_type);
	if (unlikely(req == NULL))
		return 0;

	otx_cpt_ring_dbell(instance, 1);

	submit_request_to_sso(port, (uintptr_t)req, rsp_info);

	return 1;
}
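
/*
 * RSA post-processing: copy the completion data at req->rptr back into
 * the op. For padded decrypt/verify output the microcode returns the
 * output length in a leading 16-bit field, which is read and skipped.
 */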
static inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		    struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->padding.type == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->message.length = rsa_ctx->n.length;
		} else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					      (*((uint16_t *)req->rptr));
			/* Offset data pointer by length field */
			req->rptr += 2;
		}
		memcpy(rsa->message.data, req->rptr, rsa->message.length);
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->padding.type == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->sign.length = rsa_ctx->n.length;
		} else {
			/* Get length of signed output */
			rsa->sign.length = rte_cpu_to_be_16
					   (*((uint16_t *)req->rptr));
			/* Offset data pointer by length field */
			req->rptr += 2;
		}
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);

		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static __rte_always_inline void
otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			     struct cpt_request_info *req,
			     struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
	       prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
			  struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = (struct cpt_asym_sess_misc *)op->session->sess_private_data;

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
			     const uint8_t op_type)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */

	if ((op_type == OP_TYPE_SYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}

	if ((op_type == OP_TYPE_ASYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)) {
		rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
		otx_cpt_asym_post_process(cop, (struct cpt_request_info *)rsp);
	}
}
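
/*
 * Tear down the temporary session of a session-less op: both the private
 * data and the session header are returned to their mempools.
 */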
static inline void
free_sym_session_data(const struct cpt_instance *instance,
		      struct rte_crypto_op *cop)
{
	void *sess_private_data_t = get_sym_session_private_data(
		cop->sym->session, otx_cryptodev_driver_id);

	memset(sess_private_data_t, 0, cpt_get_session_size());
	memset(cop->sym->session, 0,
	       rte_cryptodev_sym_get_existing_header_session_size(
		       cop->sym->session));
	rte_mempool_put(instance->sess_mp_priv, sess_private_data_t);
	rte_mempool_put(instance->sess_mp, cop->sym->session);
	cop->sym->session = NULL;
}

static __rte_always_inline struct rte_crypto_op *
otx_cpt_process_response(const struct cpt_instance *instance, uintptr_t *rsp,
			 uint8_t cc, const uint8_t op_type)
{
	struct rte_crypto_op *cop;
	void *metabuf;

	metabuf = (void *)rsp[0];
	cop = (void *)rsp[1];

	/* Check completion code */
	if (likely(cc == 0)) {
		/* H/w success pkt. Post process */
		otx_cpt_dequeue_post_process(cop, rsp, op_type);
	} else if (cc == ERR_GC_ICV_MISCOMPARE) {
		/* auth data mismatch */
		cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
	} else {
		/* Error */
		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS))
		free_sym_session_data(instance, cop);
	free_op_meta(metabuf, instance->meta_info.pool);

	return cop;
}
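
/*
 * Burst dequeue: scan the pending queue in submission order, stop at the
 * first request that is still pending, then post-process each completed
 * request and hand the ops back to the application.
 */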
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_request_info *user_req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	struct pending_queue *pqueue = &cptvf->pqueue;

	pcount = pending_queue_level(pqueue, DEFAULT_CMD_QLEN);

	/* Ensure pcount isn't read before data lands */
	rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		pending_queue_peek(pqueue, (void **)&user_req,
				   DEFAULT_CMD_QLEN, i + 1 < count);

		ret = check_nb_command_id(user_req, instance);

		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/* Return completion code and op handle */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		pending_queue_pop(pqueue, DEFAULT_CMD_QLEN);
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {
		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i+1]);

		ops[i] = otx_cpt_process_response(instance, (void *)ops[i],
						  cc[i], op_type);
	}

	return nb_completed;
}

static uint16_t
otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}

uintptr_t __rte_hot
otx_crypto_adapter_dequeue(uintptr_t get_work1)
{
	const struct cpt_instance *instance;
	struct cpt_request_info *req;
	struct rte_crypto_op *cop;
	uint8_t cc, op_type;
	uintptr_t *rsp;

	req = (struct cpt_request_info *)get_work1;
	instance = req->inst;
	rsp = req->op;

	cop = (void *)rsp[1];
	op_type = cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC ? OP_TYPE_SYM :
							      OP_TYPE_ASYM;

	do {
		cc = check_nb_command_id(
			req, (struct cpt_instance *)(uintptr_t)instance);
	} while (cc == ERR_REQ_PENDING);

	cop = otx_cpt_process_response(instance, (void *)req->op, cc, op_type);

	return (uintptr_t)cop;
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear,

	.asym_session_get_size = otx_cpt_asym_session_size_get,
	.asym_session_configure = otx_cpt_asym_session_cfg,
	.asym_session_clear = otx_cpt_asym_session_clear,
};
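
/* Probe-time creation of a cryptodev on top of a CPT VF. */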
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
				   sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
				   rte_socket_id());

	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, 32, "%02x:%02x.%x",
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (reg_base == NULL) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	switch (cptvf->vftype) {
	case OTX_CPT_VF_TYPE_AE:
		/* Set asymmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
		break;
	case OTX_CPT_VF_TYPE_SE:
		/* Set symmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
				       RTE_CRYPTODEV_FF_IN_PLACE_SGL |
				       RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
				       RTE_CRYPTODEV_FF_SYM_SESSIONLESS |
				       RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED;
		break;
	default:
		/* Feature not supported. Abort */
		CPT_LOG_ERR("VF type not supported by %s", dev_name);
		ret = -EIO;
		goto deinit_dev;
	}

	/* Start off timer for mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		c_dev->enqueue_burst = otx_cpt_enqueue_sym;
		c_dev->dequeue_burst = otx_cpt_dequeue_sym;
	} else {
		c_dev->enqueue_burst = otx_cpt_enqueue_asym;
		c_dev->dequeue_burst = otx_cpt_dequeue_asym;
	}

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

deinit_dev:
	otx_cpt_deinit_device(cptvf);

fail:
	if (cptvf) {
		/* Free private data allocated */
		rte_free(cptvf);
	}

	return ret;
}