/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */

#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"
#include "otx_cryptodev_ops.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

/* Shared FPM table used by asymmetric (EC) operations */
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];
/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
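/*
 * Alarm routines: the VF does not use a dedicated interrupt thread. Mailbox
 * and misc interrupts are instead polled from an EAL alarm callback that
 * re-arms itself every CPT_INTR_POLL_INTERVAL_MS milliseconds.
 */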
static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;
	otx_cpt_poll_misc(cptvf);
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}
static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}
static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

/* PMD ops */
static int
otx_cpt_dev_config(struct rte_cryptodev *dev,
		   struct rte_cryptodev_config *config __rte_unused)
{
	int ret = 0;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx_fpm_iova);

	return ret;
}
static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}
static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();

	otx_cpt_stop_device(cptvf);
}
static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}
static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();

	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities(info->feature_flags);
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}
static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too big %d, using default "
			     "queue length of %d", qp_conf->nb_descriptors,
			     DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}
static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret != 0) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}
static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->next) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
			return -ENOTSUP;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	return 0;
}
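/*
 * Allocate a cpt_sess_misc + cpt_ctx object from the session private data
 * mempool, fill it from the xform chain (AEAD, cipher, auth or GMAC) and
 * record the IOVA of the context so the enqueue path can program it directly.
 */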
static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cpt_sess_misc *misc;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_sess_misc) +
			offsetof(struct cpt_ctx, fctx));

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}
		if (unlikely(ret))
			goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, priv);

	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	return 0;

priv_put:
	rte_mempool_put(pool, priv);
	return -ENOTSUP;
}
static void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_sym_session_private_data(sess, driver_id);
	struct rte_mempool *pool;

	if (priv == NULL)
		return;

	memset(priv, 0, cpt_get_session_size());

	pool = rte_mempool_from_obj(priv);

	set_sym_session_private_data(sess, driver_id, NULL);

	rte_mempool_put(pool, priv);
}
static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}
static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_clear(dev->driver_id, sess);
}
static unsigned int
otx_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}
static int
otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			 struct rte_crypto_asym_xform *xform __rte_unused,
			 struct rte_cryptodev_asym_session *sess,
			 struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	set_asym_session_private_data(sess, dev->driver_id, priv);
	return 0;
}
static void
otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);
	if (priv == NULL)
		return;

	/* Free resources allocated during session configure */
	cpt_free_asym_session_parameters(priv);
	memset(priv, 0, otx_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			struct pending_queue *pqueue,
			void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p ", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			     DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);

	return 0;
}
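/*
 * Meta buffer layout used for asymmetric requests:
 *   [0..3]  four uintptr_t words; word 0 points back to the meta buffer and
 *           word 1 to the rte_crypto_op, so dequeue can recover both,
 *   [4.. ]  struct cpt_request_info,
 *   rest    scratch space handed to the prep routines via params.meta_buf.
 */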
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
			struct rte_crypto_op *op,
			struct pending_queue *pqueue)
{
	struct cpt_qp_meta_info *minfo = &instance->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx_cryptodev_driver_id);

	/* Store phys_addr of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);

	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf by crypto_op data and request_info struct */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			   sizeof(struct cpt_request_info);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	ret = otx_cpt_request_enqueue(instance, pqueue, params.req);
	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);
	return ret;
}
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op,
		       struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	void *prep_req, *mdata = NULL;
	int ret = 0;
	uint64_t cpt_op;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
				     &prep_req);
	else
		ret = fill_digest_params(op, sess, &instance->meta_info,
					 &mdata, &prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return ret;
	}

	/* Enqueue prepared instruction to h/w */
	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);
	if (unlikely(ret)) {
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, instance->meta_info.pool);
		return ret;
	}

	return 0;
}
static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op,
				struct pending_queue *pend_q)
{
	const int driver_id = otx_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	int ret;

	/* Create temporary session */
	if (rte_mempool_get(instance->sess_mp, (void **)&sess))
		return -ENOMEM;

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    instance->sess_mp_priv);
	if (ret)
		goto sess_put;

	sym_op->session = sess;

	ret = otx_cpt_enq_single_sym(instance, op, pend_q);
	if (unlikely(ret))
		goto priv_put;

	return 0;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(instance->sess_mp, sess);
	return ret;
}
#define OP_TYPE_SYM		0
#define OP_TYPE_ASYM		1
static __rte_always_inline int __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   struct pending_queue *pqueue,
		   const uint8_t op_type)
{
	/* Check for the type */

	if (op_type == OP_TYPE_SYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_sym(inst, op, pqueue);
		else
			return otx_cpt_enq_single_sym_sessless(inst, op,
							       pqueue);
	}

	if (op_type == OP_TYPE_ASYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_asym(inst, op, pqueue);
	}

	/* Should not reach here */
	return -ENOTSUP;
}
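/*
 * Burst enqueue: ops are validated and queued into the software pending
 * queue one at a time; the hardware doorbell is rung once, for the whole
 * batch, after the loop.
 */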
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count;
	int ret;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {

		/* Enqueue single op */
		ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);
		if (unlikely(ret))
			break;
		count++;
	}

	otx_cpt_ring_dbell(instance, count);
	return count;
}
static uint16_t
otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
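/*
 * RSA post-processing: copy the CPT output at req->rptr back into the op.
 * With RTE_CRYPTO_RSA_PADDING_NONE the output length equals the modulus
 * length; otherwise the output is prefixed with a 16-bit length field,
 * which is consumed before the data is copied out.
 */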
static __rte_always_inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		    struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->message.length = rsa_ctx->n.length;
		} else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));
			/* Offset data pointer by length field */
			req->rptr += sizeof(uint16_t);
		}
		memcpy(rsa->message.data, req->rptr, rsa->message.length);
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
			rsa->sign.length = rsa_ctx->n.length;
		} else {
			/* Get length of decrypted output */
			rsa->sign.length = rte_cpu_to_be_16
					(*((uint16_t *)req->rptr));
			/* Offset data pointer by length field */
			req->rptr += sizeof(uint16_t);
		}
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);

		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}
static __rte_always_inline void
otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}
static __rte_always_inline void
otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			     struct cpt_request_info *req,
			     struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}
static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
			  struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}
static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
			     const uint8_t op_type)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */

	if ((op_type == OP_TYPE_SYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}

	if ((op_type == OP_TYPE_ASYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)) {
		rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
		otx_cpt_asym_post_process(cop, (struct cpt_request_info *)rsp);
	}
}
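/*
 * Burst dequeue runs in two passes: the first walks the pending queue in
 * order, collecting completion codes until it hits a request that is still
 * pending; the second translates each completion code into an op status,
 * releases temporary sessionless sessions and frees the per-op meta buffer.
 */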
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_request_info *user_req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rid *rid_e;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	struct pending_queue *pqueue = &cptvf->pqueue;
	struct rte_crypto_op *cop;
	void *metabuf;
	uintptr_t *rsp;

	pcount = pqueue->pending_count;
	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		rid_e = &pqueue->rid_queue[pqueue->deq_head];
		user_req = (struct cpt_request_info *)(rid_e->rid);

		if (likely((i + 1) < count))
			rte_prefetch_non_temporal((void *)rid_e[1].rid);

		ret = check_nb_command_id(user_req, instance);

		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/* Return completion code and op handle */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
		pqueue->pending_count -= 1;
	}

	nb_completed = i;

	for (i = 0; i < nb_completed; i++) {

		rsp = (void *)ops[i];

		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i + 1]);

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		/* Check completion code */
		if (likely(cc[i] == 0)) {
			/* H/w success pkt. Post process */
			otx_cpt_dequeue_post_process(cop, rsp, op_type);
		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
			/* auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Error */
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}

		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			void *sess_private_data_t =
				get_sym_session_private_data(cop->sym->session,
						otx_cryptodev_driver_id);
			memset(sess_private_data_t, 0,
			       cpt_get_session_size());
			memset(cop->sym->session, 0,
			       rte_cryptodev_sym_get_existing_header_session_size(
			       cop->sym->session));
			rte_mempool_put(instance->sess_mp_priv,
					sess_private_data_t);
			rte_mempool_put(instance->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}

		free_op_meta(metabuf, instance->meta_info.pool);
	}

	return nb_completed;
}
static uint16_t
otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}
static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear,

	.asym_session_get_size = otx_cpt_asym_session_size_get,
	.asym_session_configure = otx_cpt_asym_session_cfg,
	.asym_session_clear = otx_cpt_asym_session_clear,
};
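/*
 * Device creation, called at PCI probe time: map BAR0, initialise the VF,
 * derive the feature flags from the VF type (AE vs SE) and wire up the
 * matching enqueue/dequeue burst functions.
 */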
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* for secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
			sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
			rte_socket_id());
	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, 32, "%02x:%02x.%x",
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (reg_base == NULL) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	switch (cptvf->vftype) {
	case OTX_CPT_VF_TYPE_AE:
		/* Set asymmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
		break;
	case OTX_CPT_VF_TYPE_SE:
		/* Set symmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
				       RTE_CRYPTODEV_FF_IN_PLACE_SGL |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
				       RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
		break;
	default:
		/* Feature not supported. Abort */
		CPT_LOG_ERR("VF type not supported by %s", dev_name);
		ret = -EIO;
		goto deinit_dev;
	}

	/* Start off timer for mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		c_dev->enqueue_burst = otx_cpt_enqueue_sym;
		c_dev->dequeue_burst = otx_cpt_dequeue_sym;
	} else {
		c_dev->enqueue_burst = otx_cpt_enqueue_asym;
		c_dev->dequeue_burst = otx_cpt_dequeue_asym;
	}

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

deinit_dev:
	otx_cpt_deinit_device(cptvf);

fail:
	/* Free private data allocated */
	rte_free(cptvf);

	return ret;
}