/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Cavium, Inc
 */
#include <rte_alarm.h>
#include <rte_bus_pci.h>
#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_mempool.h>

#include "otx_cryptodev.h"
#include "otx_cryptodev_capabilities.h"
#include "otx_cryptodev_hw_access.h"
#include "otx_cryptodev_mbox.h"
#include "otx_cryptodev_ops.h"

#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"
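
/* Per-process FPM (fixed-point multiplication) table IOVAs, shared by all
 * asymmetric queue pairs. Populated once from dev_configure via
 * cpt_fpm_init() and consumed by the ECDSA enqueue path.
 */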
static uint64_t otx_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id);
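
/*
 * Alarm routines: VF mailbox/misc interrupts are serviced by polling.
 * The callback below handles any pending events and re-arms itself every
 * CPT_INTR_POLL_INTERVAL_MS milliseconds through the EAL alarm framework.
 */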
static void
otx_cpt_alarm_cb(void *arg)
{
	struct cpt_vf *cptvf = arg;
	otx_cpt_poll_misc(cptvf);
	rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
			  otx_cpt_alarm_cb, cptvf);
}

static int
otx_cpt_periodic_alarm_start(void *arg)
{
	return rte_eal_alarm_set(CPT_INTR_POLL_INTERVAL_MS * 1000,
				 otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_periodic_alarm_stop(void *arg)
{
	return rte_eal_alarm_cancel(otx_cpt_alarm_cb, arg);
}

static int
otx_cpt_dev_config(struct rte_cryptodev *dev,
		   struct rte_cryptodev_config *config __rte_unused)
{
	int ret = 0;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		/* Initialize shared FPM table */
		ret = cpt_fpm_init(otx_fpm_iova);

	return ret;
}

static int
otx_cpt_dev_start(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	return otx_cpt_start_device(cptvf);
}

static void
otx_cpt_dev_stop(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;

	CPT_PMD_INIT_FUNC_TRACE();

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
		cpt_fpm_clear();

	otx_cpt_stop_device(cptvf);
}

static int
otx_cpt_dev_close(struct rte_cryptodev *c_dev)
{
	void *cptvf = c_dev->data->dev_private;
	int i, ret;

	CPT_PMD_INIT_FUNC_TRACE();

	for (i = 0; i < c_dev->data->nb_queue_pairs; i++) {
		ret = otx_cpt_que_pair_release(c_dev, i);
		if (ret)
			return ret;
	}

	otx_cpt_periodic_alarm_stop(cptvf);
	otx_cpt_deinit_device(cptvf);

	return 0;
}

static void
otx_cpt_dev_info_get(struct rte_cryptodev *dev, struct rte_cryptodev_info *info)
{
	CPT_PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = CPT_NUM_QS_PER_VF;
		info->feature_flags = dev->feature_flags;
		info->capabilities = otx_get_capabilities(info->feature_flags);
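		/* A max_nb_sessions of 0 advertises no limit on sessions */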
		info->sym.max_nb_sessions = 0;
		info->driver_id = otx_cryptodev_driver_id;
		info->min_mbuf_headroom_req = OTX_CPT_MIN_HEADROOM_REQ;
		info->min_mbuf_tailroom_req = OTX_CPT_MIN_TAILROOM_REQ;
	}
}

static void
otx_cpt_stats_get(struct rte_cryptodev *dev __rte_unused,
		  struct rte_cryptodev_stats *stats __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static void
otx_cpt_stats_reset(struct rte_cryptodev *dev __rte_unused)
{
	CPT_PMD_INIT_FUNC_TRACE();
}

static int
otx_cpt_que_pair_setup(struct rte_cryptodev *dev,
		       uint16_t que_pair_id,
		       const struct rte_cryptodev_qp_conf *qp_conf,
		       int socket_id __rte_unused)
{
	struct cpt_instance *instance = NULL;
	struct rte_pci_device *pci_dev;
	int ret = -1;

	CPT_PMD_INIT_FUNC_TRACE();

	if (dev->data->queue_pairs[que_pair_id] != NULL) {
		ret = otx_cpt_que_pair_release(dev, que_pair_id);
		if (ret)
			return ret;
	}

	if (qp_conf->nb_descriptors > DEFAULT_CMD_QLEN) {
		CPT_LOG_INFO("Number of descriptors too big %d, using default "
			     "queue length of %d", qp_conf->nb_descriptors,
			     DEFAULT_CMD_QLEN);
	}

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	if (pci_dev->mem_resource[0].addr == NULL) {
		CPT_LOG_ERR("PCI mem address null");
		return -EIO;
	}

	ret = otx_cpt_get_resource(dev, 0, &instance, que_pair_id);
	if (ret != 0 || instance == NULL) {
		CPT_LOG_ERR("Error getting instance handle from device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	instance->queue_id = que_pair_id;
	instance->sess_mp = qp_conf->mp_session;
	instance->sess_mp_priv = qp_conf->mp_session_private;
	dev->data->queue_pairs[que_pair_id] = instance;

	return 0;
}

static int
otx_cpt_que_pair_release(struct rte_cryptodev *dev, uint16_t que_pair_id)
{
	struct cpt_instance *instance = dev->data->queue_pairs[que_pair_id];
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	ret = otx_cpt_put_resource(instance);
	if (ret != 0) {
		CPT_LOG_ERR("Error putting instance handle of device %s : "
			    "ret = %d", dev->data->name, ret);
		return ret;
	}

	dev->data->queue_pairs[que_pair_id] = NULL;

	return 0;
}

static unsigned int
otx_cpt_get_session_size(struct rte_cryptodev *dev __rte_unused)
{
	return cpt_get_session_size();
}
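
/*
 * Reject transform chains that the CPT microcode does not support:
 * auth-then-encrypt and decrypt-then-auth chains, 3DES-CBC + SHA1
 * chaining in either order, and NULL auth with verify.
 */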
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	if (xform->next) {
		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
			return -ENOTSUP;

		if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
		    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
			return -ENOTSUP;
	}

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cpt_sess_misc *misc;
	void *priv;
	int ret;

	ret = sym_xform_verify(xform);
	if (unlikely(ret))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	misc = priv;

	for ( ; xform != NULL; xform = xform->next) {
		switch (xform->type) {
		case RTE_CRYPTO_SYM_XFORM_AEAD:
			ret = fill_sess_aead(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_CIPHER:
			ret = fill_sess_cipher(xform, misc);
			break;
		case RTE_CRYPTO_SYM_XFORM_AUTH:
			if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
				ret = fill_sess_gmac(xform, misc);
			else
				ret = fill_sess_auth(xform, misc);
			break;
		default:
			ret = -1;
		}

		if (ret)
			goto priv_put;
	}

	set_sym_session_private_data(sess, driver_id, priv);
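
	/*
	 * The hardware context follows the misc header within the same
	 * mempool object; record its IOVA so the data path can hand the
	 * context to hardware without an address translation.
	 */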
	misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
			     sizeof(struct cpt_sess_misc);

	return 0;

priv_put:
	if (priv)
		rte_mempool_put(pool, priv);
	return -ENOTSUP;
}

static void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_sym_session_private_data(sess, driver_id);
	struct rte_mempool *pool;

	if (priv == NULL)
		return;

	memset(priv, 0, cpt_get_session_size());

	pool = rte_mempool_from_obj(priv);

	set_sym_session_private_data(sess, driver_id, NULL);

	rte_mempool_put(pool, priv);
}

static int
otx_cpt_session_cfg(struct rte_cryptodev *dev,
		    struct rte_crypto_sym_xform *xform,
		    struct rte_cryptodev_sym_session *sess,
		    struct rte_mempool *pool)
{
	CPT_PMD_INIT_FUNC_TRACE();

	return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx_cpt_session_clear(struct rte_cryptodev *dev,
		      struct rte_cryptodev_sym_session *sess)
{
	CPT_PMD_INIT_FUNC_TRACE();

	sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cpt_asym_sess_misc);
}

static int
otx_cpt_asym_session_cfg(struct rte_cryptodev *dev,
			 struct rte_crypto_asym_xform *xform,
			 struct rte_cryptodev_asym_session *sess,
			 struct rte_mempool *pool)
{
	struct cpt_asym_sess_misc *priv;
	int ret;

	CPT_PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(pool, (void **)&priv)) {
		CPT_LOG_ERR("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

	ret = cpt_fill_asym_session_parameters(priv, xform);
	if (ret) {
		CPT_LOG_ERR("Could not configure session parameters");

		/* Return session to mempool */
		rte_mempool_put(pool, priv);
		return ret;
	}

	set_asym_session_private_data(sess, dev->driver_id, priv);

	return 0;
}

static void
otx_cpt_asym_session_clear(struct rte_cryptodev *dev,
			   struct rte_cryptodev_asym_session *sess)
{
	struct cpt_asym_sess_misc *priv;
	struct rte_mempool *sess_mp;

	CPT_PMD_INIT_FUNC_TRACE();

	priv = get_asym_session_private_data(sess, dev->driver_id);
	if (priv == NULL)
		return;

	/* Free resources allocated during session configure */
	cpt_free_asym_session_parameters(priv);
	memset(priv, 0, otx_cpt_asym_session_size_get(dev));
	sess_mp = rte_mempool_from_obj(priv);
	set_asym_session_private_data(sess, dev->driver_id, NULL);
	rte_mempool_put(sess_mp, priv);
}
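
/*
 * Data path: requests are tracked in a per-VF software pending queue.
 * enq_tail advances on submission, deq_head on completion, and
 * pending_count bounds the number of outstanding commands to
 * DEFAULT_CMD_QLEN.
 */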
static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
			struct pending_queue *pqueue,
			void *req)
{
	struct cpt_request_info *user_req = (struct cpt_request_info *)req;

	if (unlikely(pqueue->pending_count >= DEFAULT_CMD_QLEN))
		return -EAGAIN;

	fill_cpt_inst(instance, req);

	CPT_LOG_DP_DEBUG("req: %p op: %p", req, user_req->op);

	/* Fill time_out cycles */
	user_req->time_out = rte_get_timer_cycles() +
			DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
	user_req->extra_time = 0;

	/* Default mode of software queue */
	mark_cpt_inst(instance);

	pqueue->rid_queue[pqueue->enq_tail].rid = (uintptr_t)user_req;

	/* We will use soft queue length here to limit requests */
	MOD_INC(pqueue->enq_tail, DEFAULT_CMD_QLEN);
	pqueue->pending_count += 1;

	CPT_LOG_DP_DEBUG("Submitted NB cmd with request: %p "
			 "op: %p", user_req, user_req->op);

	return 0;
}

static __rte_always_inline int __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
			struct rte_crypto_op *op,
			struct pending_queue *pqueue)
{
	struct cpt_qp_meta_info *minfo = &instance->meta_info;
	struct rte_crypto_asym_op *asym_op = op->asym;
	struct asym_op_params params = {0};
	struct cpt_asym_sess_misc *sess;
	uintptr_t *cop;
	void *mdata;
	int ret;

	if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
		CPT_LOG_DP_ERR("Could not allocate meta buffer for request");
		return -ENOMEM;
	}

	sess = get_asym_session_private_data(asym_op->session,
					     otx_cryptodev_driver_id);

	/* Store phys_addr of the mdata to meta_buf */
	params.meta_buf = rte_mempool_virt2iova(mdata);
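
	/*
	 * Meta buffer layout: four pointer-sized words forming the
	 * completion handle ([0] = meta buffer, [1] = crypto op), followed
	 * by the cpt_request_info, followed by scratch space used by the
	 * microcode. The dequeue path reads the first two words back as
	 * rsp[0]/rsp[1].
	 */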
	cop = mdata;
	cop[0] = (uintptr_t)mdata;
	cop[1] = (uintptr_t)op;
	cop[2] = cop[3] = 0ULL;

	params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
	params.req->op = cop;

	/* Adjust meta_buf by crypto_op data and request_info struct */
	params.meta_buf += (4 * sizeof(uintptr_t)) +
			   sizeof(struct cpt_request_info);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		ret = cpt_modex_prep(&params, &sess->mod_ctx);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		ret = cpt_enqueue_rsa_op(op, &params, sess);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx_fpm_iova);
		if (unlikely(ret))
			goto req_fail;
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
				    sess->ec_ctx.curveid);
		if (unlikely(ret))
			goto req_fail;
		break;
	default:
		op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		ret = -EINVAL;
		goto req_fail;
	}

	ret = otx_cpt_request_enqueue(instance, pqueue, params.req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("Could not enqueue crypto req");
		goto req_fail;
	}

	return 0;

req_fail:
	free_op_meta(mdata, minfo->pool);

	return ret;
}

static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
		       struct rte_crypto_op *op,
		       struct pending_queue *pqueue)
{
	struct cpt_sess_misc *sess;
	struct rte_crypto_sym_op *sym_op = op->sym;
	void *prep_req, *mdata = NULL;
	int ret = 0;
	uint64_t cpt_op;

	sess = (struct cpt_sess_misc *)
			get_sym_session_private_data(sym_op->session,
						     otx_cryptodev_driver_id);

	cpt_op = sess->cpt_op;

	if (likely(cpt_op & CPT_OP_CIPHER_MASK))
		ret = fill_fc_params(op, sess, &instance->meta_info, &mdata,
				     &prep_req);
	else
		ret = fill_digest_params(op, sess, &instance->meta_info,
					 &mdata, &prep_req);

	if (unlikely(ret)) {
		CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
			       "ret 0x%x", op, (unsigned int)cpt_op, ret);
		return ret;
	}

	/* Enqueue prepared instruction to h/w */
	ret = otx_cpt_request_enqueue(instance, pqueue, prep_req);

	if (unlikely(ret)) {
		/* Buffer allocated for request preparation needs to be freed */
		free_op_meta(mdata, instance->meta_info.pool);
	}

	return ret;
}

static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
				struct rte_crypto_op *op,
				struct pending_queue *pend_q)
{
	const int driver_id = otx_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	int ret;

	/* Create temporary session */
	if (rte_mempool_get(instance->sess_mp, (void **)&sess))
		return -ENOMEM;

	ret = sym_session_configure(driver_id, sym_op->xform, sess,
				    instance->sess_mp_priv);
	if (ret)
		goto sess_put;

	sym_op->session = sess;

	ret = otx_cpt_enq_single_sym(instance, op, pend_q);

	if (unlikely(ret))
		goto priv_put;

	return 0;

priv_put:
	sym_session_clear(driver_id, sess);
sess_put:
	rte_mempool_put(instance->sess_mp, sess);
	return ret;
}

#define OP_TYPE_SYM		0
#define OP_TYPE_ASYM		1

static __rte_always_inline int __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
		   struct rte_crypto_op *op,
		   struct pending_queue *pqueue,
		   const uint8_t op_type)
{
	/* Check for the type */

	if (op_type == OP_TYPE_SYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_sym(inst, op, pqueue);
		else
			return otx_cpt_enq_single_sym_sessless(inst, op,
							       pqueue);
	}

	if (op_type == OP_TYPE_ASYM) {
		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
			return otx_cpt_enq_single_asym(inst, op, pqueue);
	}

	/* Should not reach here */
	return -ENOTSUP;
}

static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	uint16_t count;
	int ret;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct pending_queue *pqueue = &cptvf->pqueue;

	count = DEFAULT_CMD_QLEN - pqueue->pending_count;
	if (nb_ops > count)
		nb_ops = count;

	count = 0;
	while (likely(count < nb_ops)) {

		/* Enqueue single op */
		ret = otx_cpt_enq_single(instance, ops[count], pqueue, op_type);

		if (unlikely(ret))
			break;
		count++;
	}
	otx_cpt_ring_dbell(instance, count);
	return count;
}

static uint16_t
otx_cpt_enqueue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_enqueue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_enqueue(qptr, ops, nb_ops, OP_TYPE_SYM);
}

static __rte_always_inline void
otx_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
		    struct rte_crypto_rsa_xform *rsa_ctx)
{
	struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

	switch (rsa->op_type) {
	case RTE_CRYPTO_ASYM_OP_ENCRYPT:
		rsa->cipher.length = rsa_ctx->n.length;
		memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
		break;
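
	/*
	 * For decrypt/verify with padding, the microcode strips the padding
	 * and returns the payload length as a 16-bit big-endian word ahead
	 * of the data; with RTE_CRYPTO_RSA_PADDING_NONE the output is
	 * always n.length bytes.
	 */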
	case RTE_CRYPTO_ASYM_OP_DECRYPT:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
			rsa->message.length = rsa_ctx->n.length;
		else {
			/* Get length of decrypted output */
			rsa->message.length = rte_cpu_to_be_16(
					*((uint16_t *)req->rptr));

			/* Offset data pointer by length fields */
			req->rptr += 2;
		}
		memcpy(rsa->message.data, req->rptr, rsa->message.length);
		break;
	case RTE_CRYPTO_ASYM_OP_SIGN:
		rsa->sign.length = rsa_ctx->n.length;
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
		break;
	case RTE_CRYPTO_ASYM_OP_VERIFY:
		if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE)
			rsa->sign.length = rsa_ctx->n.length;
		else {
			/* Get length of decrypted output */
			rsa->sign.length = rte_cpu_to_be_16(
					*((uint16_t *)req->rptr));

			/* Offset data pointer by length fields */
			req->rptr += 2;
		}
		memcpy(rsa->sign.data, req->rptr, rsa->sign.length);

		if (memcmp(rsa->sign.data, rsa->message.data,
			   rsa->message.length)) {
			CPT_LOG_DP_ERR("RSA verification failed");
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid RSA operation type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}
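
/*
 * EC responses: the microcode writes each big-number component at an
 * 8-byte aligned offset, hence the ROUNDUP8() between the r and s (or
 * x and y) halves of the response buffer.
 */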

static __rte_always_inline void
otx_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
			      struct cpt_request_info *req,
			      struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
		return;

	/* Separate out sign r and s components */
	memcpy(ecdsa->r.data, req->rptr, prime_len);
	memcpy(ecdsa->s.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecdsa->r.length = prime_len;
	ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
			     struct cpt_request_info *req,
			     struct cpt_asym_ec_ctx *ec)
{
	int prime_len = ec_grp[ec->curveid].prime.length;

	memcpy(ecpm->r.x.data, req->rptr, prime_len);
	memcpy(ecpm->r.y.data, req->rptr + ROUNDUP8(prime_len), prime_len);
	ecpm->r.x.length = prime_len;
	ecpm->r.y.length = prime_len;
}

static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
			  struct cpt_request_info *req)
{
	struct rte_crypto_asym_op *op = cop->asym;
	struct cpt_asym_sess_misc *sess;

	sess = get_asym_session_private_data(op->session,
					     otx_cryptodev_driver_id);

	switch (sess->xfrm_type) {
	case RTE_CRYPTO_ASYM_XFORM_RSA:
		otx_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_MODEX:
		op->modex.result.length = sess->mod_ctx.modulus.length;
		memcpy(op->modex.result.data, req->rptr,
		       op->modex.result.length);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECDSA:
		otx_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
		break;
	case RTE_CRYPTO_ASYM_XFORM_ECPM:
		otx_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
		break;
	default:
		CPT_LOG_DP_DEBUG("Invalid crypto xform type");
		cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
		break;
	}
}

static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
			     const uint8_t op_type)
{
	/* H/w has returned success */
	cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

	/* Perform further post processing */

	if ((op_type == OP_TYPE_SYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC)) {
		/* Check if auth verify needs to be completed */
		if (unlikely(rsp[2]))
			compl_auth_verify(cop, (uint8_t *)rsp[2], rsp[3]);
		return;
	}
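
	/*
	 * For asymmetric ops the response still points at the four-word
	 * completion handle laid down at enqueue time; step over it to
	 * reach the cpt_request_info used for post processing.
	 */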
	if ((op_type == OP_TYPE_ASYM) &&
	    (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC)) {
		rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
		otx_cpt_asym_post_process(cop, (struct cpt_request_info *)rsp);
	}
}
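
/*
 * Dequeue runs in two passes: the first scans the pending queue in order
 * and collects completion codes for finished commands, the second post
 * processes each completed op and releases its meta buffer.
 */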
static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
		    const uint8_t op_type)
{
	struct cpt_instance *instance = (struct cpt_instance *)qptr;
	struct cpt_request_info *user_req;
	struct cpt_vf *cptvf = (struct cpt_vf *)instance;
	struct rid *rid_e;
	uint8_t cc[nb_ops];
	int i, count, pcount;
	uint8_t ret;
	int nb_completed;
	struct pending_queue *pqueue = &cptvf->pqueue;
	struct rte_crypto_op *cop;
	void *metabuf;
	uintptr_t *rsp;

	pcount = pqueue->pending_count;
	count = (nb_ops > pcount) ? pcount : nb_ops;

	for (i = 0; i < count; i++) {
		rid_e = &pqueue->rid_queue[pqueue->deq_head];
		user_req = (struct cpt_request_info *)(rid_e->rid);

		if (likely((i + 1) < count))
			rte_prefetch_non_temporal((void *)rid_e[1].rid);

		ret = check_nb_command_id(user_req, instance);

		if (unlikely(ret == ERR_REQ_PENDING)) {
			/* Stop checking for completions */
			break;
		}

		/* Return completion code and op handle */
		cc[i] = ret;
		ops[i] = user_req->op;

		CPT_LOG_DP_DEBUG("Request %p Op %p completed with code %d",
				 user_req, user_req->op, ret);

		MOD_INC(pqueue->deq_head, DEFAULT_CMD_QLEN);
		pqueue->pending_count -= 1;
	}

	nb_completed = i;
	for (i = 0; i < nb_completed; i++) {

		rsp = (void *)ops[i];

		if (likely((i + 1) < nb_completed))
			rte_prefetch0(ops[i + 1]);

		metabuf = (void *)rsp[0];
		cop = (void *)rsp[1];

		ops[i] = cop;

		/* Check completion code */

		if (likely(cc[i] == 0)) {
			/* H/w success pkt. Post process */
			otx_cpt_dequeue_post_process(cop, rsp, op_type);
		} else if (cc[i] == ERR_GC_ICV_MISCOMPARE) {
			/* auth data mismatch */
			cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
		} else {
			/* Error */
			cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		}
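
		/*
		 * Sessionless ops use a temporary session set up at enqueue
		 * time; return the session header and its private data to
		 * their mempools now that the op has completed.
		 */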
		if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
			void *sess_private_data_t =
				get_sym_session_private_data(cop->sym->session,
						otx_cryptodev_driver_id);
			memset(sess_private_data_t, 0,
			       cpt_get_session_size());
			memset(cop->sym->session, 0,
			       rte_cryptodev_sym_get_existing_header_session_size(
						cop->sym->session));
			rte_mempool_put(instance->sess_mp_priv,
					sess_private_data_t);
			rte_mempool_put(instance->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
		free_op_meta(metabuf, instance->meta_info.pool);
	}

	return nb_completed;
}

static uint16_t
otx_cpt_dequeue_asym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_ASYM);
}

static uint16_t
otx_cpt_dequeue_sym(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	return otx_cpt_pkt_dequeue(qptr, ops, nb_ops, OP_TYPE_SYM);
}

static struct rte_cryptodev_ops cptvf_ops = {
	/* Device related operations */
	.dev_configure = otx_cpt_dev_config,
	.dev_start = otx_cpt_dev_start,
	.dev_stop = otx_cpt_dev_stop,
	.dev_close = otx_cpt_dev_close,
	.dev_infos_get = otx_cpt_dev_info_get,

	.stats_get = otx_cpt_stats_get,
	.stats_reset = otx_cpt_stats_reset,
	.queue_pair_setup = otx_cpt_que_pair_setup,
	.queue_pair_release = otx_cpt_que_pair_release,

	/* Crypto related operations */
	.sym_session_get_size = otx_cpt_get_session_size,
	.sym_session_configure = otx_cpt_session_cfg,
	.sym_session_clear = otx_cpt_session_clear,

	.asym_session_get_size = otx_cpt_asym_session_size_get,
	.asym_session_configure = otx_cpt_asym_session_cfg,
	.asym_session_clear = otx_cpt_asym_session_clear,
};
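
/*
 * Create a cryptodev on top of a CPT VF; invoked from the PCI probe path
 * (see otx_cryptodev.c). The VF type reported by hardware decides whether
 * the device exposes symmetric or asymmetric crypto.
 */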
int
otx_cpt_dev_create(struct rte_cryptodev *c_dev)
{
	struct rte_pci_device *pdev = RTE_DEV_TO_PCI(c_dev->device);
	struct cpt_vf *cptvf = NULL;
	void *reg_base;
	char dev_name[32];
	int ret;

	if (pdev->mem_resource[0].phys_addr == 0ULL)
		return -EIO;

	/* For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	cptvf = rte_zmalloc_socket("otx_cryptodev_private_mem",
				   sizeof(struct cpt_vf), RTE_CACHE_LINE_SIZE,
				   rte_socket_id());
	if (cptvf == NULL) {
		CPT_LOG_ERR("Cannot allocate memory for device private data");
		return -ENOMEM;
	}

	snprintf(dev_name, 32, "%02x:%02x.%x",
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	reg_base = pdev->mem_resource[0].addr;
	if (reg_base == NULL) {
		CPT_LOG_ERR("Failed to map BAR0 of %s", dev_name);
		ret = -ENODEV;
		goto fail;
	}

	ret = otx_cpt_hw_init(cptvf, pdev, reg_base, dev_name);
	if (ret) {
		CPT_LOG_ERR("Failed to init cptvf %s", dev_name);
		ret = -EIO;
		goto fail;
	}

	switch (cptvf->vftype) {
	case OTX_CPT_VF_TYPE_AE:
		/* Set asymmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT;
		break;
	case OTX_CPT_VF_TYPE_SE:
		/* Set symmetric cpt feature flags */
		c_dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
				       RTE_CRYPTODEV_FF_HW_ACCELERATED |
				       RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
				       RTE_CRYPTODEV_FF_IN_PLACE_SGL |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
				       RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
				       RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA |
				       RTE_CRYPTODEV_FF_SYM_SESSIONLESS;
		break;
	default:
		/* Feature not supported. Abort */
		CPT_LOG_ERR("VF type not supported by %s", dev_name);
		ret = -EIO;
		goto deinit_dev;
	}

	/* Start off timer for mailbox interrupts */
	otx_cpt_periodic_alarm_start(cptvf);

	c_dev->dev_ops = &cptvf_ops;

	if (c_dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
		c_dev->enqueue_burst = otx_cpt_enqueue_sym;
		c_dev->dequeue_burst = otx_cpt_dequeue_sym;
	} else {
		c_dev->enqueue_burst = otx_cpt_enqueue_asym;
		c_dev->dequeue_burst = otx_cpt_dequeue_asym;
	}

	/* Save dev private data */
	c_dev->data->dev_private = cptvf;

	return 0;

deinit_dev:
	otx_cpt_deinit_device(cptvf);

fail:
	if (cptvf) {
		/* Free private data allocated */
		rte_free(cptvf);
	}

	return ret;
}