/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_vect.h>

#include "cn9k_cryptodev.h"
#include "cn9k_cryptodev_ops.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"
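
/*
 * Fill a CPT instruction for a symmetric crypto op. Cipher and combined
 * cipher+auth ops go through fill_fc_params(); auth-only ops go through
 * fill_digest_params().
 */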
static __rte_always_inline int __rte_hot
cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		       struct cnxk_se_sess *sess,
		       struct cpt_inflight_req *infl_req,
		       struct cpt_inst_s *inst)
{
	uint64_t cpt_op;
	int ret;

	cpt_op = sess->cpt_op;

	if (cpt_op & ROC_SE_OP_CIPHER_MASK)
		ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
	else
		ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
					 inst);

	return ret;
}
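
/*
 * For a sessionless op, set up a temporary session out of the queue pair's
 * session mempool. The session is torn down again in
 * cn9k_cpt_dequeue_post_process() once the op completes.
 */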
static inline struct cnxk_se_sess *
cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
	const int driver_id = cn9k_cryptodev_driver_id;
	struct rte_crypto_sym_op *sym_op = op->sym;
	struct rte_cryptodev_sym_session *sess;
	struct cnxk_se_sess *priv;
	int ret;

	/* Create temporary session */
	sess = rte_cryptodev_sym_session_create(qp->sess_mp);
	if (sess == NULL)
		return NULL;

	ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
				    sess, qp->sess_mp_priv);
	if (ret)
		goto sess_put;

	priv = get_sym_session_private_data(sess, driver_id);

	sym_op->session = sess;

	return priv;

sess_put:
	rte_mempool_put(qp->sess_mp, sess);
	return NULL;
}
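
/*
 * Translate an rte_crypto_op into a CPT instruction, dispatching on op type
 * (symmetric vs asymmetric) and session type. Instruction word 7 is picked up
 * from the session (sess->cpt_inst_w7), where it was prepared at session
 * configure time.
 */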
static inline int
cn9k_cpt_inst_prep(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
		   struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
	int ret;

	if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
		struct rte_crypto_sym_op *sym_op;
		struct cnxk_se_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			sym_op = op->sym;
			sess = get_sym_session_private_data(
				sym_op->session, cn9k_cryptodev_driver_id);
			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
		} else {
			sess = cn9k_cpt_sym_temp_sess_create(qp, op);
			if (unlikely(sess == NULL)) {
				plt_dp_err("Could not create temp session");
				return -1;
			}

			ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
						     inst);
			if (unlikely(ret)) {
				sym_session_clear(cn9k_cryptodev_driver_id,
						  op->sym->session);
				rte_mempool_put(qp->sess_mp, op->sym->session);
			}
		}
		inst->w7.u64 = sess->cpt_inst_w7;
	} else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
		struct rte_crypto_asym_op *asym_op;
		struct cnxk_ae_sess *sess;

		if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
			asym_op = op->asym;
			sess = get_asym_session_private_data(
				asym_op->session, cn9k_cryptodev_driver_id);
			ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
			inst->w7.u64 = sess->cpt_inst_w7;
		} else {
			ret = -EINVAL;
		}
	} else {
		plt_dp_err("Unsupported op type");
		ret = -EINVAL;
	}

	return ret;
}
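
/*
 * Submit one instruction to hardware: copy it to the LMT line and issue an
 * LDEOR. A returned status of 0 means the atomic store was not committed, so
 * the copy-and-submit sequence is retried until it succeeds.
 */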
static inline void
cn9k_cpt_inst_submit(struct cpt_inst_s *inst, uint64_t lmtline,
		     uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy CPT command to LMTLINE */
		roc_lmt_mov((void *)lmtline, inst, 2);

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
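
/*
 * Same as cn9k_cpt_inst_submit(), but moves two instructions (128 bytes) per
 * LMTST: on ARM64 via NEON 128-bit load/store pairs, elsewhere via
 * roc_lmt_mov_seg().
 */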
static __plt_always_inline void
cn9k_cpt_inst_submit_dual(struct cpt_inst_s *inst, uint64_t lmtline,
			  uint64_t io_addr)
{
	uint64_t lmt_status;

	do {
		/* Copy 2 CPT inst_s to LMTLINE */
#if defined(RTE_ARCH_ARM64)
		uint64_t *s = (uint64_t *)inst;
		uint64_t *d = (uint64_t *)lmtline;

		vst1q_u64(&d[0], vld1q_u64(&s[0]));
		vst1q_u64(&d[2], vld1q_u64(&s[2]));
		vst1q_u64(&d[4], vld1q_u64(&s[4]));
		vst1q_u64(&d[6], vld1q_u64(&s[6]));
		vst1q_u64(&d[8], vld1q_u64(&s[8]));
		vst1q_u64(&d[10], vld1q_u64(&s[10]));
		vst1q_u64(&d[12], vld1q_u64(&s[12]));
		vst1q_u64(&d[14], vld1q_u64(&s[14]));
#else
		roc_lmt_mov_seg((void *)lmtline, inst, 8);
#endif

		/*
		 * Make sure compiler does not reorder memcpy and ldeor.
		 * LMTST transactions are always flushed from the write
		 * buffer immediately, a DMB is not required to push out
		 * LMTSTs.
		 */
		rte_io_wmb();
		lmt_status = roc_lmt_submit_ldeor(io_addr);
	} while (lmt_status == 0);
}
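
/*
 * Burst enqueue. Instructions are submitted two at a time to exploit the
 * dual-instruction LMTST; an odd op count is handled by submitting the first
 * op on its own so the main loop always works on pairs.
 */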
static uint16_t
cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req_1, *infl_req_2;
	struct cpt_inst_s inst[2] __rte_cache_aligned;
	struct rte_crypto_op *op_1, *op_2;
	uint16_t nb_allowed, count = 0;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	uint64_t enq_tail;
	int ret;

	const uint32_t nb_desc = qp->lf.nb_desc;
	const uint64_t lmt_base = qp->lf.lmt_base;
	const uint64_t io_addr = qp->lf.io_addr;

	pend_q = &qp->pend_q;

	/* Clear w0, w2, w3 of both inst */
	inst[0].w0.u64 = 0;
	inst[0].w2.u64 = 0;
	inst[0].w3.u64 = 0;
	inst[1].w0.u64 = 0;
	inst[1].w2.u64 = 0;
	inst[1].w3.u64 = 0;

	nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
	nb_ops = RTE_MIN(nb_ops, nb_allowed);

	enq_tail = pend_q->enq_tail;

	if (unlikely(nb_ops & 1)) {
		op_1 = ops[0];
		infl_req_1 = &pend_q->req_queue[enq_tail];
		infl_req_1->op_flags = 0;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			return 0;
		}

		infl_req_1->cop = op_1;
		infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
		MOD_INC(enq_tail, nb_desc);
		count++;
	}

	while (count < nb_ops) {
		op_1 = ops[count];
		op_2 = ops[count + 1];

		infl_req_1 = &pend_q->req_queue[enq_tail];
		MOD_INC(enq_tail, nb_desc);
		infl_req_2 = &pend_q->req_queue[enq_tail];
		MOD_INC(enq_tail, nb_desc);

		infl_req_1->cop = op_1;
		infl_req_2->cop = op_2;
		infl_req_1->op_flags = 0;
		infl_req_2->op_flags = 0;

		infl_req_1->res.cn9k.compcode = CPT_COMP_NOT_DONE;
		inst[0].res_addr = (uint64_t)&infl_req_1->res;

		infl_req_2->res.cn9k.compcode = CPT_COMP_NOT_DONE;
		inst[1].res_addr = (uint64_t)&infl_req_2->res;

		ret = cn9k_cpt_inst_prep(qp, op_1, infl_req_1, &inst[0]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_1);
			/* Rewind enq_tail by the two slots just claimed */
			if (enq_tail == 0)
				enq_tail = nb_desc - 2;
			else if (enq_tail == 1)
				enq_tail = nb_desc - 1;
			else
				enq_tail -= 2;
			break;
		}

		ret = cn9k_cpt_inst_prep(qp, op_2, infl_req_2, &inst[1]);
		if (unlikely(ret)) {
			plt_dp_err("Could not process op: %p", op_2);
			/* Rewind enq_tail by one slot, submit op_1 alone */
			if (enq_tail == 0)
				enq_tail = nb_desc - 1;
			else
				enq_tail -= 1;

			cn9k_cpt_inst_submit(&inst[0], lmt_base, io_addr);
			count++;
			break;
		}

		cn9k_cpt_inst_submit_dual(&inst[0], lmt_base, io_addr);

		count += 2;
	}

	pend_q->enq_tail = enq_tail;
	pend_q->pending_count += count;
	pend_q->time_out = rte_get_timer_cycles() +
			   DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

	return count;
}
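
/*
 * Enqueue path used by the event crypto adapter. The response event described
 * in the op's event crypto metadata is encoded into instruction words 2 and 3
 * so that CPT hands the completion straight to SSO, carrying the inflight
 * request pointer as the event payload. For ordered flows (sched_type 0), the
 * worker waits to become flow head before submitting.
 */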
uint16_t
cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
{
	union rte_event_crypto_metadata *ec_mdata;
	struct cpt_inflight_req *infl_req;
	struct rte_event *rsp_info;
	struct cnxk_cpt_qp *qp;
	struct cpt_inst_s inst;
	uint8_t cdev_id;
	uint16_t qp_id;
	int ret;

	ec_mdata = cnxk_event_crypto_mdata_get(op);
	if (!ec_mdata) {
		rte_errno = EINVAL;
		return 0;
	}

	cdev_id = ec_mdata->request_info.cdev_id;
	qp_id = ec_mdata->request_info.queue_pair_id;
	qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
	rsp_info = &ec_mdata->response_info;

	if (unlikely(!qp->ca.enabled)) {
		rte_errno = EINVAL;
		return 0;
	}

	if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
		rte_errno = ENOMEM;
		return 0;
	}
	infl_req->op_flags = 0;

	ret = cn9k_cpt_inst_prep(qp, op, infl_req, &inst);
	if (unlikely(ret)) {
		plt_dp_err("Could not process op: %p", op);
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EINVAL;
		return 0;
	}

	infl_req->cop = op;
	infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
	infl_req->qp = qp;
	inst.w0.u64 = 0;
	inst.res_addr = (uint64_t)&infl_req->res;
	inst.w2.u64 = CNXK_CPT_INST_W2(
		(RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
		rsp_info->sched_type, rsp_info->queue_id, 0);
	inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

	if (roc_cpt_is_iq_full(&qp->lf)) {
		rte_mempool_put(qp->ca.req_mp, infl_req);
		rte_errno = EAGAIN;
		return 0;
	}

	if (!rsp_info->sched_type)
		roc_sso_hws_head_wait(tag_op);

	cn9k_cpt_inst_submit(&inst, qp->lmtline.lmt_base, qp->lmtline.io_addr);

	return 1;
}
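
/*
 * Map the CPT completion code (and microcode completion code) to an
 * rte_crypto op status. On success, verify authentication data in software
 * where the op asked for it and post-process asymmetric results; temporary
 * sessions created for sessionless ops are released here.
 */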
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
			      struct cpt_inflight_req *infl_req)
{
	struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
	unsigned int sz;

	if (likely(res->compcode == CPT_COMP_GOOD)) {
		if (unlikely(res->uc_compcode)) {
			if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
				cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
			else
				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

			plt_dp_info("Request failed with microcode error");
			plt_dp_info("MC completion code 0x%x",
				    res->uc_compcode);
			goto temp_sess_free;
		}

		cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			/* Verify authentication data if required */
			if (unlikely(infl_req->op_flags &
				     CPT_OP_FLAGS_AUTH_VERIFY)) {
				uintptr_t *rsp = infl_req->mdata;
				compl_auth_verify(cop, (uint8_t *)rsp[0],
						  rsp[1]);
			}
		} else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
			struct rte_crypto_asym_op *op = cop->asym;
			uintptr_t *mdata = infl_req->mdata;
			struct cnxk_ae_sess *sess;

			sess = get_asym_session_private_data(
				op->session, cn9k_cryptodev_driver_id);

			cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
		}
	} else {
		cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
		plt_dp_info("HW completion code 0x%x", res->compcode);

		switch (res->compcode) {
		case CPT_COMP_INSTERR:
			plt_dp_err("Request failed with instruction error");
			break;
		case CPT_COMP_FAULT:
			plt_dp_err("Request failed with DMA fault");
			break;
		case CPT_COMP_HWERR:
			plt_dp_err("Request failed with hardware error");
			break;
		default:
			plt_dp_err(
				"Request failed with unknown completion code");
		}
	}

temp_sess_free:
	if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
		if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
			sym_session_clear(cn9k_cryptodev_driver_id,
					  cop->sym->session);
			sz = rte_cryptodev_sym_get_existing_header_session_size(
				cop->sym->session);
			memset(cop->sym->session, 0, sz);
			rte_mempool_put(qp->sess_mp, cop->sym->session);
			cop->sym->session = NULL;
		}
	}
}
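
/*
 * Event dequeue side of the crypto adapter: get_work1 carries the inflight
 * request pointer that was planted in instruction word 3 at enqueue time,
 * from which the completed crypto op is recovered.
 */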
uintptr_t
cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
	struct cpt_inflight_req *infl_req;
	struct rte_crypto_op *cop;
	struct cnxk_cpt_qp *qp;

	infl_req = (struct cpt_inflight_req *)(get_work1);

	cop = infl_req->cop;
	qp = infl_req->qp;

	cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);

	if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
		rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

	rte_mempool_put(qp->ca.req_mp, infl_req);
	return (uintptr_t)cop;
}
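
/*
 * Burst dequeue. Completions are polled in submission order; a request still
 * marked CPT_COMP_NOT_DONE stops the scan, with a timeout logged if it has
 * been pending for more than DEFAULT_COMMAND_TIMEOUT.
 */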
static uint16_t
cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct cpt_inflight_req *infl_req;
	struct cnxk_cpt_qp *qp = qptr;
	struct pending_queue *pend_q;
	struct cpt_cn9k_res_s *res;
	struct rte_crypto_op *cop;
	uint32_t pq_deq_head;
	int i;

	const uint32_t nb_desc = qp->lf.nb_desc;

	pend_q = &qp->pend_q;

	nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);

	pq_deq_head = pend_q->deq_head;

	for (i = 0; i < nb_ops; i++) {
		infl_req = &pend_q->req_queue[pq_deq_head];

		res = (struct cpt_cn9k_res_s *)&infl_req->res;

		if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
			if (unlikely(rte_get_timer_cycles() >
				     pend_q->time_out)) {
				plt_err("Request timed out");
				pend_q->time_out = rte_get_timer_cycles() +
						   DEFAULT_COMMAND_TIMEOUT *
							   rte_get_timer_hz();
			}
			break;
		}

		MOD_INC(pq_deq_head, nb_desc);

		cop = infl_req->cop;
		ops[i] = cop;

		cn9k_cpt_dequeue_post_process(qp, cop, infl_req);

		if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
			rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
	}

	pend_q->pending_count -= i;
	pend_q->deq_head = pq_deq_head;

	return i;
}
void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
	dev->enqueue_burst = cn9k_cpt_enqueue_burst;
	dev->dequeue_burst = cn9k_cpt_dequeue_burst;
}
static void
cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
		      struct rte_cryptodev_info *info)
{
	if (info != NULL) {
		cnxk_cpt_dev_info_get(dev, info);
		info->driver_id = cn9k_cryptodev_driver_id;
	}
}
struct rte_cryptodev_ops cn9k_cpt_ops = {
	/* Device control ops */
	.dev_configure = cnxk_cpt_dev_config,
	.dev_start = cnxk_cpt_dev_start,
	.dev_stop = cnxk_cpt_dev_stop,
	.dev_close = cnxk_cpt_dev_close,
	.dev_infos_get = cn9k_cpt_dev_info_get,

	.stats_get = NULL,
	.stats_reset = NULL,
	.queue_pair_setup = cnxk_cpt_queue_pair_setup,
	.queue_pair_release = cnxk_cpt_queue_pair_release,

	/* Symmetric crypto ops */
	.sym_session_get_size = cnxk_cpt_sym_session_get_size,
	.sym_session_configure = cnxk_cpt_sym_session_configure,
	.sym_session_clear = cnxk_cpt_sym_session_clear,

	/* Asymmetric crypto ops */
	.asym_session_get_size = cnxk_ae_session_size_get,
	.asym_session_configure = cnxk_ae_session_cfg,
	.asym_session_clear = cnxk_ae_session_clear,
};