1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
 */
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
7 #include <rte_event_crypto_adapter.h>
9 #include "cn9k_cryptodev.h"
10 #include "cn9k_cryptodev_ops.h"
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
/*
 * Build a CPT instruction for a symmetric crypto op.
 *
 * Dispatches on the session's cached op mask: cipher (and combined) ops go
 * through fill_fc_params(), pure-auth ops through fill_digest_params().
 * Returns the fill helper's status (0 on success — TODO confirm; several
 * lines of this body are missing from this view, including the else branch
 * and the return statement).
 */
16 static __rte_always_inline int __rte_hot
17 cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
18 struct cnxk_se_sess *sess,
19 struct cpt_inflight_req *infl_req,
20 struct cpt_inst_s *inst)
/* Cached op type/mask from the session avoids re-deriving it per packet. */
25 cpt_op = sess->cpt_op;
27 if (cpt_op & ROC_SE_OP_CIPHER_MASK)
28 ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
/* NOTE(review): presumably the auth-only path; the `else` line is missing here. */
30 ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
/*
 * Create a temporary session for a SESSIONLESS symmetric op.
 *
 * Allocates a session object from the queue pair's session mempool,
 * configures it from the op's xform chain, and attaches it to the op
 * (sym_op->session). Returns the driver-private session on success.
 * NOTE(review): gaps in this view hide the error checks between the calls;
 * the final rte_mempool_put() is presumably the failure-path cleanup that
 * returns the unconfigured session to the pool — confirm against full file.
 */
36 static inline struct cnxk_se_sess *
37 cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
39 const int driver_id = cn9k_cryptodev_driver_id;
40 struct rte_crypto_sym_op *sym_op = op->sym;
41 struct rte_cryptodev_sym_session *sess;
42 struct cnxk_se_sess *priv;
45 /* Create temporary session */
46 sess = rte_cryptodev_sym_session_create(qp->sess_mp);
/* Program the HW context from the op's transform chain. */
50 ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
51 sess, qp->sess_mp_priv);
55 priv = get_sym_session_private_data(sess, driver_id);
/* Attach the temp session so the dequeue path can find and free it. */
57 sym_op->session = sess;
/* Failure path: give the session object back to the mempool. */
62 rte_mempool_put(qp->sess_mp, sess);
/*
 * Translate one rte_crypto_op into a CPT instruction (inst).
 *
 * Symmetric ops: use the attached session, or create a temporary one for
 * SESSIONLESS ops (torn down again on fill failure). Asymmetric ops are
 * handed to cnxk_ae_enqueue(). In both cases the session's precomputed
 * w7 (engine-group/context word) is copied into the instruction.
 * Unsupported op types are rejected with a log.
 * NOTE(review): several lines (returns, else branches, sym_op assignment)
 * are missing from this view; comments cover only what is visible.
 */
67 cn9k_cpt_prepare_instruction(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
68 struct cpt_inflight_req *infl_req,
69 struct cpt_inst_s *inst)
73 if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
74 struct rte_crypto_sym_op *sym_op;
75 struct cnxk_se_sess *sess;
77 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
/* Pre-created session: fetch this PMD's private data. */
79 sess = get_sym_session_private_data(
80 sym_op->session, cn9k_cryptodev_driver_id);
81 ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
/* SESSIONLESS: build a throwaway session from the op's xforms. */
84 sess = cn9k_cpt_sym_temp_sess_create(qp, op);
85 if (unlikely(sess == NULL)) {
86 plt_dp_err("Could not create temp session");
90 ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
/* Fill failed: undo the temp session before bailing out. */
93 sym_session_clear(cn9k_cryptodev_driver_id,
95 rte_mempool_put(qp->sess_mp, op->sym->session);
/* w7 carries the session's precomputed instruction word 7. */
98 inst->w7.u64 = sess->cpt_inst_w7;
99 } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
100 struct rte_crypto_asym_op *asym_op;
101 struct cnxk_ae_sess *sess;
103 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
105 sess = get_asym_session_private_data(
106 asym_op->session, cn9k_cryptodev_driver_id);
107 ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
108 inst->w7.u64 = sess->cpt_inst_w7;
/* Neither symmetric nor asymmetric (or sessionless asym): not supported. */
114 plt_dp_err("Unsupported op type");
/*
 * Push one CPT instruction to hardware via an LMTST transaction.
 *
 * Copies the instruction into the LMT line, then issues LDEOR to the I/O
 * address in a retry loop: a zero status means the LMTST was aborted
 * (e.g. by an interrupting event) and must be replayed.
 */
121 cn9k_cpt_submit_instruction(struct cpt_inst_s *inst, uint64_t lmtline,
127 /* Copy CPT command to LMTLINE */
128 roc_lmt_mov((void *)lmtline, inst, 2);
131 * Make sure compiler does not reorder memcpy and ldeor.
132 * LMTST transactions are always flushed from the write
133 * buffer immediately, a DMB is not required to push out
/* Retry until the LDEOR reports a successful (non-zero) submission. */
137 lmt_status = roc_lmt_submit_ldeor(io_addr);
138 } while (lmt_status == 0);
/*
 * Polled-mode enqueue burst (rte_cryptodev enqueue_burst hook).
 *
 * Clamps the burst to the free descriptors in the pending queue, then for
 * each op: takes the inflight slot at enq_tail, builds the instruction,
 * marks the result slot NOT_DONE, submits via LMTST, and advances the
 * tail (MOD_INC wraps at nb_desc). Finally bumps pending_count and arms
 * the dequeue-side timeout. Returns the number of ops enqueued —
 * presumably `count`; the return line is missing from this view.
 */
142 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
144 struct cpt_inflight_req *infl_req;
145 uint16_t nb_allowed, count = 0;
146 struct cnxk_cpt_qp *qp = qptr;
147 struct pending_queue *pend_q;
148 struct rte_crypto_op *op;
149 struct cpt_inst_s inst;
152 pend_q = &qp->pend_q;
/* Never enqueue more than the ring has free slots for. */
158 nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
159 nb_ops = RTE_MIN(nb_ops, nb_allowed);
161 for (count = 0; count < nb_ops; count++) {
163 infl_req = &pend_q->req_queue[pend_q->enq_tail];
164 infl_req->op_flags = 0;
166 ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
168 plt_dp_err("Could not process op: %p", op);
/* Hardware overwrites this result word when the request completes. */
173 infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
174 inst.res_addr = (uint64_t)&infl_req->res;
176 cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
177 qp->lmtline.io_addr);
178 MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
181 pend_q->pending_count += count;
/* Re-arm the timeout window used by the dequeue path to detect stalls. */
182 pend_q->time_out = rte_get_timer_cycles() +
183 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
/*
 * Event crypto adapter enqueue path (OP_FORWARD/OP_NEW via eventdev).
 *
 * Resolves the target queue pair from the op's event-crypto metadata,
 * allocates an inflight request from the adapter mempool, builds the CPT
 * instruction, and encodes the response event (flow id tagged as
 * RTE_EVENT_TYPE_CRYPTODEV, sched type, queue id) into w2 and the inflight
 * pointer into w3 so completion can be delivered as an event. Backs off
 * (freeing the inflight req) if the instruction queue is full. For ordered
 * scheduling (sched_type == 0 i.e. RTE_SCHED_TYPE_ORDERED — TODO confirm)
 * waits for HWS head before submitting to preserve ordering.
 * NOTE(review): error-path returns are missing from this view.
 */
189 cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
191 union rte_event_crypto_metadata *ec_mdata;
192 struct cpt_inflight_req *infl_req;
193 struct rte_event *rsp_info;
194 struct cnxk_cpt_qp *qp;
195 struct cpt_inst_s inst;
200 ec_mdata = cnxk_event_crypto_mdata_get(op);
/* Route to the qp named in the op's request metadata. */
206 cdev_id = ec_mdata->request_info.cdev_id;
207 qp_id = ec_mdata->request_info.queue_pair_id;
208 qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
209 rsp_info = &ec_mdata->response_info;
211 if (unlikely(!qp->ca.enabled)) {
216 if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
220 infl_req->op_flags = 0;
222 ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
224 plt_dp_err("Could not process op: %p", op);
225 rte_mempool_put(qp->ca.req_mp, infl_req);
230 infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
233 inst.res_addr = (uint64_t)&infl_req->res;
/* w2: completion event descriptor; w3: inflight-req back-pointer. */
234 inst.w2.u64 = CNXK_CPT_INST_W2(
235 (RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
236 rsp_info->sched_type, rsp_info->queue_id, 0);
237 inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
239 if (roc_cpt_is_iq_full(&qp->lf)) {
240 rte_mempool_put(qp->ca.req_mp, infl_req);
/* Ordered flows: wait for scheduler head before LMTST submit. */
245 if (!rsp_info->sched_type)
246 roc_sso_hws_head_wait(tag_op);
248 cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
249 qp->lmtline.io_addr);
/*
 * Convert a completed CPT result into rte_crypto_op status.
 *
 * On CPT_COMP_GOOD: a non-zero microcode code maps ICV miscompare to
 * AUTH_FAILED and anything else to ERROR; on clean success, symmetric ops
 * may need software auth verification (CPT_OP_FLAGS_AUTH_VERIFY) and
 * asymmetric ops get cnxk_ae_post_process(). On any other hardware
 * completion code the op is marked ERROR and the code is logged.
 * Finally, SESSIONLESS symmetric ops have their temporary session cleared,
 * zeroed and returned to the session mempool.
 * NOTE(review): this view is missing several else/case/brace lines;
 * comments describe only the visible logic.
 */
255 cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
256 struct cpt_inflight_req *infl_req)
258 struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
261 if (likely(res->compcode == CPT_COMP_GOOD)) {
/* HW completed, but microcode may still have flagged an error. */
262 if (unlikely(res->uc_compcode)) {
263 if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
264 cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
266 cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
268 plt_dp_info("Request failed with microcode error");
269 plt_dp_info("MC completion code 0x%x",
274 cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
275 if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
276 /* Verify authentication data if required */
277 if (unlikely(infl_req->op_flags &
278 CPT_OP_FLAGS_AUTH_VERIFY)) {
/* mdata[0] holds the digest address produced by the fill path. */
279 uintptr_t *rsp = infl_req->mdata;
280 compl_auth_verify(cop, (uint8_t *)rsp[0],
283 } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
284 struct rte_crypto_asym_op *op = cop->asym;
285 uintptr_t *mdata = infl_req->mdata;
286 struct cnxk_ae_sess *sess;
288 sess = get_asym_session_private_data(
289 op->session, cn9k_cryptodev_driver_id);
291 cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
/* Hardware-level failure: report and classify by completion code. */
294 cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
295 plt_dp_info("HW completion code 0x%x", res->compcode);
297 switch (res->compcode) {
298 case CPT_COMP_INSTERR:
299 plt_dp_err("Request failed with instruction error");
302 plt_dp_err("Request failed with DMA fault");
305 plt_dp_err("Request failed with hardware error");
309 "Request failed with unknown completion code");
/* Tear down the temporary session created for SESSIONLESS sym ops. */
314 if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
315 if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
316 sym_session_clear(cn9k_cryptodev_driver_id,
318 sz = rte_cryptodev_sym_get_existing_header_session_size(
/* Scrub the header before recycling so stale state cannot leak. */
320 memset(cop->sym->session, 0, sz);
321 rte_mempool_put(qp->sess_mp, cop->sym->session);
322 cop->sym->session = NULL;
/*
 * Event crypto adapter completion: recover the finished op from an event.
 *
 * get_work1 carries the inflight-request pointer stashed in inst w3 at
 * enqueue time. Post-processes the op, frees the metabuf if one was used,
 * returns the inflight request to the adapter mempool, and hands the op
 * back as a uintptr_t for the event payload.
 * NOTE(review): the lines assigning `cop` and `qp` from infl_req are
 * missing from this view — presumably infl_req->cop / infl_req->qp.
 */
328 cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
330 struct cpt_inflight_req *infl_req;
331 struct rte_crypto_op *cop;
332 struct cnxk_cpt_qp *qp;
334 infl_req = (struct cpt_inflight_req *)(get_work1);
338 cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
340 if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
341 rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
/* Inflight slot is done with; recycle it last. */
343 rte_mempool_put(qp->ca.req_mp, infl_req);
344 return (uintptr_t)cop;
/*
 * Polled-mode dequeue burst (rte_cryptodev dequeue_burst hook).
 *
 * Walks the pending queue from deq_head, stopping at the first request
 * still NOT_DONE (logging a timeout and re-arming the window if the
 * deadline passed). Each completed request is post-processed, its metabuf
 * freed if allocated, and the head advanced (MOD_INC wraps at nb_desc).
 * Updates pending_count/deq_head and returns the count — presumably `i`;
 * the return line is missing from this view, as are the break statements
 * inside the NOT_DONE branch.
 */
348 cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
350 struct cnxk_cpt_qp *qp = qptr;
351 struct pending_queue *pend_q;
352 struct cpt_inflight_req *infl_req;
353 struct cpt_cn9k_res_s *res;
354 struct rte_crypto_op *cop;
355 uint32_t pq_deq_head;
358 pend_q = &qp->pend_q;
/* Can only dequeue what has actually been submitted. */
360 nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);
362 pq_deq_head = pend_q->deq_head;
364 for (i = 0; i < nb_ops; i++) {
365 infl_req = &pend_q->req_queue[pq_deq_head];
367 res = (struct cpt_cn9k_res_s *)&infl_req->res;
/* Hardware hasn't written this result yet: stop in-order dequeue here. */
369 if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
370 if (unlikely(rte_get_timer_cycles() >
372 plt_err("Request timed out");
373 pend_q->time_out = rte_get_timer_cycles() +
374 DEFAULT_COMMAND_TIMEOUT *
380 MOD_INC(pq_deq_head, qp->lf.nb_desc);
386 cn9k_cpt_dequeue_post_process(qp, cop, infl_req);
388 if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
389 rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
/* Publish progress only after the loop so partial state isn't visible. */
392 pend_q->pending_count -= i;
393 pend_q->deq_head = pq_deq_head;
/* Install the cn9k burst-mode datapath hooks on the cryptodev. */
398 cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
400 dev->enqueue_burst = cn9k_cpt_enqueue_burst;
401 dev->dequeue_burst = cn9k_cpt_dequeue_burst;
/*
 * dev_infos_get hook: delegate to the common cnxk info getter, then
 * override the driver id with the cn9k-specific one.
 */
407 cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
408 struct rte_cryptodev_info *info)
411 cnxk_cpt_dev_info_get(dev, info);
412 info->driver_id = cn9k_cryptodev_driver_id;
416 struct rte_cryptodev_ops cn9k_cpt_ops = {
417 /* Device control ops */
418 .dev_configure = cnxk_cpt_dev_config,
419 .dev_start = cnxk_cpt_dev_start,
420 .dev_stop = cnxk_cpt_dev_stop,
421 .dev_close = cnxk_cpt_dev_close,
422 .dev_infos_get = cn9k_cpt_dev_info_get,
426 .queue_pair_setup = cnxk_cpt_queue_pair_setup,
427 .queue_pair_release = cnxk_cpt_queue_pair_release,
429 /* Symmetric crypto ops */
430 .sym_session_get_size = cnxk_cpt_sym_session_get_size,
431 .sym_session_configure = cnxk_cpt_sym_session_configure,
432 .sym_session_clear = cnxk_cpt_sym_session_clear,
434 /* Asymmetric crypto ops */
435 .asym_session_get_size = cnxk_ae_session_size_get,
436 .asym_session_configure = cnxk_ae_session_cfg,
437 .asym_session_clear = cnxk_ae_session_clear,