1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
8 #include "cn9k_cryptodev.h"
9 #include "cn9k_cryptodev_ops.h"
11 #include "cnxk_cryptodev.h"
12 #include "cnxk_cryptodev_ops.h"
/*
 * Build a CPT instruction for a symmetric crypto operation.
 *
 * Dispatches on the op mask cached in the SE session at configure time:
 * requests with a cipher component are filled via fill_fc_params(),
 * otherwise fill_digest_params() builds the (auth-only) request.
 * Returns the status code of the fill helper.
 *
 * NOTE(review): several interior lines (locals, else marker, return) are
 * elided in this extract — confirm against the full source.
 */
15 static __rte_always_inline int __rte_hot
16 cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
17 struct cnxk_se_sess *sess,
18 struct cpt_inflight_req *infl_req,
19 struct cpt_inst_s *inst)
/* Op type bitmask recorded when the session was configured. */
24 cpt_op = sess->cpt_op;
26 if (cpt_op & ROC_SE_OP_CIPHER_MASK)
27 ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
/* Auth-only path: no cipher bit set in cpt_op. */
29 ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
/*
 * Create a temporary SE session for a sessionless symmetric op.
 *
 * Allocates a session object from the queue pair's session mempool,
 * configures it from the op's xform chain and attaches it to the op
 * (sym_op->session) so the regular with-session datapath can be used.
 * On configure failure the session object is returned to the mempool.
 *
 * NOTE(review): the error-label lines are elided in this extract —
 * presumably the function returns NULL on failure; verify in full source.
 */
35 static inline struct cnxk_se_sess *
36 cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
38 const int driver_id = cn9k_cryptodev_driver_id;
39 struct rte_crypto_sym_op *sym_op = op->sym;
40 struct rte_cryptodev_sym_session *sess;
41 struct cnxk_se_sess *priv;
44 /* Create temporary session */
45 sess = rte_cryptodev_sym_session_create(qp->sess_mp);
/* Populate the session from the op's xform chain. */
49 ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
50 sess, qp->sess_mp_priv);
54 priv = get_sym_session_private_data(sess, driver_id);
/* Attach the freshly built session to the op for the fill path. */
56 sym_op->session = sess;
/* Error path: give the session object back to the mempool. */
61 rte_mempool_put(qp->sess_mp, sess);
/*
 * Burst enqueue for the cn9k CPT queue pair.
 *
 * For each op: reserve an inflight-request slot in the pending queue,
 * build a CPT instruction (symmetric via cn9k_cpt_sym_inst_fill, with a
 * temporary session created on the fly for sessionless ops; asymmetric
 * via cnxk_ae_enqueue), then submit it through the LMTLINE with an LDEOR
 * retry loop. Returns the number of ops accepted (implied by `count`;
 * return statement elided in this extract).
 */
66 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
68 struct cpt_inflight_req *infl_req;
69 struct rte_crypto_asym_op *asym_op;
70 struct rte_crypto_sym_op *sym_op;
71 uint16_t nb_allowed, count = 0;
72 struct cnxk_cpt_qp *qp = qptr;
73 struct pending_queue *pend_q;
74 struct rte_crypto_op *op;
75 struct cpt_inst_s inst;
/* LMT submission addresses for this queue pair's LF. */
83 lmtline = qp->lmtline.lmt_base;
84 io_addr = qp->lmtline.io_addr;
/* Cap the burst to the free descriptors left in the pending queue. */
90 nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
91 nb_ops = RTE_MIN(nb_ops, nb_allowed);
93 for (count = 0; count < nb_ops; count++) {
/* Claim the inflight slot at the enqueue tail for this op. */
95 infl_req = &pend_q->req_queue[pend_q->enq_tail];
96 infl_req->op_flags = 0;
98 if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
99 struct cnxk_se_sess *sess;
101 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
103 sess = get_sym_session_private_data(
105 cn9k_cryptodev_driver_id);
106 ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
/* Sessionless: build a temporary session just for this op. */
109 sess = cn9k_cpt_sym_temp_sess_create(qp, op);
110 if (unlikely(sess == NULL)) {
112 "Could not create temp session");
/* Fill with the temp session; on failure tear it down again. */
116 ret = cn9k_cpt_sym_inst_fill(qp, op, sess,
120 cn9k_cryptodev_driver_id,
122 rte_mempool_put(qp->sess_mp,
/* Per-session instruction word 7 (engine-group/egrp info). */
126 inst.w7.u64 = sess->cpt_inst_w7;
127 } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
128 struct cnxk_ae_sess *sess;
131 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
133 sess = get_asym_session_private_data(
135 cn9k_cryptodev_driver_id);
136 ret = cnxk_ae_enqueue(qp, op, infl_req, &inst,
138 inst.w7.u64 = sess->cpt_inst_w7;
141 plt_dp_err("Unsupported op type");
/* Fill failed: stop accepting ops at this point in the burst. */
146 plt_dp_err("Could not process op: %p", op);
/* Arm the result word so dequeue can poll for completion. */
152 infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
153 inst.res_addr = (uint64_t)&infl_req->res;
156 /* Copy CPT command to LMTLINE */
157 memcpy((void *)lmtline, &inst, sizeof(inst));
160 * Make sure compiler does not reorder memcpy and ldeor.
161 * LMTST transactions are always flushed from the write
162 * buffer immediately, a DMB is not required to push out
/* Retry LDEOR until the LMT store is accepted by hardware. */
166 lmt_status = roc_lmt_submit_ldeor(io_addr);
167 } while (lmt_status == 0);
169 MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
/* Account accepted ops and (re)arm the dequeue timeout window. */
172 pend_q->pending_count += count;
173 pend_q->time_out = rte_get_timer_cycles() +
174 DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
/*
 * Translate a completed CPT result into the crypto op's status.
 *
 * On CPT_COMP_GOOD with a zero microcode code the op is SUCCESS; for
 * symmetric ops a deferred auth verify is performed when requested, and
 * asymmetric results are post-processed via cnxk_ae_post_process(). A
 * non-zero microcode code maps ICV miscompare to AUTH_FAILED and all
 * others to ERROR. Hardware completion errors are logged per compcode.
 * Finally, sessionless symmetric ops get their temporary session cleared,
 * zeroed and returned to the session mempool.
 */
180 cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
181 struct cpt_inflight_req *infl_req)
183 struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
186 if (likely(res->compcode == CPT_COMP_GOOD)) {
/* HW completed OK but microcode reported an error. */
187 if (unlikely(res->uc_compcode)) {
188 if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
189 cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
191 cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
193 plt_dp_info("Request failed with microcode error");
194 plt_dp_info("MC completion code 0x%x",
199 cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
200 if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
201 /* Verify authentication data if required */
202 if (unlikely(infl_req->op_flags &
203 CPT_OP_FLAGS_AUTH_VERIFY)) {
/* mdata[0] holds the computed digest location. */
204 uintptr_t *rsp = infl_req->mdata;
205 compl_auth_verify(cop, (uint8_t *)rsp[0],
208 } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
209 struct rte_crypto_asym_op *op = cop->asym;
210 uintptr_t *mdata = infl_req->mdata;
211 struct cnxk_ae_sess *sess;
213 sess = get_asym_session_private_data(
214 op->session, cn9k_cryptodev_driver_id);
216 cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
/* Hardware-level completion error: log the specific cause. */
219 cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
220 plt_dp_info("HW completion code 0x%x", res->compcode);
222 switch (res->compcode) {
223 case CPT_COMP_INSTERR:
224 plt_dp_err("Request failed with instruction error");
227 plt_dp_err("Request failed with DMA fault");
230 plt_dp_err("Request failed with hardware error");
234 "Request failed with unknown completion code");
/* Sessionless ops used a temp session: clear and recycle it. */
239 if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
240 if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
241 sym_session_clear(cn9k_cryptodev_driver_id,
243 sz = rte_cryptodev_sym_get_existing_header_session_size(
245 memset(cop->sym->session, 0, sz);
246 rte_mempool_put(qp->sess_mp, cop->sym->session);
247 cop->sym->session = NULL;
/*
 * Burst dequeue for the cn9k CPT queue pair.
 *
 * Polls the pending queue from the dequeue head: stops at the first
 * still-in-flight request (CPT_COMP_NOT_DONE), logging a timeout and
 * re-arming the timeout window when the deadline has passed. Completed
 * ops are post-processed and their metabuf (if any) returned to the
 * meta mempool. Finally the pending count and dequeue head are updated.
 * Returns the number of ops dequeued (return elided in this extract).
 */
253 cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
255 struct cnxk_cpt_qp *qp = qptr;
256 struct pending_queue *pend_q;
257 struct cpt_inflight_req *infl_req;
258 struct cpt_cn9k_res_s *res;
259 struct rte_crypto_op *cop;
260 uint32_t pq_deq_head;
263 pend_q = &qp->pend_q;
/* Never dequeue more than is actually in flight. */
265 nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);
267 pq_deq_head = pend_q->deq_head;
269 for (i = 0; i < nb_ops; i++) {
270 infl_req = &pend_q->req_queue[pq_deq_head];
272 res = (struct cpt_cn9k_res_s *)&infl_req->res;
274 if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
/* Still pending: check whether the timeout window expired. */
275 if (unlikely(rte_get_timer_cycles() >
277 plt_err("Request timed out");
278 pend_q->time_out = rte_get_timer_cycles() +
279 DEFAULT_COMMAND_TIMEOUT *
285 MOD_INC(pq_deq_head, qp->lf.nb_desc);
/* Convert the HW result into the op's status / output. */
291 cn9k_cpt_dequeue_post_process(qp, cop, infl_req);
293 if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
294 rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
/* Commit progress: i ops consumed from the pending queue. */
297 pend_q->pending_count -= i;
298 pend_q->deq_head = pq_deq_head;
/*
 * Install the cn9k-specific datapath (enqueue/dequeue) handlers on the
 * cryptodev.
 */
303 cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
305 dev->enqueue_burst = cn9k_cpt_enqueue_burst;
306 dev->dequeue_burst = cn9k_cpt_dequeue_burst;
/*
 * Fill device info: delegate to the common cnxk handler, then override
 * the driver id with the cn9k-specific one.
 */
312 cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
313 struct rte_cryptodev_info *info)
316 cnxk_cpt_dev_info_get(dev, info);
317 info->driver_id = cn9k_cryptodev_driver_id;
/*
 * cryptodev ops table for the cn9k PMD: common cnxk handlers throughout,
 * with only dev_infos_get specialized for cn9k.
 */
321 struct rte_cryptodev_ops cn9k_cpt_ops = {
322 /* Device control ops */
323 .dev_configure = cnxk_cpt_dev_config,
324 .dev_start = cnxk_cpt_dev_start,
325 .dev_stop = cnxk_cpt_dev_stop,
326 .dev_close = cnxk_cpt_dev_close,
327 .dev_infos_get = cn9k_cpt_dev_info_get,
/* Queue pair ops */
331 .queue_pair_setup = cnxk_cpt_queue_pair_setup,
332 .queue_pair_release = cnxk_cpt_queue_pair_release,
334 /* Symmetric crypto ops */
335 .sym_session_get_size = cnxk_cpt_sym_session_get_size,
336 .sym_session_configure = cnxk_cpt_sym_session_configure,
337 .sym_session_clear = cnxk_cpt_sym_session_clear,
339 /* Asymmetric crypto ops */
340 .asym_session_get_size = cnxk_ae_session_size_get,
341 .asym_session_configure = cnxk_ae_session_cfg,
342 .asym_session_clear = cnxk_ae_session_clear,