crypto/cnxk: add cn9k crypto adapter fast path
[dpdk.git] / drivers / crypto / cnxk / cn9k_cryptodev_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
7 #include <rte_event_crypto_adapter.h>
8
9 #include "cn9k_cryptodev.h"
10 #include "cn9k_cryptodev_ops.h"
11 #include "cnxk_ae.h"
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
14 #include "cnxk_se.h"
15
16 static __rte_always_inline int __rte_hot
17 cn9k_cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
18                        struct cnxk_se_sess *sess,
19                        struct cpt_inflight_req *infl_req,
20                        struct cpt_inst_s *inst)
21 {
22         uint64_t cpt_op;
23         int ret = -1;
24
25         cpt_op = sess->cpt_op;
26
27         if (cpt_op & ROC_SE_OP_CIPHER_MASK)
28                 ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
29         else
30                 ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
31                                          inst);
32
33         return ret;
34 }
35
36 static inline struct cnxk_se_sess *
37 cn9k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
38 {
39         const int driver_id = cn9k_cryptodev_driver_id;
40         struct rte_crypto_sym_op *sym_op = op->sym;
41         struct rte_cryptodev_sym_session *sess;
42         struct cnxk_se_sess *priv;
43         int ret;
44
45         /* Create temporary session */
46         sess = rte_cryptodev_sym_session_create(qp->sess_mp);
47         if (sess == NULL)
48                 return NULL;
49
50         ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
51                                     sess, qp->sess_mp_priv);
52         if (ret)
53                 goto sess_put;
54
55         priv = get_sym_session_private_data(sess, driver_id);
56
57         sym_op->session = sess;
58
59         return priv;
60
61 sess_put:
62         rte_mempool_put(qp->sess_mp, sess);
63         return NULL;
64 }
65
66 static inline int
67 cn9k_cpt_prepare_instruction(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
68                              struct cpt_inflight_req *infl_req,
69                              struct cpt_inst_s *inst)
70 {
71         int ret;
72
73         if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
74                 struct rte_crypto_sym_op *sym_op;
75                 struct cnxk_se_sess *sess;
76
77                 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
78                         sym_op = op->sym;
79                         sess = get_sym_session_private_data(
80                                 sym_op->session, cn9k_cryptodev_driver_id);
81                         ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
82                                                      inst);
83                 } else {
84                         sess = cn9k_cpt_sym_temp_sess_create(qp, op);
85                         if (unlikely(sess == NULL)) {
86                                 plt_dp_err("Could not create temp session");
87                                 return -1;
88                         }
89
90                         ret = cn9k_cpt_sym_inst_fill(qp, op, sess, infl_req,
91                                                      inst);
92                         if (unlikely(ret)) {
93                                 sym_session_clear(cn9k_cryptodev_driver_id,
94                                                   op->sym->session);
95                                 rte_mempool_put(qp->sess_mp, op->sym->session);
96                         }
97                 }
98                 inst->w7.u64 = sess->cpt_inst_w7;
99         } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
100                 struct rte_crypto_asym_op *asym_op;
101                 struct cnxk_ae_sess *sess;
102
103                 if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
104                         asym_op = op->asym;
105                         sess = get_asym_session_private_data(
106                                 asym_op->session, cn9k_cryptodev_driver_id);
107                         ret = cnxk_ae_enqueue(qp, op, infl_req, inst, sess);
108                         inst->w7.u64 = sess->cpt_inst_w7;
109                 } else {
110                         ret = -EINVAL;
111                 }
112         } else {
113                 ret = -EINVAL;
114                 plt_dp_err("Unsupported op type");
115         }
116
117         return ret;
118 }
119
/*
 * Submit one CPT instruction to hardware: copy it into the LMT line and
 * trigger the LMTST with an LDEOR to the queue's I/O address. The LDEOR
 * returns a status; a zero status indicates the LMTST did not take effect
 * and the whole copy+submit sequence is retried (hence the loop).
 */
static inline void
cn9k_cpt_submit_instruction(struct cpt_inst_s *inst, uint64_t lmtline,
                            uint64_t io_addr)
{
        uint64_t lmt_status;

        do {
                /* Copy CPT command to LMTLINE */
                roc_lmt_mov((void *)lmtline, inst, 2);

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = roc_lmt_submit_ldeor(io_addr);
        } while (lmt_status == 0); /* retry until the LMTST is accepted */
}
140
141 static uint16_t
142 cn9k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
143 {
144         struct cpt_inflight_req *infl_req;
145         uint16_t nb_allowed, count = 0;
146         struct cnxk_cpt_qp *qp = qptr;
147         struct pending_queue *pend_q;
148         struct rte_crypto_op *op;
149         struct cpt_inst_s inst;
150         int ret;
151
152         pend_q = &qp->pend_q;
153
154         inst.w0.u64 = 0;
155         inst.w2.u64 = 0;
156         inst.w3.u64 = 0;
157
158         nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
159         nb_ops = RTE_MIN(nb_ops, nb_allowed);
160
161         for (count = 0; count < nb_ops; count++) {
162                 op = ops[count];
163                 infl_req = &pend_q->req_queue[pend_q->enq_tail];
164                 infl_req->op_flags = 0;
165
166                 ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
167                 if (unlikely(ret)) {
168                         plt_dp_err("Could not process op: %p", op);
169                         break;
170                 }
171
172                 infl_req->cop = op;
173                 infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
174                 inst.res_addr = (uint64_t)&infl_req->res;
175
176                 cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
177                                             qp->lmtline.io_addr);
178                 MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
179         }
180
181         pend_q->pending_count += count;
182         pend_q->time_out = rte_get_timer_cycles() +
183                            DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();
184
185         return count;
186 }
187
188 uint16_t
189 cn9k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
190 {
191         union rte_event_crypto_metadata *ec_mdata;
192         struct cpt_inflight_req *infl_req;
193         struct rte_event *rsp_info;
194         struct cnxk_cpt_qp *qp;
195         struct cpt_inst_s inst;
196         uint8_t cdev_id;
197         uint16_t qp_id;
198         int ret;
199
200         ec_mdata = cnxk_event_crypto_mdata_get(op);
201         if (!ec_mdata) {
202                 rte_errno = EINVAL;
203                 return 0;
204         }
205
206         cdev_id = ec_mdata->request_info.cdev_id;
207         qp_id = ec_mdata->request_info.queue_pair_id;
208         qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
209         rsp_info = &ec_mdata->response_info;
210
211         if (unlikely(!qp->ca.enabled)) {
212                 rte_errno = EINVAL;
213                 return 0;
214         }
215
216         if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
217                 rte_errno = ENOMEM;
218                 return 0;
219         }
220         infl_req->op_flags = 0;
221
222         ret = cn9k_cpt_prepare_instruction(qp, op, infl_req, &inst);
223         if (unlikely(ret)) {
224                 plt_dp_err("Could not process op: %p", op);
225                 rte_mempool_put(qp->ca.req_mp, infl_req);
226                 return 0;
227         }
228
229         infl_req->cop = op;
230         infl_req->res.cn9k.compcode = CPT_COMP_NOT_DONE;
231         infl_req->qp = qp;
232         inst.w0.u64 = 0;
233         inst.res_addr = (uint64_t)&infl_req->res;
234         inst.w2.u64 = CNXK_CPT_INST_W2(
235                 (RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
236                 rsp_info->sched_type, rsp_info->queue_id, 0);
237         inst.w3.u64 = CNXK_CPT_INST_W3(1, infl_req);
238
239         if (roc_cpt_is_iq_full(&qp->lf)) {
240                 rte_mempool_put(qp->ca.req_mp, infl_req);
241                 rte_errno = EAGAIN;
242                 return 0;
243         }
244
245         if (!rsp_info->sched_type)
246                 roc_sso_hws_head_wait(tag_op);
247
248         cn9k_cpt_submit_instruction(&inst, qp->lmtline.lmt_base,
249                                     qp->lmtline.io_addr);
250
251         return 1;
252 }
253
/*
 * Post-process one completed request: translate the CN9K result word into
 * an rte_crypto op status, run any deferred work (auth verification,
 * asymmetric result unpacking) and, for sessionless ops, destroy the
 * temporary session attached at enqueue time.
 */
static inline void
cn9k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp, struct rte_crypto_op *cop,
                              struct cpt_inflight_req *infl_req)
{
        struct cpt_cn9k_res_s *res = (struct cpt_cn9k_res_s *)&infl_req->res;
        unsigned int sz;

        if (likely(res->compcode == CPT_COMP_GOOD)) {
                /* HW accepted the instruction; check the microcode code. */
                if (unlikely(res->uc_compcode)) {
                        if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        plt_dp_info("Request failed with microcode error");
                        plt_dp_info("MC completion code 0x%x",
                                    res->uc_compcode);
                        /* Still release the temp session below. */
                        goto temp_sess_free;
                }

                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        /* Verify authentication data if required */
                        if (unlikely(infl_req->op_flags &
                                     CPT_OP_FLAGS_AUTH_VERIFY)) {
                                /* mdata[0]/mdata[1] hold the computed MAC
                                 * address and length stashed at enqueue.
                                 */
                                uintptr_t *rsp = infl_req->mdata;
                                compl_auth_verify(cop, (uint8_t *)rsp[0],
                                                  rsp[1]);
                        }
                } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        struct rte_crypto_asym_op *op = cop->asym;
                        uintptr_t *mdata = infl_req->mdata;
                        struct cnxk_ae_sess *sess;

                        sess = get_asym_session_private_data(
                                op->session, cn9k_cryptodev_driver_id);

                        /* Copy the asymmetric result out of the meta buf. */
                        cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
                }
        } else {
                /* Hardware-level completion failure. */
                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                plt_dp_info("HW completion code 0x%x", res->compcode);

                switch (res->compcode) {
                case CPT_COMP_INSTERR:
                        plt_dp_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_dp_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_dp_err("Request failed with hardware error");
                        break;
                default:
                        plt_dp_err(
                                "Request failed with unknown completion code");
                }
        }

temp_sess_free:
        /* Sessionless symmetric ops carry a temp session created at
         * enqueue; clear it, scrub the header and return it to the pool.
         */
        if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        sym_session_clear(cn9k_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }
}
326
327 uintptr_t
328 cn9k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
329 {
330         struct cpt_inflight_req *infl_req;
331         struct rte_crypto_op *cop;
332         struct cnxk_cpt_qp *qp;
333
334         infl_req = (struct cpt_inflight_req *)(get_work1);
335         cop = infl_req->cop;
336         qp = infl_req->qp;
337
338         cn9k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req);
339
340         if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
341                 rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
342
343         rte_mempool_put(qp->ca.req_mp, infl_req);
344         return (uintptr_t)cop;
345 }
346
/*
 * Poll-mode dequeue: walk the pending queue from deq_head, post-process
 * every completed request and return the ops to the caller. Stops at the
 * first request still marked CPT_COMP_NOT_DONE; if that request has been
 * outstanding past the timeout, log it and re-arm the timeout window.
 */
static uint16_t
cn9k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_inflight_req *infl_req;
        struct cpt_cn9k_res_s *res;
        struct rte_crypto_op *cop;
        uint32_t pq_deq_head;
        int i;

        pend_q = &qp->pend_q;

        /* Never return more than is actually in flight. */
        nb_ops = RTE_MIN(nb_ops, pend_q->pending_count);

        pq_deq_head = pend_q->deq_head;

        for (i = 0; i < nb_ops; i++) {
                infl_req = &pend_q->req_queue[pq_deq_head];

                res = (struct cpt_cn9k_res_s *)&infl_req->res;

                if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
                        /* Oldest outstanding request not complete yet. */
                        if (unlikely(rte_get_timer_cycles() >
                                     pend_q->time_out)) {
                                plt_err("Request timed out");
                                /* Re-arm so the timeout fires again later
                                 * rather than logging on every poll.
                                 */
                                pend_q->time_out = rte_get_timer_cycles() +
                                                   DEFAULT_COMMAND_TIMEOUT *
                                                           rte_get_timer_hz();
                        }
                        break;
                }

                /* Ring-buffer advance with wrap at nb_desc. */
                MOD_INC(pq_deq_head, qp->lf.nb_desc);

                cop = infl_req->cop;

                ops[i] = cop;

                cn9k_cpt_dequeue_post_process(qp, cop, infl_req);

                /* Free the meta buffer if one was attached at enqueue. */
                if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                        rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
        }

        pend_q->pending_count -= i;
        pend_q->deq_head = pq_deq_head;

        return i;
}
/* Install the cn9k fast-path burst handlers on the device. */
void
cn9k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = cn9k_cpt_enqueue_burst;
        dev->dequeue_burst = cn9k_cpt_dequeue_burst;

        /* Full barrier so the pointer updates are visible to other cores
         * before any datapath use.
         */
        rte_mb();
}
405
406 static void
407 cn9k_cpt_dev_info_get(struct rte_cryptodev *dev,
408                       struct rte_cryptodev_info *info)
409 {
410         if (info != NULL) {
411                 cnxk_cpt_dev_info_get(dev, info);
412                 info->driver_id = cn9k_cryptodev_driver_id;
413         }
414 }
415
/* cn9k cryptodev ops table: mostly shared cnxk handlers, with only the
 * device-info callback specialized for cn9k. stats_get/stats_reset are
 * not implemented.
 */
struct rte_cryptodev_ops cn9k_cpt_ops = {
        /* Device control ops */
        .dev_configure = cnxk_cpt_dev_config,
        .dev_start = cnxk_cpt_dev_start,
        .dev_stop = cnxk_cpt_dev_stop,
        .dev_close = cnxk_cpt_dev_close,
        .dev_infos_get = cn9k_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = cnxk_cpt_queue_pair_setup,
        .queue_pair_release = cnxk_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = cnxk_cpt_sym_session_get_size,
        .sym_session_configure = cnxk_cpt_sym_session_configure,
        .sym_session_clear = cnxk_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = cnxk_ae_session_size_get,
        .asym_session_configure = cnxk_ae_session_cfg,
        .asym_session_clear = cnxk_ae_session_clear,

};