crypto/cnxk: add digest support
[dpdk.git] drivers/crypto/cnxk/cn10k_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>

#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"

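/*
 * Allocate and configure a temporary session for a sessionless symmetric op.
 * The session is taken from the queue pair's session mempool and attached to
 * the op; on configuration failure it is returned to the mempool.
 */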
static inline struct cnxk_se_sess *
cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
        const int driver_id = cn10k_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        struct cnxk_se_sess *priv;
        int ret;

        /* Create temporary session */
        sess = rte_cryptodev_sym_session_create(qp->sess_mp);
        if (sess == NULL)
                return NULL;

        ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
                                    sess, qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        priv = get_sym_session_private_data(sess, driver_id);

        sym_op->session = sess;

        return priv;

sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return NULL;
}

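/*
 * Fill a CPT instruction for a symmetric op. Ops involving a cipher go
 * through the flexi-crypto path (fill_fc_params()); auth-only (digest) ops
 * go through fill_digest_params().
 */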
static __rte_always_inline int __rte_hot
cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                  struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
                  struct cpt_inst_s *inst)
{
        uint64_t cpt_op;
        int ret = -1;

        cpt_op = sess->cpt_op;

        if (cpt_op & ROC_SE_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
                                         inst);

        return ret;
}

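/*
 * Prepare a single CPT instruction for the op at ops[0]. With-session ops use
 * the session's private data; sessionless ops get a temporary session, which
 * is cleaned up here if the instruction cannot be filled. Returns the number
 * of instructions prepared (1) or 0 on failure.
 */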
static inline int
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
                    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
{
        struct rte_crypto_sym_op *sym_op;
        struct cnxk_se_sess *sess;
        struct rte_crypto_op *op;
        uint64_t w7;
        int ret;

        op = ops[0];

        inst[0].w0.u64 = 0;
        inst[0].w2.u64 = 0;
        inst[0].w3.u64 = 0;

        sym_op = op->sym;

        if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        sess = get_sym_session_private_data(
                                sym_op->session, cn10k_cryptodev_driver_id);
                        ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                &inst[0]);
                        if (unlikely(ret))
                                return 0;
                        w7 = sess->cpt_inst_w7;
                } else {
                        sess = cn10k_cpt_sym_temp_sess_create(qp, op);
                        if (unlikely(sess == NULL)) {
                                plt_dp_err("Could not create temp session");
                                return 0;
                        }

                        ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                &inst[0]);
                        if (unlikely(ret)) {
                                sym_session_clear(cn10k_cryptodev_driver_id,
                                                  op->sym->session);
                                rte_mempool_put(qp->sess_mp, op->sym->session);
                                return 0;
                        }
                        w7 = sess->cpt_inst_w7;
                }
        } else {
                plt_dp_err("Unsupported op type");
                return 0;
        }

        inst[0].res_addr = (uint64_t)&infl_req->res;
        infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
        infl_req->cop = op;

        inst[0].w7.u64 = w7;

        return 1;
}

#define PKTS_PER_LOOP   32
#define PKTS_PER_STEORL 16

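/*
 * Enqueue up to PKTS_PER_LOOP ops per iteration by writing CPT instructions
 * onto the queue pair's LMT lines and issuing them with STEORL. A batch
 * larger than PKTS_PER_STEORL is split across two STEORL submissions.
 */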
static uint16_t
cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint64_t lmt_base, lmt_arg, io_addr;
        struct cpt_inflight_req *infl_req;
        uint16_t nb_allowed, count = 0;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_inst_s *inst;
        uint16_t lmt_id;
        int ret, i;

        pend_q = &qp->pend_q;

        nb_allowed = qp->lf.nb_desc - pend_q->pending_count;
        nb_ops = RTE_MIN(nb_ops, nb_allowed);

        if (unlikely(nb_ops == 0))
                return 0;

        lmt_base = qp->lmtline.lmt_base;
        io_addr = qp->lmtline.io_addr;

        ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
        inst = (struct cpt_inst_s *)lmt_base;

again:
        for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
                infl_req = &pend_q->req_queue[pend_q->enq_tail];
                infl_req->op_flags = 0;

                ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
                if (unlikely(ret != 1)) {
                        plt_dp_err("Could not process op: %p", ops + i);
                        if (i == 0)
                                goto update_pending;
                        break;
                }

                MOD_INC(pend_q->enq_tail, qp->lf.nb_desc);
        }

        if (i > PKTS_PER_STEORL) {
                lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
                          (uint64_t)lmt_id;
                roc_lmt_submit_steorl(lmt_arg, io_addr);
                lmt_arg = ROC_CN10K_CPT_LMT_ARG |
                          (i - PKTS_PER_STEORL - 1) << 12 |
                          (uint64_t)(lmt_id + PKTS_PER_STEORL);
                roc_lmt_submit_steorl(lmt_arg, io_addr);
        } else {
                lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 |
                          (uint64_t)lmt_id;
                roc_lmt_submit_steorl(lmt_arg, io_addr);
        }

        rte_io_wmb();

        if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
                nb_ops -= i;
                ops += i;
                count += i;
                goto again;
        }

update_pending:
        pend_q->pending_count += count + i;

        pend_q->time_out = rte_get_timer_cycles() +
                           DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        return count + i;
}

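/*
 * Convert the hardware and microcode completion codes into an rte_crypto op
 * status, run software auth verification when CPT_OP_FLAGS_AUTH_VERIFY is
 * set, and release temporary sessions created for sessionless ops.
 */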
static inline void
cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
                               struct rte_crypto_op *cop,
                               struct cpt_inflight_req *infl_req)
{
        struct cpt_cn10k_res_s *res = (struct cpt_cn10k_res_s *)&infl_req->res;
        unsigned int sz;

        if (likely(res->compcode == CPT_COMP_GOOD ||
                   res->compcode == CPT_COMP_WARN)) {
                if (unlikely(res->uc_compcode)) {
                        if (res->uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        plt_dp_info("Request failed with microcode error");
                        plt_dp_info("MC completion code 0x%x",
                                    res->uc_compcode);
                        goto temp_sess_free;
                }

                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {

                        /* Verify authentication data if required */
                        if (unlikely(infl_req->op_flags &
                                     CPT_OP_FLAGS_AUTH_VERIFY)) {
                                uintptr_t *rsp = infl_req->mdata;
                                compl_auth_verify(cop, (uint8_t *)rsp[0],
                                                  rsp[1]);
                        }
                }
        } else {
                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                plt_dp_info("HW completion code 0x%x", res->compcode);

                switch (res->compcode) {
                case CPT_COMP_INSTERR:
                        plt_dp_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_dp_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_dp_err("Request failed with hardware error");
                        break;
                default:
                        plt_dp_err(
                                "Request failed with unknown completion code");
                }
        }

temp_sess_free:
        if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        sym_session_clear(cn10k_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }
}

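/*
 * Dequeue completed requests from the pending queue, stopping at the first
 * request still marked CPT_COMP_NOT_DONE. Each completed op is post-processed
 * and any meta buffer is returned to its mempool.
 */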
static uint16_t
cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct cpt_inflight_req *infl_req;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_cn10k_res_s *res;
        struct rte_crypto_op *cop;
        int i, nb_pending;

        pend_q = &qp->pend_q;

        nb_pending = pend_q->pending_count;

        if (nb_ops > nb_pending)
                nb_ops = nb_pending;

        for (i = 0; i < nb_ops; i++) {
                infl_req = &pend_q->req_queue[pend_q->deq_head];

                res = (struct cpt_cn10k_res_s *)&infl_req->res;

                if (unlikely(res->compcode == CPT_COMP_NOT_DONE)) {
                        if (unlikely(rte_get_timer_cycles() >
                                     pend_q->time_out)) {
                                plt_err("Request timed out");
                                pend_q->time_out = rte_get_timer_cycles() +
                                                   DEFAULT_COMMAND_TIMEOUT *
                                                           rte_get_timer_hz();
                        }
                        break;
                }

                MOD_INC(pend_q->deq_head, qp->lf.nb_desc);

                cop = infl_req->cop;

                ops[i] = cop;

                cn10k_cpt_dequeue_post_process(qp, cop, infl_req);

                if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                        rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
        }

        pend_q->pending_count -= i;

        return i;
}

void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = cn10k_cpt_enqueue_burst;
        dev->dequeue_burst = cn10k_cpt_dequeue_burst;

        rte_mb();
}

static void
cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
                       struct rte_cryptodev_info *info)
{
        if (info != NULL) {
                cnxk_cpt_dev_info_get(dev, info);
                info->driver_id = cn10k_cryptodev_driver_id;
        }
}

struct rte_cryptodev_ops cn10k_cpt_ops = {
        /* Device control ops */
        .dev_configure = cnxk_cpt_dev_config,
        .dev_start = cnxk_cpt_dev_start,
        .dev_stop = cnxk_cpt_dev_stop,
        .dev_close = cnxk_cpt_dev_close,
        .dev_infos_get = cn10k_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = cnxk_cpt_queue_pair_setup,
        .queue_pair_release = cnxk_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = cnxk_cpt_sym_session_get_size,
        .sym_session_configure = cnxk_cpt_sym_session_configure,
        .sym_session_clear = cnxk_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = NULL,
        .asym_session_configure = NULL,
        .asym_session_clear = NULL,
};