crypto/cnxk: add cipher operation in session
[dpdk.git] drivers/crypto/cnxk/cnxk_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>

#include "roc_cpt.h"

#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"

/* Compute the metabuf length needed per crypto op */
static int
cnxk_cpt_get_mlen(void)
{
        uint32_t len;

        /* For MAC */
        len = 2 * sizeof(uint64_t);
        len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);

        len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
        len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
                               (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
                                2) * ROC_SE_SG_ENTRY_SIZE),
                              8);

        return len;
}

int
cnxk_cpt_dev_config(struct rte_cryptodev *dev,
                    struct rte_cryptodev_config *conf)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        uint16_t nb_lf_avail, nb_lf;
        int ret;

        dev->feature_flags &= ~conf->ff_disable;

        nb_lf_avail = roc_cpt->nb_lf_avail;
        nb_lf = conf->nb_queue_pairs;

        if (nb_lf > nb_lf_avail)
                return -ENOTSUP;

        ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
        if (ret) {
                plt_err("Could not configure device");
                return ret;
        }

        return 0;
}
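
How this hook is reached, as a minimal sketch: the application fills struct rte_cryptodev_config and calls rte_cryptodev_configure(), which lands in cnxk_cpt_dev_config() above through the PMD ops; each requested queue pair is backed by one CPT LF. The helper below is illustrative only and assumes a cnxk crypto device already probed at dev_id.

static int
example_dev_configure(uint8_t dev_id, uint16_t nb_qps)
{
        struct rte_cryptodev_config conf = {
                .socket_id = rte_socket_id(),
                .nb_queue_pairs = nb_qps,
                .ff_disable = 0, /* keep all advertised feature flags */
        };

        /* Returns -ENOTSUP if nb_qps exceeds the available CPT LFs */
        return rte_cryptodev_configure(dev_id, &conf);
}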

int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        return 0;
}

void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);
}

int
cnxk_cpt_dev_close(struct rte_cryptodev *dev)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        uint16_t i;
        int ret;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = cnxk_cpt_queue_pair_release(dev, i);
                if (ret < 0) {
                        plt_err("Could not release queue pair %u", i);
                        return ret;
                }
        }

        roc_cpt_dev_clear(&vf->cpt);

        return 0;
}

void
cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;

        info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
        info->feature_flags = dev->feature_flags;
        info->capabilities = NULL;
        info->sym.max_nb_sessions = 0;
        info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
        info->min_mbuf_tailroom_req = 0;
}

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
        snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
}

static int
cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
                                struct cnxk_cpt_qp *qp, uint8_t qp_id,
                                uint32_t nb_elements)
{
        char mempool_name[RTE_MEMPOOL_NAMESIZE];
        struct cpt_qp_meta_info *meta_info;
        struct rte_mempool *pool;
        uint32_t cache_sz;
        int mlen = 8;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
                /* Get meta len */
                mlen = cnxk_cpt_get_mlen();
        }

        cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);

        /* Allocate mempool */

        snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
                 dev->data->dev_id, qp_id);

        pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
                                  NULL, NULL, NULL, NULL, rte_socket_id(), 0);

        if (pool == NULL) {
                plt_err("Could not create mempool for metabuf");
                return rte_errno;
        }

        meta_info = &qp->meta_info;

        meta_info->pool = pool;
        meta_info->mlen = mlen;

        return 0;
}

static void
cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
{
        struct cpt_qp_meta_info *meta_info = &qp->meta_info;

        rte_mempool_free(meta_info->pool);

        meta_info->pool = NULL;
        meta_info->mlen = 0;
}

static struct cnxk_cpt_qp *
cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint32_t iq_len)
{
        const struct rte_memzone *pq_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        struct cnxk_cpt_qp *qp;
        uint32_t len;
        uint8_t *va;
        int ret;

        /* Allocate queue pair */
        qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
                                ROC_ALIGN, 0);
        if (qp == NULL) {
                plt_err("Could not allocate queue pair");
                return NULL;
        }

        /* For pending queue */
        len = iq_len * sizeof(struct cpt_inflight_req);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp_id);

        pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
                                             RTE_MEMZONE_SIZE_HINT_ONLY |
                                                     RTE_MEMZONE_256MB,
                                             RTE_CACHE_LINE_SIZE);
        if (pq_mem == NULL) {
                plt_err("Could not allocate reserved memzone");
                goto qp_free;
        }

        va = pq_mem->addr;

        memset(va, 0, len);

        ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
        if (ret) {
                plt_err("Could not create mempool for metabuf");
                goto pq_mem_free;
        }

        /* Initialize pending queue */
        qp->pend_q.req_queue = pq_mem->addr;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;

        return qp;

pq_mem_free:
        rte_memzone_free(pq_mem);
qp_free:
        rte_free(qp);
        return NULL;
}

static int
cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
{
        const struct rte_memzone *pq_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;

        cnxk_cpt_metabuf_mempool_destroy(qp);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp->lf.lf_id);

        pq_mem = rte_memzone_lookup(name);

        ret = rte_memzone_free(pq_mem);
        if (ret)
                return ret;

        rte_free(qp);

        return 0;
}

int
cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        struct roc_cpt_lf *lf;
        int ret;

        if (qp == NULL)
                return -EINVAL;

        lf = roc_cpt->lf[qp_id];
        if (lf == NULL)
                return -ENOTSUP;

        roc_cpt_lf_fini(lf);

        ret = cnxk_cpt_qp_destroy(dev, qp);
        if (ret) {
                plt_err("Could not destroy queue pair %d", qp_id);
                return ret;
        }

        roc_cpt->lf[qp_id] = NULL;
        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

int
cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                          const struct rte_cryptodev_qp_conf *conf,
                          int socket_id __rte_unused)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        struct rte_pci_device *pci_dev;
        struct cnxk_cpt_qp *qp;
        int ret;

        if (dev->data->queue_pairs[qp_id] != NULL)
                cnxk_cpt_queue_pair_release(dev, qp_id);

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[2].addr == NULL) {
                plt_err("Invalid PCI mem address");
                return -EIO;
        }

        qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
        if (qp == NULL) {
                plt_err("Could not create queue pair %d", qp_id);
                return -ENOMEM;
        }

        qp->lf.lf_id = qp_id;
        qp->lf.nb_desc = conf->nb_descriptors;

        ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
        if (ret < 0) {
                plt_err("Could not initialize queue pair %d", qp_id);
                ret = -EINVAL;
                goto exit;
        }

        roc_cpt->lf[qp_id] = &qp->lf;

        ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
        if (ret < 0) {
                roc_cpt->lf[qp_id] = NULL;
                plt_err("Could not init lmtline for queue pair %d", qp_id);
                goto exit;
        }

        qp->sess_mp = conf->mp_session;
        qp->sess_mp_priv = conf->mp_session_private;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;

exit:
        cnxk_cpt_qp_destroy(dev, qp);
        return ret;
}
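
The matching application-side call, as a rough sketch with placeholder sizes: rte_cryptodev_queue_pair_setup() reaches cnxk_cpt_queue_pair_setup() above, and the two session mempools are assumed to have been created beforehand (e.g. with rte_cryptodev_sym_session_pool_create() and rte_mempool_create()).

static int
example_qp_setup(uint8_t dev_id, uint16_t qp_id, struct rte_mempool *sess_mp,
                 struct rte_mempool *sess_priv_mp)
{
        struct rte_cryptodev_qp_conf qp_conf = {
                .nb_descriptors = 2048, /* placeholder ring depth */
                .mp_session = sess_mp,
                .mp_session_private = sess_priv_mp,
        };

        return rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
                                              rte_socket_id());
}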

unsigned int
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cnxk_se_sess);
}
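
A sketch of how an application could size the session private-data mempool that is later handed to queue pair setup and session init; the pool name and session count are placeholders. rte_cryptodev_sym_get_private_session_size() reports the value returned by cnxk_cpt_sym_session_get_size() above.

static struct rte_mempool *
example_sess_priv_pool_create(uint8_t dev_id, uint32_t nb_sessions)
{
        unsigned int sz = rte_cryptodev_sym_get_private_session_size(dev_id);

        /* One element per session, each large enough for struct cnxk_se_sess */
        return rte_mempool_create("example_sess_priv", nb_sessions, sz, 0, 0,
                                  NULL, NULL, NULL, NULL, rte_socket_id(), 0);
}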

/* Classify the transform chain: returns a CNXK_CPT_* op type on success,
 * or a negative errno for unsupported combinations.
 */
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
                return CNXK_CPT_CIPHER;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
                return CNXK_CPT_AUTH;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
                return CNXK_CPT_AEAD;

        if (xform->next == NULL)
                return -EIO;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
                return -ENOTSUP;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
                return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
                return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

        if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                switch (xform->auth.algo) {
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                        switch (xform->next->cipher.algo) {
                        case RTE_CRYPTO_CIPHER_AES_CBC:
                                return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
                        default:
                                return -ENOTSUP;
                        }
                default:
                        return -ENOTSUP;
                }
        }

        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
            xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
            xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
            xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
                switch (xform->cipher.algo) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                        switch (xform->next->auth.algo) {
                        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                                return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
                        default:
                                return -ENOTSUP;
                        }
                default:
                        return -ENOTSUP;
                }
        }

        return -ENOTSUP;
}
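
For illustration, a transform that sym_xform_verify() classifies as CNXK_CPT_CIPHER, the only chain type wired into session creation by this commit. The helper and the usual IV-offset convention are assumptions, not part of the PMD.

static void
example_build_cipher_xform(struct rte_crypto_sym_xform *xform,
                           const uint8_t *key, uint16_t key_len)
{
        memset(xform, 0, sizeof(*xform));

        /* A lone cipher xform (next == NULL) maps to CNXK_CPT_CIPHER */
        xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
        xform->next = NULL;
        xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
        xform->cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
        xform->cipher.key.data = key;
        xform->cipher.key.length = key_len;
        /* IV placed right after the crypto op, per the usual convention */
        xform->cipher.iv.offset = sizeof(struct rte_crypto_op) +
                                  sizeof(struct rte_crypto_sym_op);
        xform->cipher.iv.length = 16; /* AES-CBC IV size */
}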

static uint64_t
cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
{
        union cpt_inst_w7 inst_w7;

        inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;

        /* Set the engine group */
        if (sess->zsk_flag || sess->chacha_poly)
                inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
        else
                inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];

        return inst_w7.u64;
}

int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
                      struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
{
        struct cnxk_se_sess *sess_priv;
        void *priv;
        int ret;

        ret = sym_xform_verify(xform);
        if (unlikely(ret < 0))
                return ret;

        if (unlikely(rte_mempool_get(pool, &priv))) {
                plt_dp_err("Could not allocate session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cnxk_se_sess));

        sess_priv = priv;

        switch (ret) {
        case CNXK_CPT_CIPHER:
                ret = fill_sess_cipher(xform, sess_priv);
                break;
        default:
                /* Other op types are not handled yet */
                ret = -1;
        }

        if (ret)
                goto priv_put;

        sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

        set_sym_session_private_data(sess, driver_id, sess_priv);

        return 0;

priv_put:
        rte_mempool_put(pool, priv);

        return -ENOTSUP;
}
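
A sketch of the application-side session setup that exercises the path above: rte_cryptodev_sym_session_init() calls cnxk_cpt_sym_session_configure() below through the PMD ops, which in turn fills the cipher context via fill_sess_cipher(). The helper name is illustrative and error handling is kept minimal.

static struct rte_cryptodev_sym_session *
example_cipher_session_create(uint8_t dev_id, struct rte_mempool *sess_mp,
                              struct rte_mempool *sess_priv_mp,
                              struct rte_crypto_sym_xform *cipher_xform)
{
        struct rte_cryptodev_sym_session *sess;

        sess = rte_cryptodev_sym_session_create(sess_mp);
        if (sess == NULL)
                return NULL;

        /* Ends up in cnxk_cpt_sym_session_configure() via the PMD ops */
        if (rte_cryptodev_sym_session_init(dev_id, sess, cipher_xform,
                                           sess_priv_mp) < 0) {
                rte_cryptodev_sym_session_free(sess);
                return NULL;
        }

        return sess;
}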

int
cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
                               struct rte_crypto_sym_xform *xform,
                               struct rte_cryptodev_sym_session *sess,
                               struct rte_mempool *pool)
{
        struct cnxk_cpt_vf *vf = dev->data->dev_private;
        struct roc_cpt *roc_cpt = &vf->cpt;
        uint8_t driver_id;

        driver_id = dev->driver_id;

        return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
}

void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
        void *priv = get_sym_session_private_data(sess, driver_id);
        struct rte_mempool *pool;

        if (priv == NULL)
                return;

        memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));

        pool = rte_mempool_from_obj(priv);

        set_sym_session_private_data(sess, driver_id, NULL);

        rte_mempool_put(pool, priv);
}

void
cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_sym_session *sess)
{
        return sym_session_clear(dev->driver_id, sess);
}