crypto/cnxk: add symmetric capabilities
[dpdk.git] / drivers / crypto / cnxk / cnxk_cryptodev_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <rte_cryptodev_pmd.h>
7 #include <rte_errno.h>
8
9 #include "roc_cpt.h"
10
11 #include "cnxk_cryptodev.h"
12 #include "cnxk_cryptodev_ops.h"
13 #include "cnxk_cryptodev_capabilities.h"
14 #include "cnxk_se.h"
15
/*
 * Compute the per-operation metabuf length needed by the SE (symmetric
 * engine) datapath: completion/MAC area, offset-control word + IV, and
 * an 8-byte-aligned scatter-gather list region.
 */
static int
cnxk_cpt_get_mlen(void)
{
	uint32_t len;

	/* For MAC */
	len = 2 * sizeof(uint64_t);
	len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);

	/* Offset-control word plus space for one AES-CBC-sized IV. */
	len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
	/* SG list header + entries: max in/out count rounded up to a
	 * multiple of 4, four entries per ROC_SE_SG_ENTRY_SIZE unit,
	 * whole region aligned to 8 bytes.
	 */
	len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
			       (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
				2) * ROC_SE_SG_ENTRY_SIZE),
			      8);

	return len;
}
33
34 int
35 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
36                     struct rte_cryptodev_config *conf)
37 {
38         struct cnxk_cpt_vf *vf = dev->data->dev_private;
39         struct roc_cpt *roc_cpt = &vf->cpt;
40         uint16_t nb_lf_avail, nb_lf;
41         int ret;
42
43         dev->feature_flags &= ~conf->ff_disable;
44
45         nb_lf_avail = roc_cpt->nb_lf_avail;
46         nb_lf = conf->nb_queue_pairs;
47
48         if (nb_lf > nb_lf_avail)
49                 return -ENOTSUP;
50
51         ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
52         if (ret) {
53                 plt_err("Could not configure device");
54                 return ret;
55         }
56
57         return 0;
58 }
59
/* Device start hook: no device-level action is required by this PMD. */
int
cnxk_cpt_dev_start(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);

	return 0;
}
67
/* Device stop hook: no device-level action is required by this PMD. */
void
cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
{
	RTE_SET_USED(dev);
}
73
74 int
75 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
76 {
77         struct cnxk_cpt_vf *vf = dev->data->dev_private;
78         uint16_t i;
79         int ret;
80
81         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
82                 ret = cnxk_cpt_queue_pair_release(dev, i);
83                 if (ret < 0) {
84                         plt_err("Could not release queue pair %u", i);
85                         return ret;
86                 }
87         }
88
89         roc_cpt_dev_clear(&vf->cpt);
90
91         return 0;
92 }
93
94 void
95 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
96                       struct rte_cryptodev_info *info)
97 {
98         struct cnxk_cpt_vf *vf = dev->data->dev_private;
99         struct roc_cpt *roc_cpt = &vf->cpt;
100
101         info->max_nb_queue_pairs = roc_cpt->nb_lf_avail;
102         info->feature_flags = dev->feature_flags;
103         info->capabilities = cnxk_crypto_capabilities_get(vf);
104         info->sym.max_nb_sessions = 0;
105         info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
106         info->min_mbuf_tailroom_req = 0;
107 }
108
/* Build the memzone name for a queue pair's pending queue. */
static void
qp_memzone_name_get(char *buf, int buf_len, int dev_id, int qp_id)
{
	snprintf(buf, buf_len, "cnxk_cpt_pq_mem_%u:%u", dev_id, qp_id);
}
114
115 static int
116 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
117                                 struct cnxk_cpt_qp *qp, uint8_t qp_id,
118                                 uint32_t nb_elements)
119 {
120         char mempool_name[RTE_MEMPOOL_NAMESIZE];
121         struct cpt_qp_meta_info *meta_info;
122         struct rte_mempool *pool;
123         uint32_t cache_sz;
124         int mlen = 8;
125
126         if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
127                 /* Get meta len */
128                 mlen = cnxk_cpt_get_mlen();
129         }
130
131         cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
132
133         /* Allocate mempool */
134
135         snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
136                  dev->data->dev_id, qp_id);
137
138         pool = rte_mempool_create(mempool_name, nb_elements, mlen, cache_sz, 0,
139                                   NULL, NULL, NULL, NULL, rte_socket_id(), 0);
140
141         if (pool == NULL) {
142                 plt_err("Could not create mempool for metabuf");
143                 return rte_errno;
144         }
145
146         meta_info = &qp->meta_info;
147
148         meta_info->pool = pool;
149         meta_info->mlen = mlen;
150
151         return 0;
152 }
153
154 static void
155 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
156 {
157         struct cpt_qp_meta_info *meta_info = &qp->meta_info;
158
159         rte_mempool_free(meta_info->pool);
160
161         meta_info->pool = NULL;
162         meta_info->mlen = 0;
163 }
164
/*
 * Allocate a queue pair: the qp structure, a memzone-backed pending
 * queue of iq_len inflight-request slots, and the metabuf mempool.
 * Returns the qp on success or NULL on failure; partial allocations
 * are rolled back via the goto cleanup chain.
 */
static struct cnxk_cpt_qp *
cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
		   uint32_t iq_len)
{
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	struct cnxk_cpt_qp *qp;
	uint32_t len;
	uint8_t *va;
	int ret;

	/* Allocate queue pair */
	qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
				ROC_ALIGN, 0);
	if (qp == NULL) {
		plt_err("Could not allocate queue pair");
		return NULL;
	}

	/* For pending queue */
	len = iq_len * sizeof(struct cpt_inflight_req);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp_id);

	/* Prefer 256MB pages; SIZE_HINT_ONLY lets the reservation fall
	 * back to other page sizes instead of failing outright.
	 */
	pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
					     RTE_MEMZONE_SIZE_HINT_ONLY |
						     RTE_MEMZONE_256MB,
					     RTE_CACHE_LINE_SIZE);
	if (pq_mem == NULL) {
		plt_err("Could not allocate reserved memzone");
		goto qp_free;
	}

	va = pq_mem->addr;

	/* Start with a clean pending queue. */
	memset(va, 0, len);

	ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
	if (ret) {
		plt_err("Could not create mempool for metabuf");
		goto pq_mem_free;
	}

	/* Initialize pending queue */
	qp->pend_q.req_queue = pq_mem->addr;
	qp->pend_q.enq_tail = 0;
	qp->pend_q.deq_head = 0;
	qp->pend_q.pending_count = 0;

	return qp;

pq_mem_free:
	rte_memzone_free(pq_mem);
qp_free:
	rte_free(qp);
	return NULL;
}
223
/*
 * Free a queue pair's resources: metabuf mempool, pending-queue
 * memzone (looked up by the name derived from dev id and LF id) and
 * the qp structure itself.
 */
static int
cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
{
	const struct rte_memzone *pq_mem;
	char name[RTE_MEMZONE_NAMESIZE];
	int ret;

	cnxk_cpt_metabuf_mempool_destroy(qp);

	qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
			    qp->lf.lf_id);

	/* NOTE(review): if the lookup fails, pq_mem is NULL and
	 * rte_memzone_free() returns an error, so qp is leaked —
	 * presumably the memzone always exists here; confirm.
	 */
	pq_mem = rte_memzone_lookup(name);

	ret = rte_memzone_free(pq_mem);
	if (ret)
		return ret;

	rte_free(qp);

	return 0;
}
246
247 int
248 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
249 {
250         struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
251         struct cnxk_cpt_vf *vf = dev->data->dev_private;
252         struct roc_cpt *roc_cpt = &vf->cpt;
253         struct roc_cpt_lf *lf;
254         int ret;
255
256         if (qp == NULL)
257                 return -EINVAL;
258
259         lf = roc_cpt->lf[qp_id];
260         if (lf == NULL)
261                 return -ENOTSUP;
262
263         roc_cpt_lf_fini(lf);
264
265         ret = cnxk_cpt_qp_destroy(dev, qp);
266         if (ret) {
267                 plt_err("Could not destroy queue pair %d", qp_id);
268                 return ret;
269         }
270
271         roc_cpt->lf[qp_id] = NULL;
272         dev->data->queue_pairs[qp_id] = NULL;
273
274         return 0;
275 }
276
/*
 * Set up queue pair qp_id: create the qp (pending queue + metabuf
 * pool), initialize the backing CPT LF and its LMT line, then publish
 * the qp in dev->data. An existing qp at this index is released first.
 * socket_id is unused; the qp is allocated on the caller's socket.
 */
int
cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
			  const struct rte_cryptodev_qp_conf *conf,
			  int socket_id __rte_unused)
{
	struct cnxk_cpt_vf *vf = dev->data->dev_private;
	struct roc_cpt *roc_cpt = &vf->cpt;
	struct rte_pci_device *pci_dev;
	struct cnxk_cpt_qp *qp;
	int ret;

	/* Re-setup: drop any previously configured qp at this index. */
	if (dev->data->queue_pairs[qp_id] != NULL)
		cnxk_cpt_queue_pair_release(dev, qp_id);

	pci_dev = RTE_DEV_TO_PCI(dev->device);

	/* BAR2 must be mapped (presumably for LF register access —
	 * TODO confirm) before a queue pair can be brought up.
	 */
	if (pci_dev->mem_resource[2].addr == NULL) {
		plt_err("Invalid PCI mem address");
		return -EIO;
	}

	qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
	if (qp == NULL) {
		plt_err("Could not create queue pair %d", qp_id);
		return -ENOMEM;
	}

	qp->lf.lf_id = qp_id;
	qp->lf.nb_desc = conf->nb_descriptors;

	ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
	if (ret < 0) {
		plt_err("Could not initialize queue pair %d", qp_id);
		ret = -EINVAL;
		goto exit;
	}

	roc_cpt->lf[qp_id] = &qp->lf;

	ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
	if (ret < 0) {
		/* Unpublish the LF registered just above on failure. */
		roc_cpt->lf[qp_id] = NULL;
		plt_err("Could not init lmtline for queue pair %d", qp_id);
		goto exit;
	}

	qp->sess_mp = conf->mp_session;
	qp->sess_mp_priv = conf->mp_session_private;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;

exit:
	cnxk_cpt_qp_destroy(dev, qp);
	return ret;
}
333
/* Return the size of the driver-private symmetric session data. */
unsigned int
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cnxk_se_sess);
}
339
/*
 * Validate a symmetric transform chain and classify it.
 *
 * Returns one of the CNXK_CPT_* chain types on success, or a negative
 * errno for unsupported/invalid combinations. Order matters: all
 * single-xform cases return before the "xform->next == NULL" guard, so
 * every check below that guard may safely dereference xform->next.
 */
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	/* NULL-auth verify is not supported. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	/* Single-xform sessions. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return CNXK_CPT_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return CNXK_CPT_AUTH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
		return CNXK_CPT_AEAD;

	/* A lone xform of any other type is invalid. */
	if (xform->next == NULL)
		return -EIO;

	/* 3DES-CBC chained with plain SHA1 is not supported, in either
	 * order.
	 */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
		return -ENOTSUP;

	/* Encrypt-then-generate and verify-then-decrypt chains. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

	/* Generate-then-encrypt: only SHA1-HMAC + AES-CBC supported. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		switch (xform->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			switch (xform->next->cipher.algo) {
			case RTE_CRYPTO_CIPHER_AES_CBC:
				return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	/* Decrypt-then-verify: only AES-CBC + SHA1-HMAC supported. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		switch (xform->cipher.algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
			switch (xform->next->auth.algo) {
			case RTE_CRYPTO_AUTH_SHA1_HMAC:
				return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	return -ENOTSUP;
}
420
421 static uint64_t
422 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
423 {
424         union cpt_inst_w7 inst_w7;
425
426         inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
427
428         /* Set the engine group */
429         if (sess->zsk_flag || sess->chacha_poly)
430                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
431         else
432                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
433
434         return inst_w7.u64;
435 }
436
/*
 * Common symmetric session setup: classify the xform chain, allocate
 * private data from the given pool, fill it per the chain type, check
 * HMAC MAC-length support and compute the cached instruction word 7.
 *
 * Returns 0 on success; -ENOMEM when the pool is exhausted; the
 * sym_xform_verify() error for invalid chains; -ENOTSUP otherwise.
 */
int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
		      struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cnxk_se_sess *sess_priv;
	void *priv;
	int ret;

	/* ret >= 0 is the CNXK_CPT_* chain type, used in the switch. */
	ret = sym_xform_verify(xform);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		plt_dp_err("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cnxk_se_sess));

	sess_priv = priv;

	/* Fill session fields according to the chain classification. */
	switch (ret) {
	case CNXK_CPT_CIPHER:
		ret = fill_sess_cipher(xform, sess_priv);
		break;
	case CNXK_CPT_AUTH:
		/* AES-GMAC is auth-only at the API level but needs the
		 * GMAC-specific fill path.
		 */
		if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
			ret = fill_sess_gmac(xform, sess_priv);
		else
			ret = fill_sess_auth(xform, sess_priv);
		break;
	case CNXK_CPT_AEAD:
		ret = fill_sess_aead(xform, sess_priv);
		break;
	case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
	case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
		/* Cipher-first chains: cipher xform, then auth xform. */
		ret = fill_sess_cipher(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_auth(xform->next, sess_priv);
		break;
	case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
	case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
		/* Auth-first chains: auth xform, then cipher xform. */
		ret = fill_sess_auth(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_cipher(xform->next, sess_priv);
		break;
	default:
		ret = -1;
	}

	/* NOTE(review): every fill failure is reported to the caller as
	 * -ENOTSUP via priv_put, regardless of the value of ret here.
	 */
	if (ret)
		goto priv_put;

	if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
	    cpt_mac_len_verify(&xform->auth)) {
		plt_dp_err("MAC length is not supported");
		ret = -ENOTSUP;
		goto priv_put;
	}

	/* Cache instruction word 7 (context pointer + engine group). */
	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

	set_sym_session_private_data(sess, driver_id, sess_priv);

	return 0;

priv_put:
	rte_mempool_put(pool, priv);

	return -ENOTSUP;
}
512
513 int
514 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
515                                struct rte_crypto_sym_xform *xform,
516                                struct rte_cryptodev_sym_session *sess,
517                                struct rte_mempool *pool)
518 {
519         struct cnxk_cpt_vf *vf = dev->data->dev_private;
520         struct roc_cpt *roc_cpt = &vf->cpt;
521         uint8_t driver_id;
522
523         driver_id = dev->driver_id;
524
525         return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
526 }
527
/*
 * Clear a session's driver-private data: wipe it, detach it from the
 * session and return it to its originating mempool. No-op when the
 * session has no private data for this driver.
 */
void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
	void *priv = get_sym_session_private_data(sess, driver_id);

	if (priv == NULL)
		return;

	/* Zero key material before handing the object back. */
	memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));

	set_sym_session_private_data(sess, driver_id, NULL);
	rte_mempool_put(rte_mempool_from_obj(priv), priv);
}
545
546 void
547 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
548                            struct rte_cryptodev_sym_session *sess)
549 {
550         return sym_session_clear(dev->driver_id, sess);
551 }