crypto/cnxk: enable allocated queues only
[dpdk.git] / drivers / crypto / cnxk / cnxk_cryptodev_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <cryptodev_pmd.h>
7 #include <rte_errno.h>
8
9 #include "roc_cpt.h"
10
11 #include "cnxk_ae.h"
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
14 #include "cnxk_cryptodev_capabilities.h"
15 #include "cnxk_se.h"
16
17 #define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
18 #define CNXK_CPT_MAX_ASYM_OP_MOD_LEN    1024
19
20 static int
21 cnxk_cpt_get_mlen(void)
22 {
23         uint32_t len;
24
25         /* For MAC */
26         len = 2 * sizeof(uint64_t);
27         len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
28
29         len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
30         len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
31                                (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
32                                 2) * ROC_SE_SG_ENTRY_SIZE),
33                               8);
34
35         return len;
36 }
37
38 static int
39 cnxk_cpt_asym_get_mlen(void)
40 {
41         uint32_t len;
42
43         /* To hold RPTR */
44         len = sizeof(uint64_t);
45
46         /* Get meta len for asymmetric operations */
47         len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
48
49         return len;
50 }
51
52 int
53 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
54                     struct rte_cryptodev_config *conf)
55 {
56         struct cnxk_cpt_vf *vf = dev->data->dev_private;
57         struct roc_cpt *roc_cpt = &vf->cpt;
58         uint16_t nb_lf_avail, nb_lf;
59         int ret;
60
61         dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
62
63         nb_lf_avail = roc_cpt->nb_lf_avail;
64         nb_lf = conf->nb_queue_pairs;
65
66         if (nb_lf > nb_lf_avail)
67                 return -ENOTSUP;
68
69         ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
70         if (ret) {
71                 plt_err("Could not configure device");
72                 return ret;
73         }
74
75         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
76                 /* Initialize shared FPM table */
77                 ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
78                 if (ret) {
79                         plt_err("Could not get FPM table");
80                         return ret;
81                 }
82
83                 /* Init EC grp table */
84                 ret = roc_ae_ec_grp_get(vf->ec_grp);
85                 if (ret) {
86                         plt_err("Could not get EC grp table");
87                         roc_ae_fpm_put();
88                         return ret;
89                 }
90         }
91
92         return 0;
93 }
94
95 int
96 cnxk_cpt_dev_start(struct rte_cryptodev *dev)
97 {
98         struct cnxk_cpt_vf *vf = dev->data->dev_private;
99         struct roc_cpt *roc_cpt = &vf->cpt;
100         uint16_t nb_lf = roc_cpt->nb_lf;
101         uint16_t qp_id;
102
103         for (qp_id = 0; qp_id < nb_lf; qp_id++) {
104                 /* Application may not setup all queue pair */
105                 if (roc_cpt->lf[qp_id] == NULL)
106                         continue;
107
108                 roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
109         }
110
111         return 0;
112 }
113
114 void
115 cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
116 {
117         struct cnxk_cpt_vf *vf = dev->data->dev_private;
118         struct roc_cpt *roc_cpt = &vf->cpt;
119         uint16_t nb_lf = roc_cpt->nb_lf;
120         uint16_t qp_id;
121
122         for (qp_id = 0; qp_id < nb_lf; qp_id++) {
123                 if (roc_cpt->lf[qp_id] == NULL)
124                         continue;
125
126                 roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
127         }
128 }
129
130 int
131 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
132 {
133         struct cnxk_cpt_vf *vf = dev->data->dev_private;
134         uint16_t i;
135         int ret;
136
137         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
138                 ret = cnxk_cpt_queue_pair_release(dev, i);
139                 if (ret < 0) {
140                         plt_err("Could not release queue pair %u", i);
141                         return ret;
142                 }
143         }
144
145         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
146                 roc_ae_fpm_put();
147                 roc_ae_ec_grp_put();
148         }
149
150         roc_cpt_dev_clear(&vf->cpt);
151
152         return 0;
153 }
154
155 void
156 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
157                       struct rte_cryptodev_info *info)
158 {
159         struct cnxk_cpt_vf *vf = dev->data->dev_private;
160         struct roc_cpt *roc_cpt = &vf->cpt;
161
162         info->max_nb_queue_pairs =
163                 RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
164         plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
165
166         info->feature_flags = cnxk_cpt_default_ff_get();
167         info->capabilities = cnxk_crypto_capabilities_get(vf);
168         info->sym.max_nb_sessions = 0;
169         info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
170         info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
171 }
172
/* Build the canonical pending-queue memzone name for (dev_id, qp_id).
 * Used by both qp_create (reserve) and qp_destroy (lookup), so the
 * format must stay in sync between the two.
 * Fix: the "%u" specifiers were paired with signed int arguments,
 * which is undefined behavior per C11 fprintf semantics; cast to
 * unsigned (values are never negative, output is unchanged).
 */
static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	snprintf(name, size, "cnxk_cpt_pq_mem_%u:%u", (unsigned int)dev_id,
		 (unsigned int)qp_id);
}
178
179 static int
180 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
181                                 struct cnxk_cpt_qp *qp, uint8_t qp_id,
182                                 uint32_t nb_elements)
183 {
184         char mempool_name[RTE_MEMPOOL_NAMESIZE];
185         struct cpt_qp_meta_info *meta_info;
186         int lcore_cnt = rte_lcore_count();
187         struct rte_mempool *pool;
188         int mb_pool_sz, mlen = 8;
189         uint32_t cache_sz;
190
191         if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
192                 /* Get meta len */
193                 mlen = cnxk_cpt_get_mlen();
194         }
195
196         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
197
198                 /* Get meta len required for asymmetric operations */
199                 mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
200         }
201
202         mb_pool_sz = nb_elements;
203         cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
204
205         /* For poll mode, core that enqueues and core that dequeues can be
206          * different. For event mode, all cores are allowed to use same crypto
207          * queue pair.
208          */
209
210         mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
211
212         /* Allocate mempool */
213
214         snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
215                  dev->data->dev_id, qp_id);
216
217         pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
218                                   NULL, NULL, NULL, NULL, rte_socket_id(), 0);
219
220         if (pool == NULL) {
221                 plt_err("Could not create mempool for metabuf");
222                 return rte_errno;
223         }
224
225         meta_info = &qp->meta_info;
226
227         meta_info->pool = pool;
228         meta_info->mlen = mlen;
229
230         return 0;
231 }
232
233 static void
234 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
235 {
236         struct cpt_qp_meta_info *meta_info = &qp->meta_info;
237
238         rte_mempool_free(meta_info->pool);
239
240         meta_info->pool = NULL;
241         meta_info->mlen = 0;
242 }
243
244 static struct cnxk_cpt_qp *
245 cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
246                    uint32_t iq_len)
247 {
248         const struct rte_memzone *pq_mem;
249         char name[RTE_MEMZONE_NAMESIZE];
250         struct cnxk_cpt_qp *qp;
251         uint32_t len;
252         uint8_t *va;
253         int ret;
254
255         /* Allocate queue pair */
256         qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
257                                 ROC_ALIGN, 0);
258         if (qp == NULL) {
259                 plt_err("Could not allocate queue pair");
260                 return NULL;
261         }
262
263         /* For pending queue */
264         len = iq_len * sizeof(struct cpt_inflight_req);
265
266         qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
267                             qp_id);
268
269         pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
270                                              RTE_MEMZONE_SIZE_HINT_ONLY |
271                                                      RTE_MEMZONE_256MB,
272                                              RTE_CACHE_LINE_SIZE);
273         if (pq_mem == NULL) {
274                 plt_err("Could not allocate reserved memzone");
275                 goto qp_free;
276         }
277
278         va = pq_mem->addr;
279
280         memset(va, 0, len);
281
282         ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
283         if (ret) {
284                 plt_err("Could not create mempool for metabuf");
285                 goto pq_mem_free;
286         }
287
288         /* Initialize pending queue */
289         qp->pend_q.req_queue = pq_mem->addr;
290         qp->pend_q.head = 0;
291         qp->pend_q.tail = 0;
292
293         return qp;
294
295 pq_mem_free:
296         rte_memzone_free(pq_mem);
297 qp_free:
298         rte_free(qp);
299         return NULL;
300 }
301
302 static int
303 cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
304 {
305         const struct rte_memzone *pq_mem;
306         char name[RTE_MEMZONE_NAMESIZE];
307         int ret;
308
309         cnxk_cpt_metabuf_mempool_destroy(qp);
310
311         qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
312                             qp->lf.lf_id);
313
314         pq_mem = rte_memzone_lookup(name);
315
316         ret = rte_memzone_free(pq_mem);
317         if (ret)
318                 return ret;
319
320         rte_free(qp);
321
322         return 0;
323 }
324
325 int
326 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
327 {
328         struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
329         struct cnxk_cpt_vf *vf = dev->data->dev_private;
330         struct roc_cpt *roc_cpt = &vf->cpt;
331         struct roc_cpt_lf *lf;
332         int ret;
333
334         if (qp == NULL)
335                 return -EINVAL;
336
337         lf = roc_cpt->lf[qp_id];
338         if (lf == NULL)
339                 return -ENOTSUP;
340
341         roc_cpt_lf_fini(lf);
342
343         ret = cnxk_cpt_qp_destroy(dev, qp);
344         if (ret) {
345                 plt_err("Could not destroy queue pair %d", qp_id);
346                 return ret;
347         }
348
349         roc_cpt->lf[qp_id] = NULL;
350         dev->data->queue_pairs[qp_id] = NULL;
351
352         return 0;
353 }
354
355 int
356 cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
357                           const struct rte_cryptodev_qp_conf *conf,
358                           int socket_id __rte_unused)
359 {
360         struct cnxk_cpt_vf *vf = dev->data->dev_private;
361         struct roc_cpt *roc_cpt = &vf->cpt;
362         struct rte_pci_device *pci_dev;
363         struct cnxk_cpt_qp *qp;
364         int ret;
365
366         if (dev->data->queue_pairs[qp_id] != NULL)
367                 cnxk_cpt_queue_pair_release(dev, qp_id);
368
369         pci_dev = RTE_DEV_TO_PCI(dev->device);
370
371         if (pci_dev->mem_resource[2].addr == NULL) {
372                 plt_err("Invalid PCI mem address");
373                 return -EIO;
374         }
375
376         qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
377         if (qp == NULL) {
378                 plt_err("Could not create queue pair %d", qp_id);
379                 return -ENOMEM;
380         }
381
382         qp->lf.lf_id = qp_id;
383         qp->lf.nb_desc = conf->nb_descriptors;
384
385         ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
386         if (ret < 0) {
387                 plt_err("Could not initialize queue pair %d", qp_id);
388                 ret = -EINVAL;
389                 goto exit;
390         }
391
392         qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
393
394         roc_cpt->lf[qp_id] = &qp->lf;
395
396         ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
397         if (ret < 0) {
398                 roc_cpt->lf[qp_id] = NULL;
399                 plt_err("Could not init lmtline for queue pair %d", qp_id);
400                 goto exit;
401         }
402
403         qp->sess_mp = conf->mp_session;
404         qp->sess_mp_priv = conf->mp_session_private;
405         dev->data->queue_pairs[qp_id] = qp;
406
407         return 0;
408
409 exit:
410         cnxk_cpt_qp_destroy(dev, qp);
411         return ret;
412 }
413
/* Return the size of the PMD-private symmetric session structure;
 * the device argument is unused (size is fixed per driver).
 */
unsigned int
cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cnxk_se_sess);
}
419
/* Classify a symmetric transform (chain) into a CNXK_CPT_* operation
 * kind, or return a negative errno if the combination is unsupported.
 * Order of checks matters: single-xform cases are resolved first, then
 * explicit unsupported pairs, then supported chain shapes.
 */
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	/* NULL-auth verify is not supported by the hardware. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	/* Single-transform cases: cipher-only, auth-only, AEAD. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return CNXK_CPT_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return CNXK_CPT_AUTH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
		return CNXK_CPT_AEAD;

	/* Any other single transform type is invalid. */
	if (xform->next == NULL)
		return -EIO;

	/* 3DES-CBC chained with plain SHA1 is not supported (either order). */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
		return -ENOTSUP;

	/* Encrypt-then-generate and verify-then-decrypt chains. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

	/* Generate-then-encrypt: only SHA1-HMAC + AES-CBC is supported. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		switch (xform->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			switch (xform->next->cipher.algo) {
			case RTE_CRYPTO_CIPHER_AES_CBC:
				return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	/* Decrypt-then-verify: only AES-CBC + SHA1-HMAC is supported. */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		switch (xform->cipher.algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
			switch (xform->next->auth.algo) {
			case RTE_CRYPTO_AUTH_SHA1_HMAC:
				return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	return -ENOTSUP;
}
500
501 static uint64_t
502 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
503 {
504         union cpt_inst_w7 inst_w7;
505
506         inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
507
508         /* Set the engine group */
509         if (sess->zsk_flag || sess->chacha_poly)
510                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
511         else
512                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
513
514         return inst_w7.u64;
515 }
516
/* Fill a symmetric session's private data from the xform chain.
 * Verifies the chain shape, allocates private data from `pool`, fills
 * cipher/auth/AEAD parameters per the classified operation kind, and
 * attaches the private data to `sess`.
 * Returns 0 on success, negative errno otherwise; on any failure the
 * private data object is returned to the pool.
 */
int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
		      struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cnxk_se_sess *sess_priv;
	void *priv;
	int ret;

	/* On success ret holds the CNXK_CPT_* operation kind. */
	ret = sym_xform_verify(xform);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		plt_dp_err("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cnxk_se_sess));

	sess_priv = priv;

	/* Dispatch on the operation kind from sym_xform_verify(). */
	switch (ret) {
	case CNXK_CPT_CIPHER:
		ret = fill_sess_cipher(xform, sess_priv);
		break;
	case CNXK_CPT_AUTH:
		/* AES-GMAC is handled as a special auth-only case. */
		if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
			ret = fill_sess_gmac(xform, sess_priv);
		else
			ret = fill_sess_auth(xform, sess_priv);
		break;
	case CNXK_CPT_AEAD:
		ret = fill_sess_aead(xform, sess_priv);
		break;
	case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
	case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
		/* Cipher first in the chain, auth second. */
		ret = fill_sess_cipher(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_auth(xform->next, sess_priv);
		break;
	case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
	case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
		/* Auth first in the chain, cipher second. */
		ret = fill_sess_auth(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_cipher(xform->next, sess_priv);
		break;
	default:
		ret = -1;
	}

	if (ret)
		goto priv_put;

	/* HMAC MAC length must be within supported bounds; free the
	 * auth key duplicated by the fill helper before bailing out.
	 */
	if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
	    cpt_mac_len_verify(&xform->auth)) {
		plt_dp_err("MAC length is not supported");
		if (sess_priv->roc_se_ctx.auth_key != NULL) {
			plt_free(sess_priv->roc_se_ctx.auth_key);
			sess_priv->roc_se_ctx.auth_key = NULL;
		}

		ret = -ENOTSUP;
		goto priv_put;
	}

	/* Precompute instruction word 7 (context pointer + engine grp). */
	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

	set_sym_session_private_data(sess, driver_id, sess_priv);

	return 0;

priv_put:
	rte_mempool_put(pool, priv);

	return -ENOTSUP;
}
597
598 int
599 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
600                                struct rte_crypto_sym_xform *xform,
601                                struct rte_cryptodev_sym_session *sess,
602                                struct rte_mempool *pool)
603 {
604         struct cnxk_cpt_vf *vf = dev->data->dev_private;
605         struct roc_cpt *roc_cpt = &vf->cpt;
606         uint8_t driver_id;
607
608         driver_id = dev->driver_id;
609
610         return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
611 }
612
613 void
614 sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
615 {
616         void *priv = get_sym_session_private_data(sess, driver_id);
617         struct cnxk_se_sess *sess_priv;
618         struct rte_mempool *pool;
619
620         if (priv == NULL)
621                 return;
622
623         sess_priv = priv;
624
625         if (sess_priv->roc_se_ctx.auth_key != NULL)
626                 plt_free(sess_priv->roc_se_ctx.auth_key);
627
628         memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
629
630         pool = rte_mempool_from_obj(priv);
631
632         set_sym_session_private_data(sess, driver_id, NULL);
633
634         rte_mempool_put(pool, priv);
635 }
636
637 void
638 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
639                            struct rte_cryptodev_sym_session *sess)
640 {
641         return sym_session_clear(dev->driver_id, sess);
642 }
643
/* Return the size of the PMD-private asymmetric session structure;
 * the device argument is unused (size is fixed per driver).
 */
unsigned int
cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
	return sizeof(struct cnxk_ae_sess);
}
649
650 void
651 cnxk_ae_session_clear(struct rte_cryptodev *dev,
652                       struct rte_cryptodev_asym_session *sess)
653 {
654         struct rte_mempool *sess_mp;
655         struct cnxk_ae_sess *priv;
656
657         priv = get_asym_session_private_data(sess, dev->driver_id);
658         if (priv == NULL)
659                 return;
660
661         /* Free resources allocated in session_cfg */
662         cnxk_ae_free_session_parameters(priv);
663
664         /* Reset and free object back to pool */
665         memset(priv, 0, cnxk_ae_session_size_get(dev));
666         sess_mp = rte_mempool_from_obj(priv);
667         set_asym_session_private_data(sess, dev->driver_id, NULL);
668         rte_mempool_put(sess_mp, priv);
669 }
670
671 int
672 cnxk_ae_session_cfg(struct rte_cryptodev *dev,
673                     struct rte_crypto_asym_xform *xform,
674                     struct rte_cryptodev_asym_session *sess,
675                     struct rte_mempool *pool)
676 {
677         struct cnxk_cpt_vf *vf = dev->data->dev_private;
678         struct roc_cpt *roc_cpt = &vf->cpt;
679         struct cnxk_ae_sess *priv;
680         union cpt_inst_w7 w7;
681         int ret;
682
683         if (rte_mempool_get(pool, (void **)&priv))
684                 return -ENOMEM;
685
686         memset(priv, 0, sizeof(struct cnxk_ae_sess));
687
688         ret = cnxk_ae_fill_session_parameters(priv, xform);
689         if (ret) {
690                 rte_mempool_put(pool, priv);
691                 return ret;
692         }
693
694         w7.u64 = 0;
695         w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
696         priv->cpt_inst_w7 = w7.u64;
697         priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
698         priv->ec_grp = vf->ec_grp;
699         set_asym_session_private_data(sess, dev->driver_id, priv);
700
701         return 0;
702 }