crypto/cnxk: allow different cores in pending queue
[dpdk.git] / drivers / crypto / cnxk / cnxk_cryptodev_ops.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <rte_cryptodev.h>
6 #include <cryptodev_pmd.h>
7 #include <rte_errno.h>
8
9 #include "roc_cpt.h"
10
11 #include "cnxk_ae.h"
12 #include "cnxk_cryptodev.h"
13 #include "cnxk_cryptodev_ops.h"
14 #include "cnxk_cryptodev_capabilities.h"
15 #include "cnxk_se.h"
16
17 #define CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS 5
18 #define CNXK_CPT_MAX_ASYM_OP_MOD_LEN    1024
19
20 static int
21 cnxk_cpt_get_mlen(void)
22 {
23         uint32_t len;
24
25         /* For MAC */
26         len = 2 * sizeof(uint64_t);
27         len += ROC_SE_MAX_MAC_LEN * sizeof(uint8_t);
28
29         len += ROC_SE_OFF_CTRL_LEN + ROC_CPT_AES_CBC_IV_LEN;
30         len += RTE_ALIGN_CEIL((ROC_SE_SG_LIST_HDR_SIZE +
31                                (RTE_ALIGN_CEIL(ROC_SE_MAX_SG_IN_OUT_CNT, 4) >>
32                                 2) * ROC_SE_SG_ENTRY_SIZE),
33                               8);
34
35         return len;
36 }
37
38 static int
39 cnxk_cpt_asym_get_mlen(void)
40 {
41         uint32_t len;
42
43         /* To hold RPTR */
44         len = sizeof(uint64_t);
45
46         /* Get meta len for asymmetric operations */
47         len += CNXK_CPT_MAX_ASYM_OP_NUM_PARAMS * CNXK_CPT_MAX_ASYM_OP_MOD_LEN;
48
49         return len;
50 }
51
52 int
53 cnxk_cpt_dev_config(struct rte_cryptodev *dev,
54                     struct rte_cryptodev_config *conf)
55 {
56         struct cnxk_cpt_vf *vf = dev->data->dev_private;
57         struct roc_cpt *roc_cpt = &vf->cpt;
58         uint16_t nb_lf_avail, nb_lf;
59         int ret;
60
61         dev->feature_flags = cnxk_cpt_default_ff_get() & ~conf->ff_disable;
62
63         nb_lf_avail = roc_cpt->nb_lf_avail;
64         nb_lf = conf->nb_queue_pairs;
65
66         if (nb_lf > nb_lf_avail)
67                 return -ENOTSUP;
68
69         ret = roc_cpt_dev_configure(roc_cpt, nb_lf);
70         if (ret) {
71                 plt_err("Could not configure device");
72                 return ret;
73         }
74
75         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
76                 /* Initialize shared FPM table */
77                 ret = roc_ae_fpm_get(vf->cnxk_fpm_iova);
78                 if (ret) {
79                         plt_err("Could not get FPM table");
80                         return ret;
81                 }
82
83                 /* Init EC grp table */
84                 ret = roc_ae_ec_grp_get(vf->ec_grp);
85                 if (ret) {
86                         plt_err("Could not get EC grp table");
87                         roc_ae_fpm_put();
88                         return ret;
89                 }
90         }
91
92         return 0;
93 }
94
95 int
96 cnxk_cpt_dev_start(struct rte_cryptodev *dev)
97 {
98         struct cnxk_cpt_vf *vf = dev->data->dev_private;
99         struct roc_cpt *roc_cpt = &vf->cpt;
100         uint16_t nb_lf = roc_cpt->nb_lf;
101         uint16_t qp_id;
102
103         for (qp_id = 0; qp_id < nb_lf; qp_id++)
104                 roc_cpt_iq_enable(roc_cpt->lf[qp_id]);
105
106         return 0;
107 }
108
109 void
110 cnxk_cpt_dev_stop(struct rte_cryptodev *dev)
111 {
112         struct cnxk_cpt_vf *vf = dev->data->dev_private;
113         struct roc_cpt *roc_cpt = &vf->cpt;
114         uint16_t nb_lf = roc_cpt->nb_lf;
115         uint16_t qp_id;
116
117         for (qp_id = 0; qp_id < nb_lf; qp_id++)
118                 roc_cpt_iq_disable(roc_cpt->lf[qp_id]);
119 }
120
121 int
122 cnxk_cpt_dev_close(struct rte_cryptodev *dev)
123 {
124         struct cnxk_cpt_vf *vf = dev->data->dev_private;
125         uint16_t i;
126         int ret;
127
128         for (i = 0; i < dev->data->nb_queue_pairs; i++) {
129                 ret = cnxk_cpt_queue_pair_release(dev, i);
130                 if (ret < 0) {
131                         plt_err("Could not release queue pair %u", i);
132                         return ret;
133                 }
134         }
135
136         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
137                 roc_ae_fpm_put();
138                 roc_ae_ec_grp_put();
139         }
140
141         roc_cpt_dev_clear(&vf->cpt);
142
143         return 0;
144 }
145
146 void
147 cnxk_cpt_dev_info_get(struct rte_cryptodev *dev,
148                       struct rte_cryptodev_info *info)
149 {
150         struct cnxk_cpt_vf *vf = dev->data->dev_private;
151         struct roc_cpt *roc_cpt = &vf->cpt;
152
153         info->max_nb_queue_pairs =
154                 RTE_MIN(roc_cpt->nb_lf_avail, vf->max_qps_limit);
155         plt_cpt_dbg("max_nb_queue_pairs %u", info->max_nb_queue_pairs);
156
157         info->feature_flags = cnxk_cpt_default_ff_get();
158         info->capabilities = cnxk_crypto_capabilities_get(vf);
159         info->sym.max_nb_sessions = 0;
160         info->min_mbuf_headroom_req = CNXK_CPT_MIN_HEADROOM_REQ;
161         info->min_mbuf_tailroom_req = CNXK_CPT_MIN_TAILROOM_REQ;
162 }
163
/* Build the pending-queue memzone name: "cnxk_cpt_pq_mem_<dev>:<qp>". */
static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
	const char *fmt = "cnxk_cpt_pq_mem_%u:%u";

	snprintf(name, size, fmt, dev_id, qp_id);
}
169
170 static int
171 cnxk_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
172                                 struct cnxk_cpt_qp *qp, uint8_t qp_id,
173                                 uint32_t nb_elements)
174 {
175         char mempool_name[RTE_MEMPOOL_NAMESIZE];
176         struct cpt_qp_meta_info *meta_info;
177         int lcore_cnt = rte_lcore_count();
178         struct rte_mempool *pool;
179         int mb_pool_sz, mlen = 8;
180         uint32_t cache_sz;
181
182         if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {
183                 /* Get meta len */
184                 mlen = cnxk_cpt_get_mlen();
185         }
186
187         if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
188
189                 /* Get meta len required for asymmetric operations */
190                 mlen = RTE_MAX(mlen, cnxk_cpt_asym_get_mlen());
191         }
192
193         mb_pool_sz = nb_elements;
194         cache_sz = RTE_MIN(RTE_MEMPOOL_CACHE_MAX_SIZE, nb_elements / 1.5);
195
196         /* For poll mode, core that enqueues and core that dequeues can be
197          * different. For event mode, all cores are allowed to use same crypto
198          * queue pair.
199          */
200
201         mb_pool_sz += (RTE_MAX(2, lcore_cnt) * cache_sz);
202
203         /* Allocate mempool */
204
205         snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "cnxk_cpt_mb_%u:%u",
206                  dev->data->dev_id, qp_id);
207
208         pool = rte_mempool_create(mempool_name, mb_pool_sz, mlen, cache_sz, 0,
209                                   NULL, NULL, NULL, NULL, rte_socket_id(), 0);
210
211         if (pool == NULL) {
212                 plt_err("Could not create mempool for metabuf");
213                 return rte_errno;
214         }
215
216         meta_info = &qp->meta_info;
217
218         meta_info->pool = pool;
219         meta_info->mlen = mlen;
220
221         return 0;
222 }
223
224 static void
225 cnxk_cpt_metabuf_mempool_destroy(struct cnxk_cpt_qp *qp)
226 {
227         struct cpt_qp_meta_info *meta_info = &qp->meta_info;
228
229         rte_mempool_free(meta_info->pool);
230
231         meta_info->pool = NULL;
232         meta_info->mlen = 0;
233 }
234
235 static struct cnxk_cpt_qp *
236 cnxk_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
237                    uint32_t iq_len)
238 {
239         const struct rte_memzone *pq_mem;
240         char name[RTE_MEMZONE_NAMESIZE];
241         struct cnxk_cpt_qp *qp;
242         uint32_t len;
243         uint8_t *va;
244         int ret;
245
246         /* Allocate queue pair */
247         qp = rte_zmalloc_socket("CNXK Crypto PMD Queue Pair", sizeof(*qp),
248                                 ROC_ALIGN, 0);
249         if (qp == NULL) {
250                 plt_err("Could not allocate queue pair");
251                 return NULL;
252         }
253
254         /* For pending queue */
255         len = iq_len * sizeof(struct cpt_inflight_req);
256
257         qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
258                             qp_id);
259
260         pq_mem = rte_memzone_reserve_aligned(name, len, rte_socket_id(),
261                                              RTE_MEMZONE_SIZE_HINT_ONLY |
262                                                      RTE_MEMZONE_256MB,
263                                              RTE_CACHE_LINE_SIZE);
264         if (pq_mem == NULL) {
265                 plt_err("Could not allocate reserved memzone");
266                 goto qp_free;
267         }
268
269         va = pq_mem->addr;
270
271         memset(va, 0, len);
272
273         ret = cnxk_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
274         if (ret) {
275                 plt_err("Could not create mempool for metabuf");
276                 goto pq_mem_free;
277         }
278
279         /* Initialize pending queue */
280         qp->pend_q.req_queue = pq_mem->addr;
281         qp->pend_q.head = 0;
282         qp->pend_q.tail = 0;
283
284         return qp;
285
286 pq_mem_free:
287         rte_memzone_free(pq_mem);
288 qp_free:
289         rte_free(qp);
290         return NULL;
291 }
292
293 static int
294 cnxk_cpt_qp_destroy(const struct rte_cryptodev *dev, struct cnxk_cpt_qp *qp)
295 {
296         const struct rte_memzone *pq_mem;
297         char name[RTE_MEMZONE_NAMESIZE];
298         int ret;
299
300         cnxk_cpt_metabuf_mempool_destroy(qp);
301
302         qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
303                             qp->lf.lf_id);
304
305         pq_mem = rte_memzone_lookup(name);
306
307         ret = rte_memzone_free(pq_mem);
308         if (ret)
309                 return ret;
310
311         rte_free(qp);
312
313         return 0;
314 }
315
316 int
317 cnxk_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
318 {
319         struct cnxk_cpt_qp *qp = dev->data->queue_pairs[qp_id];
320         struct cnxk_cpt_vf *vf = dev->data->dev_private;
321         struct roc_cpt *roc_cpt = &vf->cpt;
322         struct roc_cpt_lf *lf;
323         int ret;
324
325         if (qp == NULL)
326                 return -EINVAL;
327
328         lf = roc_cpt->lf[qp_id];
329         if (lf == NULL)
330                 return -ENOTSUP;
331
332         roc_cpt_lf_fini(lf);
333
334         ret = cnxk_cpt_qp_destroy(dev, qp);
335         if (ret) {
336                 plt_err("Could not destroy queue pair %d", qp_id);
337                 return ret;
338         }
339
340         roc_cpt->lf[qp_id] = NULL;
341         dev->data->queue_pairs[qp_id] = NULL;
342
343         return 0;
344 }
345
346 int
347 cnxk_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
348                           const struct rte_cryptodev_qp_conf *conf,
349                           int socket_id __rte_unused)
350 {
351         struct cnxk_cpt_vf *vf = dev->data->dev_private;
352         struct roc_cpt *roc_cpt = &vf->cpt;
353         struct rte_pci_device *pci_dev;
354         struct cnxk_cpt_qp *qp;
355         int ret;
356
357         if (dev->data->queue_pairs[qp_id] != NULL)
358                 cnxk_cpt_queue_pair_release(dev, qp_id);
359
360         pci_dev = RTE_DEV_TO_PCI(dev->device);
361
362         if (pci_dev->mem_resource[2].addr == NULL) {
363                 plt_err("Invalid PCI mem address");
364                 return -EIO;
365         }
366
367         qp = cnxk_cpt_qp_create(dev, qp_id, conf->nb_descriptors);
368         if (qp == NULL) {
369                 plt_err("Could not create queue pair %d", qp_id);
370                 return -ENOMEM;
371         }
372
373         qp->lf.lf_id = qp_id;
374         qp->lf.nb_desc = conf->nb_descriptors;
375
376         ret = roc_cpt_lf_init(roc_cpt, &qp->lf);
377         if (ret < 0) {
378                 plt_err("Could not initialize queue pair %d", qp_id);
379                 ret = -EINVAL;
380                 goto exit;
381         }
382
383         qp->pend_q.pq_mask = qp->lf.nb_desc - 1;
384
385         roc_cpt->lf[qp_id] = &qp->lf;
386
387         ret = roc_cpt_lmtline_init(roc_cpt, &qp->lmtline, qp_id);
388         if (ret < 0) {
389                 roc_cpt->lf[qp_id] = NULL;
390                 plt_err("Could not init lmtline for queue pair %d", qp_id);
391                 goto exit;
392         }
393
394         qp->sess_mp = conf->mp_session;
395         qp->sess_mp_priv = conf->mp_session_private;
396         dev->data->queue_pairs[qp_id] = qp;
397
398         return 0;
399
400 exit:
401         cnxk_cpt_qp_destroy(dev, qp);
402         return ret;
403 }
404
405 unsigned int
406 cnxk_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
407 {
408         return sizeof(struct cnxk_se_sess);
409 }
410
/*
 * Validate a symmetric transform chain and classify it.
 *
 * Returns one of the CNXK_CPT_* chain types (all non-negative) on
 * success, or a negative errno: -ENOTSUP for combinations this PMD
 * does not handle, -EIO for a single-entry chain of unknown type.
 *
 * Ordering matters: every single-xform case (xform->next == NULL) is
 * resolved before any check below dereferences xform->next.
 */
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
	/* NULL-auth verify is explicitly rejected */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
		return -ENOTSUP;

	/* Single-xform chains */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL)
		return CNXK_CPT_CIPHER;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH && xform->next == NULL)
		return CNXK_CPT_AUTH;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD && xform->next == NULL)
		return CNXK_CPT_AEAD;

	if (xform->next == NULL)
		return -EIO;

	/* 3DES-CBC + SHA1 is rejected in either order */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
		return -ENOTSUP;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
		return -ENOTSUP;

	/* Generic encrypt-then-generate / verify-then-decrypt chains */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE)
		return CNXK_CPT_CIPHER_ENC_AUTH_GEN;

	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT)
		return CNXK_CPT_AUTH_VRFY_CIPHER_DEC;

	/* Generate-then-encrypt: only SHA1-HMAC + AES-CBC is accepted */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
		switch (xform->auth.algo) {
		case RTE_CRYPTO_AUTH_SHA1_HMAC:
			switch (xform->next->cipher.algo) {
			case RTE_CRYPTO_CIPHER_AES_CBC:
				return CNXK_CPT_AUTH_GEN_CIPHER_ENC;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	/* Decrypt-then-verify: only AES-CBC + SHA1-HMAC is accepted */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
	    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
	    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
	    xform->next->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY) {
		switch (xform->cipher.algo) {
		case RTE_CRYPTO_CIPHER_AES_CBC:
			switch (xform->next->auth.algo) {
			case RTE_CRYPTO_AUTH_SHA1_HMAC:
				return CNXK_CPT_CIPHER_DEC_AUTH_VRFY;
			default:
				return -ENOTSUP;
			}
		default:
			return -ENOTSUP;
		}
	}

	return -ENOTSUP;
}
491
492 static uint64_t
493 cnxk_cpt_inst_w7_get(struct cnxk_se_sess *sess, struct roc_cpt *roc_cpt)
494 {
495         union cpt_inst_w7 inst_w7;
496
497         inst_w7.s.cptr = (uint64_t)&sess->roc_se_ctx.se_ctx;
498
499         /* Set the engine group */
500         if (sess->zsk_flag || sess->chacha_poly)
501                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_SE];
502         else
503                 inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
504
505         return inst_w7.u64;
506 }
507
/*
 * Populate a symmetric session from a transform chain.
 *
 * Verifies and classifies the chain, allocates private data from
 * @pool, fills it according to the chain type, computes instruction
 * word 7 and attaches the private data to @sess.
 *
 * Returns 0 on success; the sym_xform_verify() error or -ENOMEM on
 * early failure.  Any later failure returns the private data to the
 * pool and reports -ENOTSUP (the more specific fill_sess_* error code
 * is intentionally not propagated).
 */
int
sym_session_configure(struct roc_cpt *roc_cpt, int driver_id,
		      struct rte_crypto_sym_xform *xform,
		      struct rte_cryptodev_sym_session *sess,
		      struct rte_mempool *pool)
{
	struct cnxk_se_sess *sess_priv;
	void *priv;
	int ret;

	/* On success, ret carries the chain type (CNXK_CPT_*) */
	ret = sym_xform_verify(xform);
	if (unlikely(ret < 0))
		return ret;

	if (unlikely(rte_mempool_get(pool, &priv))) {
		plt_dp_err("Could not allocate session private data");
		return -ENOMEM;
	}

	memset(priv, 0, sizeof(struct cnxk_se_sess));

	sess_priv = priv;

	/* ret is reused below: chain type in, fill_sess_* status out */
	switch (ret) {
	case CNXK_CPT_CIPHER:
		ret = fill_sess_cipher(xform, sess_priv);
		break;
	case CNXK_CPT_AUTH:
		if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
			ret = fill_sess_gmac(xform, sess_priv);
		else
			ret = fill_sess_auth(xform, sess_priv);
		break;
	case CNXK_CPT_AEAD:
		ret = fill_sess_aead(xform, sess_priv);
		break;
	case CNXK_CPT_CIPHER_ENC_AUTH_GEN:
	case CNXK_CPT_CIPHER_DEC_AUTH_VRFY:
		/* Cipher first, auth from the chained xform */
		ret = fill_sess_cipher(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_auth(xform->next, sess_priv);
		break;
	case CNXK_CPT_AUTH_VRFY_CIPHER_DEC:
	case CNXK_CPT_AUTH_GEN_CIPHER_ENC:
		/* Auth first, cipher from the chained xform */
		ret = fill_sess_auth(xform, sess_priv);
		if (ret < 0)
			break;
		ret = fill_sess_cipher(xform->next, sess_priv);
		break;
	default:
		ret = -1;
	}

	if (ret)
		goto priv_put;

	/* NOTE(review): for cipher-first chains this reads xform->auth on a
	 * cipher xform; appears to rely on fc_type != ROC_SE_HASH_HMAC in
	 * those cases — confirm against fill_sess_* behavior.
	 */
	if ((sess_priv->roc_se_ctx.fc_type == ROC_SE_HASH_HMAC) &&
	    cpt_mac_len_verify(&xform->auth)) {
		plt_dp_err("MAC length is not supported");
		/* Drop the auth key allocated by fill_sess_auth() */
		if (sess_priv->roc_se_ctx.auth_key != NULL) {
			plt_free(sess_priv->roc_se_ctx.auth_key);
			sess_priv->roc_se_ctx.auth_key = NULL;
		}

		ret = -ENOTSUP;
		goto priv_put;
	}

	sess_priv->cpt_inst_w7 = cnxk_cpt_inst_w7_get(sess_priv, roc_cpt);

	set_sym_session_private_data(sess, driver_id, sess_priv);

	return 0;

priv_put:
	rte_mempool_put(pool, priv);

	return -ENOTSUP;
}
588
589 int
590 cnxk_cpt_sym_session_configure(struct rte_cryptodev *dev,
591                                struct rte_crypto_sym_xform *xform,
592                                struct rte_cryptodev_sym_session *sess,
593                                struct rte_mempool *pool)
594 {
595         struct cnxk_cpt_vf *vf = dev->data->dev_private;
596         struct roc_cpt *roc_cpt = &vf->cpt;
597         uint8_t driver_id;
598
599         driver_id = dev->driver_id;
600
601         return sym_session_configure(roc_cpt, driver_id, xform, sess, pool);
602 }
603
604 void
605 sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
606 {
607         void *priv = get_sym_session_private_data(sess, driver_id);
608         struct cnxk_se_sess *sess_priv;
609         struct rte_mempool *pool;
610
611         if (priv == NULL)
612                 return;
613
614         sess_priv = priv;
615
616         if (sess_priv->roc_se_ctx.auth_key != NULL)
617                 plt_free(sess_priv->roc_se_ctx.auth_key);
618
619         memset(priv, 0, cnxk_cpt_sym_session_get_size(NULL));
620
621         pool = rte_mempool_from_obj(priv);
622
623         set_sym_session_private_data(sess, driver_id, NULL);
624
625         rte_mempool_put(pool, priv);
626 }
627
628 void
629 cnxk_cpt_sym_session_clear(struct rte_cryptodev *dev,
630                            struct rte_cryptodev_sym_session *sess)
631 {
632         return sym_session_clear(dev->driver_id, sess);
633 }
634
635 unsigned int
636 cnxk_ae_session_size_get(struct rte_cryptodev *dev __rte_unused)
637 {
638         return sizeof(struct cnxk_ae_sess);
639 }
640
641 void
642 cnxk_ae_session_clear(struct rte_cryptodev *dev,
643                       struct rte_cryptodev_asym_session *sess)
644 {
645         struct rte_mempool *sess_mp;
646         struct cnxk_ae_sess *priv;
647
648         priv = get_asym_session_private_data(sess, dev->driver_id);
649         if (priv == NULL)
650                 return;
651
652         /* Free resources allocated in session_cfg */
653         cnxk_ae_free_session_parameters(priv);
654
655         /* Reset and free object back to pool */
656         memset(priv, 0, cnxk_ae_session_size_get(dev));
657         sess_mp = rte_mempool_from_obj(priv);
658         set_asym_session_private_data(sess, dev->driver_id, NULL);
659         rte_mempool_put(sess_mp, priv);
660 }
661
662 int
663 cnxk_ae_session_cfg(struct rte_cryptodev *dev,
664                     struct rte_crypto_asym_xform *xform,
665                     struct rte_cryptodev_asym_session *sess,
666                     struct rte_mempool *pool)
667 {
668         struct cnxk_cpt_vf *vf = dev->data->dev_private;
669         struct roc_cpt *roc_cpt = &vf->cpt;
670         struct cnxk_ae_sess *priv;
671         union cpt_inst_w7 w7;
672         int ret;
673
674         if (rte_mempool_get(pool, (void **)&priv))
675                 return -ENOMEM;
676
677         memset(priv, 0, sizeof(struct cnxk_ae_sess));
678
679         ret = cnxk_ae_fill_session_parameters(priv, xform);
680         if (ret) {
681                 rte_mempool_put(pool, priv);
682                 return ret;
683         }
684
685         w7.u64 = 0;
686         w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_AE];
687         priv->cpt_inst_w7 = w7.u64;
688         priv->cnxk_fpm_iova = vf->cnxk_fpm_iova;
689         priv->ec_grp = vf->ec_grp;
690         set_asym_session_private_data(sess, dev->driver_id, priv);
691
692         return 0;
693 }