drivers/crypto/cnxk/cn10k_cryptodev_ops.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include <rte_cryptodev.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_ip.h>

#include "cn10k_cryptodev.h"
#include "cn10k_cryptodev_ops.h"
#include "cn10k_ipsec_la_ops.h"
#include "cn10k_ipsec.h"
#include "cnxk_ae.h"
#include "cnxk_cryptodev.h"
#include "cnxk_cryptodev_ops.h"
#include "cnxk_se.h"

#include "roc_api.h"

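/*
 * Create a temporary symmetric session for a sessionless op. The session is
 * allocated from the queue pair's session mempool and is released again in
 * dequeue post-processing once the op completes.
 */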
static inline struct cnxk_se_sess *
cn10k_cpt_sym_temp_sess_create(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op)
{
        const int driver_id = cn10k_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        struct cnxk_se_sess *priv;
        int ret;

        /* Create temporary session */
        sess = rte_cryptodev_sym_session_create(qp->sess_mp);
        if (sess == NULL)
                return NULL;

        ret = sym_session_configure(qp->lf.roc_cpt, driver_id, sym_op->xform,
                                    sess, qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        priv = get_sym_session_private_data(sess, driver_id);

        sym_op->session = sess;

        return priv;

sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return NULL;
}

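/*
 * Build a CPT instruction for an op on a security (lookaside IPsec) session.
 * Out-of-place and scatter-gather mbufs are rejected up front; the
 * instruction itself is filled in by the inbound/outbound SA handlers.
 */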
static __rte_always_inline int __rte_hot
cpt_sec_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                  struct cn10k_sec_session *sess,
                  struct cpt_inflight_req *infl_req, struct cpt_inst_s *inst)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cn10k_ipsec_sa *sa;
        int ret;

        if (unlikely(sym_op->m_dst && sym_op->m_dst != sym_op->m_src)) {
                plt_dp_err("Out of place is not supported");
                return -ENOTSUP;
        }

        if (unlikely(!rte_pktmbuf_is_contiguous(sym_op->m_src))) {
                plt_dp_err("Scatter Gather mode is not supported");
                return -ENOTSUP;
        }

        sa = &sess->sa;

        if (sa->is_outbound)
                ret = process_outb_sa(&qp->lf, op, sa, inst);
        else {
                infl_req->op_flags |= CPT_OP_FLAGS_IPSEC_DIR_INBOUND;
                ret = process_inb_sa(op, sa, inst);
        }

        return ret;
}

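/*
 * Build a CPT instruction for a plain symmetric op: ops with a cipher
 * component take the flexi-crypto path, auth-only ops the digest path.
 */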
static __rte_always_inline int __rte_hot
cpt_sym_inst_fill(struct cnxk_cpt_qp *qp, struct rte_crypto_op *op,
                  struct cnxk_se_sess *sess, struct cpt_inflight_req *infl_req,
                  struct cpt_inst_s *inst)
{
        uint64_t cpt_op;
        int ret;

        cpt_op = sess->cpt_op;

        if (cpt_op & ROC_SE_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, infl_req, inst);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, infl_req,
                                         inst);

        return ret;
}

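/*
 * Build one CPT instruction from the first op in ops[]. Returns the number
 * of instructions filled: 1 on success, 0 on any failure. The result word is
 * seeded with CPT_COMP_NOT_DONE so completion can be detected by polling it.
 */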
static inline int
cn10k_cpt_fill_inst(struct cnxk_cpt_qp *qp, struct rte_crypto_op *ops[],
                    struct cpt_inst_s inst[], struct cpt_inflight_req *infl_req)
{
        struct cn10k_sec_session *sec_sess;
        struct rte_crypto_asym_op *asym_op;
        struct rte_crypto_sym_op *sym_op;
        struct cnxk_ae_sess *ae_sess;
        struct cnxk_se_sess *sess;
        struct rte_crypto_op *op;
        uint64_t w7;
        int ret;

        const union cpt_res_s res = {
                .cn10k.compcode = CPT_COMP_NOT_DONE,
        };

        op = ops[0];

        inst[0].w0.u64 = 0;
        inst[0].w2.u64 = 0;
        inst[0].w3.u64 = 0;

        sym_op = op->sym;

        if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        sec_sess = get_sec_session_private_data(
                                sym_op->sec_session);
                        ret = cpt_sec_inst_fill(qp, op, sec_sess, infl_req,
                                                &inst[0]);
                        if (unlikely(ret))
                                return 0;
                        w7 = sec_sess->sa.inst.w7;
                } else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        sess = get_sym_session_private_data(
                                sym_op->session, cn10k_cryptodev_driver_id);
                        ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                &inst[0]);
                        if (unlikely(ret))
                                return 0;
                        w7 = sess->cpt_inst_w7;
                } else {
                        sess = cn10k_cpt_sym_temp_sess_create(qp, op);
                        if (unlikely(sess == NULL)) {
                                plt_dp_err("Could not create temp session");
                                return 0;
                        }

                        ret = cpt_sym_inst_fill(qp, op, sess, infl_req,
                                                &inst[0]);
                        if (unlikely(ret)) {
                                sym_session_clear(cn10k_cryptodev_driver_id,
                                                  op->sym->session);
                                rte_mempool_put(qp->sess_mp, op->sym->session);
                                return 0;
                        }
                        w7 = sess->cpt_inst_w7;
                }
        } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                        asym_op = op->asym;
                        ae_sess = get_asym_session_private_data(
                                asym_op->session, cn10k_cryptodev_driver_id);
                        ret = cnxk_ae_enqueue(qp, op, infl_req, &inst[0],
                                              ae_sess);
                        if (unlikely(ret))
                                return 0;
                        w7 = ae_sess->cpt_inst_w7;
                } else {
                        plt_dp_err("Asymmetric op without session is not supported");
                        return 0;
                }
        } else {
                plt_dp_err("Unsupported op type");
                return 0;
        }

        inst[0].res_addr = (uint64_t)&infl_req->res;
        __atomic_store_n(&infl_req->res.u64[0], res.u64[0], __ATOMIC_RELAXED);
        infl_req->cop = op;

        inst[0].w7.u64 = w7;

        return 1;
}

#define PKTS_PER_LOOP   32
#define PKTS_PER_STEORL 16

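/*
 * Enqueue up to nb_ops ops on the queue pair. Instructions are written into
 * the queue pair's LMT region and kicked off with STEORL; PKTS_PER_LOOP
 * bounds one pass over the LMT lines and PKTS_PER_STEORL is the largest
 * batch this path submits with a single STEORL.
 */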
static uint16_t
cn10k_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint64_t lmt_base, lmt_arg, io_addr;
        struct cpt_inflight_req *infl_req;
        uint16_t nb_allowed, count = 0;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_inst_s *inst;
        uint16_t lmt_id;
        uint64_t head;
        int ret, i;

        pend_q = &qp->pend_q;

        const uint64_t pq_mask = pend_q->pq_mask;

        head = pend_q->head;
        nb_allowed = pending_queue_free_cnt(head, pend_q->tail, pq_mask);
        nb_ops = RTE_MIN(nb_ops, nb_allowed);

        if (unlikely(nb_ops == 0))
                return 0;

        lmt_base = qp->lmtline.lmt_base;
        io_addr = qp->lmtline.io_addr;

        ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
        inst = (struct cpt_inst_s *)lmt_base;

again:
        for (i = 0; i < RTE_MIN(PKTS_PER_LOOP, nb_ops); i++) {
                infl_req = &pend_q->req_queue[head];
                infl_req->op_flags = 0;

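                /*
                 * Each instruction takes a full 128B LMT line (cpt_inst_s
                 * is 64B), hence the 2 * i stride.
                 */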
                ret = cn10k_cpt_fill_inst(qp, ops + i, &inst[2 * i], infl_req);
                if (unlikely(ret != 1)) {
                        plt_dp_err("Could not process op: %p", ops[i]);
                        if (i == 0)
                                goto pend_q_commit;
                        break;
                }

                pending_queue_advance(&head, pq_mask);
        }

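        /*
         * Kick off the burst: a single STEORL flushes at most
         * PKTS_PER_STEORL LMT lines, so larger bursts are split across two
         * submissions.
         */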
        if (i > PKTS_PER_STEORL) {
                lmt_arg = ROC_CN10K_CPT_LMT_ARG | (PKTS_PER_STEORL - 1) << 12 |
                          (uint64_t)lmt_id;
                roc_lmt_submit_steorl(lmt_arg, io_addr);
                lmt_arg = ROC_CN10K_CPT_LMT_ARG |
                          (i - PKTS_PER_STEORL - 1) << 12 |
                          (uint64_t)(lmt_id + PKTS_PER_STEORL);
                roc_lmt_submit_steorl(lmt_arg, io_addr);
        } else {
                lmt_arg = ROC_CN10K_CPT_LMT_ARG | (i - 1) << 12 |
                          (uint64_t)lmt_id;
                roc_lmt_submit_steorl(lmt_arg, io_addr);
        }

        rte_io_wmb();

        if (nb_ops - i > 0 && i == PKTS_PER_LOOP) {
                nb_ops -= i;
                ops += i;
                count += i;
                goto again;
        }

pend_q_commit:
        rte_atomic_thread_fence(__ATOMIC_RELEASE);

        pend_q->head = head;
        pend_q->time_out = rte_get_timer_cycles() +
                           DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        return count + i;
}

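/*
 * Enqueue a single op on behalf of the event crypto adapter. The event
 * crypto metadata attached to the op selects the device and queue pair, and
 * W2/W3 of the instruction encode the completion event that CPT will post
 * back through the SSO.
 */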
uint16_t
cn10k_cpt_crypto_adapter_enqueue(uintptr_t tag_op, struct rte_crypto_op *op)
{
        union rte_event_crypto_metadata *ec_mdata;
        struct cpt_inflight_req *infl_req;
        struct rte_event *rsp_info;
        uint64_t lmt_base, lmt_arg;
        struct cpt_inst_s *inst;
        struct cnxk_cpt_qp *qp;
        uint8_t cdev_id;
        uint16_t lmt_id;
        uint16_t qp_id;
        int ret;

        ec_mdata = cnxk_event_crypto_mdata_get(op);
        if (!ec_mdata) {
                rte_errno = EINVAL;
                return 0;
        }

        cdev_id = ec_mdata->request_info.cdev_id;
        qp_id = ec_mdata->request_info.queue_pair_id;
        qp = rte_cryptodevs[cdev_id].data->queue_pairs[qp_id];
        rsp_info = &ec_mdata->response_info;

        if (unlikely(!qp->ca.enabled)) {
                rte_errno = EINVAL;
                return 0;
        }

        if (unlikely(rte_mempool_get(qp->ca.req_mp, (void **)&infl_req))) {
                rte_errno = ENOMEM;
                return 0;
        }
        infl_req->op_flags = 0;

        lmt_base = qp->lmtline.lmt_base;
        ROC_LMT_BASE_ID_GET(lmt_base, lmt_id);
        inst = (struct cpt_inst_s *)lmt_base;

        ret = cn10k_cpt_fill_inst(qp, &op, inst, infl_req);
        if (unlikely(ret != 1)) {
                plt_dp_err("Could not process op: %p", op);
                rte_mempool_put(qp->ca.req_mp, infl_req);
                return 0;
        }

        infl_req->cop = op;
        infl_req->res.cn10k.compcode = CPT_COMP_NOT_DONE;
        infl_req->qp = qp;
        inst->w0.u64 = 0;
        inst->res_addr = (uint64_t)&infl_req->res;
        inst->w2.u64 = CNXK_CPT_INST_W2(
                (RTE_EVENT_TYPE_CRYPTODEV << 28) | rsp_info->flow_id,
                rsp_info->sched_type, rsp_info->queue_id, 0);
        inst->w3.u64 = CNXK_CPT_INST_W3(1, infl_req);

        if (roc_cpt_is_iq_full(&qp->lf)) {
                rte_mempool_put(qp->ca.req_mp, infl_req);
                rte_errno = EAGAIN;
                return 0;
        }

        if (!rsp_info->sched_type)
                roc_sso_hws_head_wait(tag_op);

        lmt_arg = ROC_CN10K_CPT_LMT_ARG | (uint64_t)lmt_id;
        roc_lmt_submit_steorl(lmt_arg, qp->lmtline.io_addr);

        rte_io_wmb();

        return 1;
}

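/*
 * Update the mbuf length with the size reported by the IPsec microcode
 * (rlen is the packet length after encap/decap processing).
 */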
static inline void
cn10k_cpt_sec_post_process(struct rte_crypto_op *cop,
                           struct cpt_cn10k_res_s *res)
{
        struct rte_mbuf *m = cop->sym->m_src;
        const uint16_t m_len = res->rlen;

        m->data_len = m_len;
        m->pkt_len = m_len;
}

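/*
 * Map the microcode completion code of an IPsec op onto mbuf checksum
 * offload flags (inbound only) and report soft SA expiry via aux_flags.
 */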
static inline void
cn10k_cpt_sec_ucc_process(struct rte_crypto_op *cop,
                          struct cpt_inflight_req *infl_req,
                          const uint8_t uc_compcode)
{
        struct cn10k_sec_session *sess;
        struct cn10k_ipsec_sa *sa;
        struct rte_mbuf *mbuf;

        if (uc_compcode == ROC_IE_OT_UCC_SUCCESS_SA_SOFTEXP_FIRST)
                cop->aux_flags = RTE_CRYPTO_OP_AUX_FLAGS_IPSEC_SOFT_EXPIRY;

        if (!(infl_req->op_flags & CPT_OP_FLAGS_IPSEC_DIR_INBOUND))
                return;

        sess = get_sec_session_private_data(cop->sym->sec_session);
        sa = &sess->sa;

        mbuf = cop->sym->m_src;

        switch (uc_compcode) {
        case ROC_IE_OT_UCC_SUCCESS:
                if (sa->ip_csum_enable)
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                break;
        case ROC_IE_OT_UCC_SUCCESS_PKT_IP_BADCSUM:
                mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                break;
        case ROC_IE_OT_UCC_SUCCESS_PKT_L4_GOODCSUM:
                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                if (sa->ip_csum_enable)
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                break;
        case ROC_IE_OT_UCC_SUCCESS_PKT_L4_BADCSUM:
                mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                if (sa->ip_csum_enable)
                        mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                break;
        default:
                break;
        }
}

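/*
 * Translate a CPT result into the op status. Security ops signal success
 * with CPT_COMP_WARN (the microcode code carries the detail); other ops
 * signal success with CPT_COMP_GOOD, and any nonzero microcode code is then
 * an error. Sessionless ops also release their temporary session here.
 */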
static inline void
cn10k_cpt_dequeue_post_process(struct cnxk_cpt_qp *qp,
                               struct rte_crypto_op *cop,
                               struct cpt_inflight_req *infl_req,
                               struct cpt_cn10k_res_s *res)
{
        const uint8_t uc_compcode = res->uc_compcode;
        const uint8_t compcode = res->compcode;
        unsigned int sz;

        cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;

        if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC &&
            cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                if (likely(compcode == CPT_COMP_WARN)) {
                        /* Success with additional info */
                        cn10k_cpt_sec_ucc_process(cop, infl_req, uc_compcode);
                        cn10k_cpt_sec_post_process(cop, res);
                } else {
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                        plt_dp_info("HW completion code 0x%x", res->compcode);
                        if (compcode == CPT_COMP_GOOD) {
                                plt_dp_info(
                                        "Request failed with microcode error");
                                plt_dp_info("MC completion code 0x%x",
                                            uc_compcode);
                        }
                }

                return;
        }

        if (likely(compcode == CPT_COMP_GOOD || compcode == CPT_COMP_WARN)) {
                if (unlikely(uc_compcode)) {
                        if (uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        plt_dp_info("Request failed with microcode error");
                        plt_dp_info("MC completion code 0x%x",
                                    res->uc_compcode);
                        goto temp_sess_free;
                }

                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        /* Verify authentication data if required */
                        if (unlikely(infl_req->op_flags &
                                     CPT_OP_FLAGS_AUTH_VERIFY)) {
                                uintptr_t *rsp = infl_req->mdata;
                                compl_auth_verify(cop, (uint8_t *)rsp[0],
                                                  rsp[1]);
                        }
                } else if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        struct rte_crypto_asym_op *op = cop->asym;
                        uintptr_t *mdata = infl_req->mdata;
                        struct cnxk_ae_sess *sess;

                        sess = get_asym_session_private_data(
                                op->session, cn10k_cryptodev_driver_id);

                        cnxk_ae_post_process(cop, sess, (uint8_t *)mdata[0]);
                }
        } else {
                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                plt_dp_info("HW completion code 0x%x", res->compcode);

                switch (compcode) {
                case CPT_COMP_INSTERR:
                        plt_dp_err("Request failed with instruction error");
                        break;
                case CPT_COMP_FAULT:
                        plt_dp_err("Request failed with DMA fault");
                        break;
                case CPT_COMP_HWERR:
                        plt_dp_err("Request failed with hardware error");
                        break;
                default:
                        plt_dp_err(
                                "Request failed with unknown completion code");
                }
        }

temp_sess_free:
        if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        sym_session_clear(cn10k_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }
}

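/*
 * Event crypto adapter dequeue path: get_work1 is the inflight request
 * pointer carried in the completion event; recover the crypto op from it
 * and release the per-request resources.
 */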
uintptr_t
cn10k_cpt_crypto_adapter_dequeue(uintptr_t get_work1)
{
        struct cpt_inflight_req *infl_req;
        struct rte_crypto_op *cop;
        struct cnxk_cpt_qp *qp;
        union cpt_res_s res;

        infl_req = (struct cpt_inflight_req *)(get_work1);
        cop = infl_req->cop;
        qp = infl_req->qp;

        res.u64[0] = __atomic_load_n(&infl_req->res.u64[0], __ATOMIC_RELAXED);

        cn10k_cpt_dequeue_post_process(qp, infl_req->cop, infl_req, &res.cn10k);

        if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                rte_mempool_put(qp->meta_info.pool, infl_req->mdata);

        rte_mempool_put(qp->ca.req_mp, infl_req);
        return (uintptr_t)cop;
}

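/*
 * Poll the pending queue for completed ops, in order from the tail. The
 * scan stops at the first result still marked CPT_COMP_NOT_DONE; if that
 * entry has outlived the timeout, an error is logged and the timeout is
 * re-armed.
 */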
static uint16_t
cn10k_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        struct cpt_inflight_req *infl_req;
        struct cnxk_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        uint64_t infl_cnt, pq_tail;
        struct rte_crypto_op *cop;
        union cpt_res_s res;
        int i;

        pend_q = &qp->pend_q;

        const uint64_t pq_mask = pend_q->pq_mask;

        pq_tail = pend_q->tail;
        infl_cnt = pending_queue_infl_cnt(pend_q->head, pq_tail, pq_mask);
        nb_ops = RTE_MIN(nb_ops, infl_cnt);

        /* Ensure infl_cnt isn't read before data lands */
        rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

        for (i = 0; i < nb_ops; i++) {
                infl_req = &pend_q->req_queue[pq_tail];

                res.u64[0] = __atomic_load_n(&infl_req->res.u64[0],
                                             __ATOMIC_RELAXED);

                if (unlikely(res.cn10k.compcode == CPT_COMP_NOT_DONE)) {
                        if (unlikely(rte_get_timer_cycles() >
                                     pend_q->time_out)) {
                                plt_err("Request timed out");
                                pend_q->time_out = rte_get_timer_cycles() +
                                                   DEFAULT_COMMAND_TIMEOUT *
                                                           rte_get_timer_hz();
                        }
                        break;
                }

                pending_queue_advance(&pq_tail, pq_mask);

                cop = infl_req->cop;

                ops[i] = cop;

                cn10k_cpt_dequeue_post_process(qp, cop, infl_req, &res.cn10k);

                if (unlikely(infl_req->op_flags & CPT_OP_FLAGS_METABUF))
                        rte_mempool_put(qp->meta_info.pool, infl_req->mdata);
        }

        pend_q->tail = pq_tail;

        return i;
}

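/* Plug the cn10k fast-path handlers into the generic cryptodev struct. */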
void
cn10k_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = cn10k_cpt_enqueue_burst;
        dev->dequeue_burst = cn10k_cpt_dequeue_burst;

        rte_mb();
}

static void
cn10k_cpt_dev_info_get(struct rte_cryptodev *dev,
                       struct rte_cryptodev_info *info)
{
        if (info != NULL) {
                cnxk_cpt_dev_info_get(dev, info);
                info->driver_id = cn10k_cryptodev_driver_id;
        }
}

struct rte_cryptodev_ops cn10k_cpt_ops = {
        /* Device control ops */
        .dev_configure = cnxk_cpt_dev_config,
        .dev_start = cnxk_cpt_dev_start,
        .dev_stop = cnxk_cpt_dev_stop,
        .dev_close = cnxk_cpt_dev_close,
        .dev_infos_get = cn10k_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = cnxk_cpt_queue_pair_setup,
        .queue_pair_release = cnxk_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = cnxk_cpt_sym_session_get_size,
        .sym_session_configure = cnxk_cpt_sym_session_configure,
        .sym_session_clear = cnxk_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = cnxk_ae_session_size_get,
        .asym_session_configure = cnxk_ae_session_cfg,
        .asym_session_clear = cnxk_ae_session_clear,
};