dpdk.git: drivers/crypto/octeontx2/otx2_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_ipsec_anti_replay.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

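/* Per-lcore cache size for the per-queue-pair metabuf mempool below */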
#define METABUF_POOL_CACHE_SIZE 512

static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
        snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}

static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
                                struct otx2_cpt_qp *qp, uint8_t qp_id,
                                int nb_elements)
{
        char mempool_name[RTE_MEMPOOL_NAMESIZE];
        struct cpt_qp_meta_info *meta_info;
        struct rte_mempool *pool;
        int ret, max_mlen;
        int asym_mlen = 0;
        int lb_mlen = 0;
        int sg_mlen = 0;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

                /* Get meta len for scatter gather mode */
                sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

                /* Reserve an extra 32B for future use */
                sg_mlen += 4 * sizeof(uint64_t);

                /* Get meta len for linear buffer (direct) mode */
                lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

                /* Reserve an extra 32B for future use */
                lb_mlen += 4 * sizeof(uint64_t);
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

                /* Get meta len required for asymmetric operations */
                asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
        }

        /*
         * Compute the maximum meta buffer length required to support a
         * crypto op of any type (sym/asym).
         */
        max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

        /* Allocate mempool */

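        /*
         * The pool is created empty so that the mempool ops can be chosen
         * explicitly before population; RTE_MBUF_DEFAULT_MEMPOOL_OPS selects
         * the platform default (on this SoC, presumably the hardware NPA).
         */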
        snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
                 dev->data->dev_id, qp_id);

        pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
                                        METABUF_POOL_CACHE_SIZE, 0,
                                        rte_socket_id(), 0);

        if (pool == NULL) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                return rte_errno;
        }

        ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
                                         NULL);
        if (ret) {
                CPT_LOG_ERR("Could not set mempool ops");
                goto mempool_free;
        }

        ret = rte_mempool_populate_default(pool);
        if (ret <= 0) {
                CPT_LOG_ERR("Could not populate metabuf pool");
                /* Zero objects populated is an error too; don't return 0 */
                if (ret == 0)
                        ret = -EINVAL;
                goto mempool_free;
        }

        meta_info = &qp->meta_info;

        meta_info->pool = pool;
        meta_info->lb_mlen = lb_mlen;
        meta_info->sg_mlen = sg_mlen;

        return 0;

mempool_free:
        rte_mempool_free(pool);
        return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
        struct cpt_qp_meta_info *meta_info = &qp->meta_info;

        rte_mempool_free(meta_info->pool);

        meta_info->pool = NULL;
        meta_info->lb_mlen = 0;
        meta_info->sg_mlen = 0;
}

static int
otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
        static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
        uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
        int i, ret;

        for (i = 0; i < nb_ethport; i++) {
                port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
                if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
                        break;
        }

        if (i >= nb_ethport)
                return 0;

        ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
        if (ret)
                return ret;

        /* Publish inline Tx QP to eth dev security */
        ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
        if (ret)
                return ret;

        return 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint8_t group)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        uint64_t pg_sz = sysconf(_SC_PAGESIZE);
        const struct rte_memzone *lf_mem;
        uint32_t len, iq_len, size_div40;
        char name[RTE_MEMZONE_NAMESIZE];
        uint64_t used_len, iova;
        struct otx2_cpt_qp *qp;
        uint64_t lmtline;
        uint8_t *va;
        int ret;

        /* Allocate queue pair */
        qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
                                OTX2_ALIGN, 0);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not allocate queue pair");
                return NULL;
        }

        iq_len = OTX2_CPT_IQ_LEN;

        /*
         * The queue size must be a multiple of 40; the effective queue size
         * available to software is (size_div40 - 1) * 40.
         */
        size_div40 = (iq_len + 40 - 1) / 40 + 1;
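        /*
         * Note: size_div40 = ceil(iq_len / 40) + 1, so the effective size
         * (size_div40 - 1) * 40 is always at least iq_len.
         */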

        /* For pending queue */
        len = iq_len * sizeof(uintptr_t);

        /* Space for instruction group memory */
        len += size_div40 * 16;

        /* Align so that the instruction queue starts page-size aligned */
        len = RTE_ALIGN(len, pg_sz);

        /* For instruction queues */
        len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);

        /* Padding after the instruction queue */
        len = RTE_ALIGN(len, pg_sz);
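        /*
         * Resulting LF memzone layout:
         *   [ pending queue | instruction group memory | pad to pg_sz |
         *     instruction queue | pad to pg_sz ]
         */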

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp_id);

        lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
                        RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
                        RTE_CACHE_LINE_SIZE);
        if (lf_mem == NULL) {
                CPT_LOG_ERR("Could not allocate reserved memzone");
                goto qp_free;
        }

        va = lf_mem->addr;
        iova = lf_mem->iova;

        memset(va, 0, len);

        ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
        if (ret) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                goto lf_mem_free;
        }

        /* Initialize pending queue */
        qp->pend_q.req_queue = (uintptr_t *)va;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;

        used_len = iq_len * sizeof(uintptr_t);
        used_len += size_div40 * 16;
        used_len = RTE_ALIGN(used_len, pg_sz);
        iova += used_len;

        qp->iq_dma_addr = iova;
        qp->id = qp_id;
        qp->blkaddr = vf->lf_blkaddr[qp_id];
        qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);

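        /*
         * LMT line address in BAR2: per the shifts below, the RVU block
         * address sits at bit 20 and the LF slot at bit 12.
         */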
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
                  OTX2_LMT_LF_LMTLINE(0);

        qp->lmtline = (void *)lmtline;

        qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

        ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
        if (ret && (ret != -ENOENT)) {
                CPT_LOG_ERR("Could not delete inline configuration");
                goto mempool_destroy;
        }

        otx2_cpt_iq_disable(qp);

        ret = otx2_cpt_qp_inline_cfg(dev, qp);
        if (ret) {
                CPT_LOG_ERR("Could not configure queue for inline IPsec");
                goto mempool_destroy;
        }

        ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
                                 size_div40);
        if (ret) {
                CPT_LOG_ERR("Could not enable instruction queue");
                goto mempool_destroy;
        }

        return qp;

mempool_destroy:
        otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
        rte_memzone_free(lf_mem);
qp_free:
        rte_free(qp);
        return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
        const struct rte_memzone *lf_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;

        ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
        if (ret && (ret != -ENOENT)) {
                CPT_LOG_ERR("Could not delete inline configuration");
                return ret;
        }

        otx2_cpt_iq_disable(qp);

        otx2_cpt_metabuf_mempool_destroy(qp);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp->id);

        lf_mem = rte_memzone_lookup(name);

        ret = rte_memzone_free(lf_mem);
        if (ret)
                return ret;

        rte_free(qp);

        return 0;
}

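/*
 * Reject xform chains this path cannot handle: auth followed by cipher
 * encrypt, cipher decrypt followed by auth, 3DES-CBC chained with plain
 * SHA1 in either order, and standalone NULL-auth verify.
 */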
static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
        if (xform->next) {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH)
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
                        return -ENOTSUP;

        } else {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
                    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                        return -ENOTSUP;
        }
        return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
{
        struct rte_crypto_sym_xform *temp_xform = xform;
        struct cpt_sess_misc *misc;
        vq_cmd_word3_t vq_cmd_w3;
        void *priv;
        int ret;

        ret = sym_xform_verify(xform);
        if (unlikely(ret))
                return ret;

        if (unlikely(rte_mempool_get(pool, &priv))) {
                CPT_LOG_ERR("Could not allocate session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cpt_sess_misc) +
                        offsetof(struct cpt_ctx, mc_ctx));

        misc = priv;

        for ( ; xform != NULL; xform = xform->next) {
                switch (xform->type) {
                case RTE_CRYPTO_SYM_XFORM_AEAD:
                        ret = fill_sess_aead(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_CIPHER:
                        ret = fill_sess_cipher(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_AUTH:
                        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
                                ret = fill_sess_gmac(xform, misc);
                        else
                                ret = fill_sess_auth(xform, misc);
                        break;
                default:
                        ret = -1;
                }

                if (ret)
                        goto priv_put;
        }

        if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
                        cpt_mac_len_verify(&temp_xform->auth)) {
                CPT_LOG_ERR("MAC length is not supported");
                ret = -ENOTSUP;
                goto priv_put;
        }

        set_sym_session_private_data(sess, driver_id, misc);

        misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
                             sizeof(struct cpt_sess_misc);

        vq_cmd_w3.u64 = 0;
        vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
                                                         mc_ctx);

        /*
         * IE engines support IPsec operations only; SE engines support
         * IPsec operations, Chacha-Poly and air-crypto operations.
         */
        if (misc->zsk_flag || misc->chacha_poly)
                vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
        else
                vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;

        misc->cpt_inst_w7 = vq_cmd_w3.u64;

        return 0;

priv_put:
        rte_mempool_put(pool, priv);

        return -ENOTSUP;
}

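/*
 * Event-device (crypto adapter) submission path: the instruction carries
 * the event queue, scheduling type and flow tag, and sets QORD/WQ_PTR so
 * that the completion is delivered as a RTE_EVENT_TYPE_CRYPTODEV event
 * instead of being polled through the pending queue.
 */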
static __rte_always_inline void __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
                    struct cpt_request_info *req,
                    void *lmtline,
                    uint64_t cpt_inst_w7)
{
        union cpt_inst_s inst;
        uint64_t lmt_status;

        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
        inst.u[3] = 0;

        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
        inst.s9x.ei3 = cpt_inst_w7;

        inst.s9x.qord = 1;
        inst.s9x.grp = qp->ev.queue_id;
        inst.s9x.tt = qp->ev.sched_type;
        inst.s9x.tag = (RTE_EVENT_TYPE_CRYPTODEV << 28) |
                        qp->ev.flow_id;
        inst.s9x.wq_ptr = (uint64_t)req >> 3;
        req->qp = qp;

        do {
                /* Copy CPT command to LMTLINE */
                memcpy(lmtline, &inst, sizeof(inst));

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                     struct pending_queue *pend_q,
                     struct cpt_request_info *req,
                     uint64_t cpt_inst_w7)
{
        void *lmtline = qp->lmtline;
        union cpt_inst_s inst;
        uint64_t lmt_status;

        if (qp->ca_enable) {
                otx2_ca_enqueue_req(qp, req, lmtline, cpt_inst_w7);
                return 0;
        }

        if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
                return -EAGAIN;

        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
        inst.u[3] = 0;

        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
        inst.s9x.ei3 = cpt_inst_w7;

        req->time_out = rte_get_timer_cycles() +
                        DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        do {
                /* Copy CPT command to LMTLINE */
                memcpy(lmtline, &inst, sizeof(inst));

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        pend_q->req_queue[pend_q->enq_tail] = (uintptr_t)req;

        /* Use the soft queue length here to limit requests */
        MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
        pend_q->pending_count += 1;

        return 0;
}

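/*
 * Meta buffer layout used by the asym enqueue path (consumed again at
 * dequeue time):
 *   cop[0] = mdata pointer, cop[1] = crypto op, cop[2..3] = reserved,
 * followed by struct cpt_request_info and then scratch space for the
 * microcode (params.meta_buf is advanced past the request structure).
 */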
static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
                      struct rte_crypto_op *op,
                      struct pending_queue *pend_q)
{
        struct cpt_qp_meta_info *minfo = &qp->meta_info;
        struct rte_crypto_asym_op *asym_op = op->asym;
        struct asym_op_params params = {0};
        struct cpt_asym_sess_misc *sess;
        uintptr_t *cop;
        void *mdata;
        int ret;

        if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
                CPT_LOG_ERR("Could not allocate meta buffer for request");
                return -ENOMEM;
        }

        sess = get_asym_session_private_data(asym_op->session,
                                             otx2_cryptodev_driver_id);

        /* Store IO address of the mdata to meta_buf */
        params.meta_buf = rte_mempool_virt2iova(mdata);

        cop = mdata;
        cop[0] = (uintptr_t)mdata;
        cop[1] = (uintptr_t)op;
        cop[2] = cop[3] = 0ULL;

        params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
        params.req->op = cop;

        /* Adjust meta_buf to point to end of cpt_request_info structure */
        params.meta_buf += (4 * sizeof(uintptr_t)) +
                            sizeof(struct cpt_request_info);
        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                ret = cpt_modex_prep(&params, &sess->mod_ctx);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                ret = cpt_enqueue_rsa_op(op, &params, sess);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
                                    sess->ec_ctx.curveid);
                if (unlikely(ret))
                        goto req_fail;
                break;
        default:
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                ret = -EINVAL;
                goto req_fail;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, sess->cpt_inst_w7);

        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Could not enqueue crypto req");
                goto req_fail;
        }

        return 0;

req_fail:
        free_op_meta(mdata, minfo->pool);

        return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cpt_request_info *req;
        struct cpt_sess_misc *sess;
        uint64_t cpt_op;
        void *mdata;
        int ret;

        sess = get_sym_session_private_data(sym_op->session,
                                            otx2_cryptodev_driver_id);

        cpt_op = sess->cpt_op;

        if (cpt_op & CPT_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
                                     (void **)&req);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
                                         (void **)&req);

        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
                                op, (unsigned int)cpt_op, ret);
                return ret;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);

        if (unlikely(ret)) {
                /* Free buffer allocated by fill params routines */
                free_op_meta(mdata, qp->meta_info.pool);
        }

        return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q)
{
        uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
        struct rte_mbuf *m_src = op->sym->m_src;
        struct otx2_sec_session_ipsec_lp *sess;
        struct otx2_ipsec_po_sa_ctl *ctl_wrd;
        struct otx2_ipsec_po_in_sa *sa;
        struct otx2_sec_session *priv;
        struct cpt_request_info *req;
        uint64_t seq_in_sa, seq = 0;
        uint8_t esn;
        int ret;

        priv = get_sec_session_private_data(op->sym->sec_session);
        sess = &priv->ipsec.lp;
        sa = &sess->in_sa;

        ctl_wrd = &sa->ctl;
        esn = ctl_wrd->esn_en;
        winsz = sa->replay_win_sz;

        if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
                ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
        else {
                if (winsz) {
                        esn_low = rte_be_to_cpu_32(sa->esn_low);
                        esn_hi = rte_be_to_cpu_32(sa->esn_hi);
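                        /*
                         * Read the ESP sequence number from the packet: it
                         * sits right after the 4-byte SPI that follows the
                         * IPv4 header (assuming no IP options).
                         */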
                        seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
                                sizeof(struct rte_ipv4_hdr) + 4);
                        seql = rte_be_to_cpu_32(seql);

                        if (!esn)
                                seq = (uint64_t)seql;
                        else {
                                seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
                                                esn_low);
                                seq = ((uint64_t)seqh << 32) | seql;
                        }

                        if (unlikely(seq == 0))
                                return IPSEC_ANTI_REPLAY_FAILED;

                        ret = anti_replay_check(sa->replay, seq, winsz);
                        if (unlikely(ret)) {
                                otx2_err("Anti replay check failed");
                                return IPSEC_ANTI_REPLAY_FAILED;
                        }
                }

                ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
        }

        if (unlikely(ret)) {
                otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
                return ret;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, req, sess->cpt_inst_w7);

        if (winsz && esn) {
                seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
                if (seq > seq_in_sa) {
                        sa->esn_low = rte_cpu_to_be_32(seql);
                        sa->esn_hi = rte_cpu_to_be_32(seqh);
                }
        }

        return ret;
}

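/*
 * Session-less ops: build a temporary session from the op's xform using
 * the queue pair's session mempools; it is torn down again at dequeue
 * (see otx2_cpt_dequeue_post_process).
 */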
static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                              struct pending_queue *pend_q)
{
        const int driver_id = otx2_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        int ret;

        /* Create temporary session */
        sess = rte_cryptodev_sym_session_create(qp->sess_mp);
        if (sess == NULL)
                return -ENOMEM;

        ret = sym_session_configure(driver_id, sym_op->xform, sess,
                                    qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        sym_op->session = sess;

        ret = otx2_cpt_enqueue_sym(qp, op, pend_q);

        if (unlikely(ret))
                goto priv_put;

        return 0;

priv_put:
        sym_session_clear(driver_id, sess);
sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return ret;
}

static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint16_t nb_allowed, count = 0;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct rte_crypto_op *op;
        int ret;

        pend_q = &qp->pend_q;

        nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count;
        if (nb_ops > nb_allowed)
                nb_ops = nb_allowed;

        for (count = 0; count < nb_ops; count++) {
                op = ops[count];
                if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                                ret = otx2_cpt_enqueue_sec(qp, op, pend_q);
                        else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
                        else
                                ret = otx2_cpt_enqueue_sym_sessless(qp, op,
                                                                    pend_q);
                } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
                        else
                                break;
                } else
                        break;

                if (unlikely(ret))
                        break;
        }

        return count;
}

static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
                     struct rte_crypto_rsa_xform *rsa_ctx)
{
        struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

        switch (rsa->op_type) {
        case RTE_CRYPTO_ASYM_OP_ENCRYPT:
                rsa->cipher.length = rsa_ctx->n.length;
                memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
                break;
        case RTE_CRYPTO_ASYM_OP_DECRYPT:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->message.length = rsa_ctx->n.length;
                        memcpy(rsa->message.data, req->rptr,
                               rsa->message.length);
                } else {
                        /* Get length of decrypted output */
                        rsa->message.length = rte_cpu_to_be_16
                                             (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy decrypted data.
                         */
                        memcpy(rsa->message.data, req->rptr + 2,
                               rsa->message.length);
                }
                break;
        case RTE_CRYPTO_ASYM_OP_SIGN:
                rsa->sign.length = rsa_ctx->n.length;
                memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                break;
        case RTE_CRYPTO_ASYM_OP_VERIFY:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->sign.length = rsa_ctx->n.length;
                        memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                } else {
                        /* Get length of signed output */
                        rsa->sign.length = rte_cpu_to_be_16
                                          (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy signed data.
                         */
                        memcpy(rsa->sign.data, req->rptr + 2,
                               rsa->sign.length);
                }
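                /*
                 * Verification: compare the message recovered from the
                 * signature against the application-supplied message.
                 */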
                if (memcmp(rsa->sign.data, rsa->message.data,
                           rsa->message.length)) {
                        CPT_LOG_DP_ERR("RSA verification failed");
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid RSA operation type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
                               struct cpt_request_info *req,
                               struct cpt_asym_ec_ctx *ec)
{
        int prime_len = ec_grp[ec->curveid].prime.length;

        if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
                return;

        /* Separate out sign r and s components */
        memcpy(ecdsa->r.data, req->rptr, prime_len);
        memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
               prime_len);
        ecdsa->r.length = prime_len;
        ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
                             struct cpt_request_info *req,
                             struct cpt_asym_ec_ctx *ec)
{
        int prime_len = ec_grp[ec->curveid].prime.length;

        memcpy(ecpm->r.x.data, req->rptr, prime_len);
        memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
               prime_len);
        ecpm->r.x.length = prime_len;
        ecpm->r.y.length = prime_len;
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
                           struct cpt_request_info *req)
{
        struct rte_crypto_asym_op *op = cop->asym;
        struct cpt_asym_sess_misc *sess;

        sess = get_asym_session_private_data(op->session,
                                             otx2_cryptodev_driver_id);

        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                op->modex.result.length = sess->mod_ctx.modulus.length;
                memcpy(op->modex.result.data, req->rptr,
                       op->modex.result.length);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid crypto xform type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static void
otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
        struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
        vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct rte_mbuf *m = sym_op->m_src;
        struct rte_ipv6_hdr *ip6;
        struct rte_ipv4_hdr *ip;
        uint16_t m_len;
        int mdata_len;
        char *data;

        mdata_len = (int)rsp[3];
        rte_pktmbuf_trim(m, mdata_len);

        if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
                data = rte_pktmbuf_mtod(m, char *);

                if (rsp[4] == RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
                        ip = (struct rte_ipv4_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip->total_length);
                } else {
                        ip6 = (struct rte_ipv6_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip6->payload_len) +
                                sizeof(struct rte_ipv6_hdr);
                }

                m->data_len = m_len;
                m->pkt_len = m_len;
                m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
        }
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
                              uintptr_t *rsp, uint8_t cc)
{
        unsigned int sz;

        if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
                                otx2_cpt_sec_post_process(cop, rsp);
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        } else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        return;
                }

                if (likely(cc == NO_ERR)) {
                        /* Verify authentication data if required */
                        if (unlikely(rsp[2]))
                                compl_auth_verify(cop, (uint8_t *)rsp[2],
                                                 rsp[3]);
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        if (cc == ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }

                if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                        sym_session_clear(otx2_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                        cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }

        if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                if (likely(cc == NO_ERR)) {
                        cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        /*
                         * Pass cpt_req_info stored in metabuf during
                         * enqueue.
                         */
                        rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
                        otx2_cpt_asym_post_process(cop,
                                        (struct cpt_request_info *)rsp);
                } else
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

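/*
 * Each completed request's meta buffer doubles as the response descriptor
 * laid out at enqueue time by the fill routines: rsp[0] = metabuf,
 * rsp[1] = crypto op, and rsp[2]/rsp[3] carry per-path data (auth data
 * pointer/length for sym, request info/meta length for security sessions).
 */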
static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        int i, nb_pending, nb_completed;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_request_info *req;
        struct rte_crypto_op *cop;
        uint8_t cc[nb_ops];
        uintptr_t *rsp;
        void *metabuf;

        pend_q = &qp->pend_q;

        nb_pending = pend_q->pending_count;

        if (nb_ops > nb_pending)
                nb_ops = nb_pending;

        for (i = 0; i < nb_ops; i++) {
                req = (struct cpt_request_info *)
                                pend_q->req_queue[pend_q->deq_head];

                cc[i] = otx2_cpt_compcode_get(req);

                if (unlikely(cc[i] == ERR_REQ_PENDING))
                        break;

                ops[i] = req->op;

                MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
                pend_q->pending_count -= 1;
        }

        nb_completed = i;

        for (i = 0; i < nb_completed; i++) {
                rsp = (void *)ops[i];

                metabuf = (void *)rsp[0];
                cop = (void *)rsp[1];

                ops[i] = cop;

                otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

                free_op_meta(metabuf, qp->meta_info.pool);
        }

        return nb_completed;
}

void
otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = otx2_cpt_enqueue_burst;
        dev->dequeue_burst = otx2_cpt_dequeue_burst;

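        /*
         * Full barrier: make the updated burst function pointers visible
         * to all cores before the device is used.
         */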
        rte_mb();
}

/* PMD ops */

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
                    struct rte_cryptodev_config *conf)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int ret;

        if (conf->nb_queue_pairs > vf->max_queues) {
                CPT_LOG_ERR("Invalid number of queue pairs requested");
                return -EINVAL;
        }

        dev->feature_flags &= ~conf->ff_disable;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
                /* Initialize shared FPM table */
                ret = cpt_fpm_init(otx2_fpm_iova);
                if (ret)
                        return ret;
        }

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret) {
                        CPT_LOG_ERR("Could not detach CPT queues");
                        return ret;
                }
        }

        /* Attach queues */
        ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
        if (ret) {
                CPT_LOG_ERR("Could not attach CPT queues");
                return -ENODEV;
        }

        ret = otx2_cpt_msix_offsets_get(dev);
        if (ret) {
                CPT_LOG_ERR("Could not get MSI-X offsets");
                goto queues_detach;
        }

        /* Register error interrupts */
        ret = otx2_cpt_err_intr_register(dev);
        if (ret) {
                CPT_LOG_ERR("Could not register error interrupts");
                goto queues_detach;
        }

        ret = otx2_cpt_inline_init(dev);
        if (ret) {
                CPT_LOG_ERR("Could not enable inline IPsec");
                goto intr_unregister;
        }

        otx2_cpt_set_enqdeq_fns(dev);

        return 0;

intr_unregister:
        otx2_cpt_err_intr_unregister(dev);
queues_detach:
        otx2_cpt_queues_detach(dev);
        return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        CPT_PMD_INIT_FUNC_TRACE();

        return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
        CPT_PMD_INIT_FUNC_TRACE();

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
                cpt_fpm_clear();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int i, ret = 0;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = otx2_cpt_queue_pair_release(dev, i);
                if (ret)
                        return ret;
        }

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret)
                        CPT_LOG_ERR("Could not detach CPT queues");
        }

        return ret;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;

        if (info != NULL) {
                info->max_nb_queue_pairs = vf->max_queues;
                info->feature_flags = dev->feature_flags;
                info->capabilities = otx2_cpt_capabilities_get();
                info->sym.max_nb_sessions = 0;
                info->driver_id = otx2_cryptodev_driver_id;
                info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
                info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
        }
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                          const struct rte_cryptodev_qp_conf *conf,
                          int socket_id __rte_unused)
{
        uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
        struct rte_pci_device *pci_dev;
        struct otx2_cpt_qp *qp;

        CPT_PMD_INIT_FUNC_TRACE();

        if (dev->data->queue_pairs[qp_id] != NULL)
                otx2_cpt_queue_pair_release(dev, qp_id);

        if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
                CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
                            conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

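        /* BAR2 holds the LF register space; bail out if it is unmapped */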
        if (pci_dev->mem_resource[2].addr == NULL) {
                CPT_LOG_ERR("Invalid PCI mem address");
                return -EIO;
        }

        qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not create queue pair %d", qp_id);
                return -ENOMEM;
        }

        qp->sess_mp = conf->mp_session;
        qp->sess_mp_priv = conf->mp_session_private;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (qp == NULL)
                return -EINVAL;

        CPT_LOG_INFO("Releasing queue pair %d", qp_id);

        ret = otx2_cpt_qp_destroy(dev, qp);
        if (ret) {
                CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
                return ret;
        }

        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
                               struct rte_crypto_sym_xform *xform,
                               struct rte_cryptodev_sym_session *sess,
                               struct rte_mempool *pool)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_sym_session *sess)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                          struct rte_crypto_asym_xform *xform,
                          struct rte_cryptodev_asym_session *sess,
                          struct rte_mempool *pool)
{
        struct cpt_asym_sess_misc *priv;
        vq_cmd_word3_t vq_cmd_w3;
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(pool, (void **)&priv)) {
                CPT_LOG_ERR("Could not allocate session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

        ret = cpt_fill_asym_session_parameters(priv, xform);
        if (ret) {
                CPT_LOG_ERR("Could not configure session parameters");

                /* Return session to mempool */
                rte_mempool_put(pool, priv);
                return ret;
        }

        vq_cmd_w3.u64 = 0;
        vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
        priv->cpt_inst_w7 = vq_cmd_w3.u64;

        set_asym_session_private_data(sess, dev->driver_id, priv);

        return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
                            struct rte_cryptodev_asym_session *sess)
{
        struct cpt_asym_sess_misc *priv;
        struct rte_mempool *sess_mp;

        CPT_PMD_INIT_FUNC_TRACE();

        priv = get_asym_session_private_data(sess, dev->driver_id);
        if (priv == NULL)
                return;

        /* Free resources allocated in session_cfg */
        cpt_free_asym_session_parameters(priv);

        /* Reset and free object back to pool */
        memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
        sess_mp = rte_mempool_from_obj(priv);
        set_asym_session_private_data(sess, dev->driver_id, NULL);
        rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
        /* Device control ops */
        .dev_configure = otx2_cpt_dev_config,
        .dev_start = otx2_cpt_dev_start,
        .dev_stop = otx2_cpt_dev_stop,
        .dev_close = otx2_cpt_dev_close,
        .dev_infos_get = otx2_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = otx2_cpt_queue_pair_setup,
        .queue_pair_release = otx2_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = otx2_cpt_sym_session_get_size,
        .sym_session_configure = otx2_cpt_sym_session_configure,
        .sym_session_clear = otx2_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = otx2_cpt_asym_session_size_get,
        .asym_session_configure = otx2_cpt_asym_session_cfg,
        .asym_session_clear = otx2_cpt_asym_session_clear,
};