drivers/crypto/octeontx2/otx2_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <cryptodev_pmd.h>
#include <rte_errno.h>
#include <ethdev_driver.h>
#include <rte_event_crypto_adapter.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_cryptodev_ops_helper.h"
#include "otx2_ipsec_anti_replay.h"
#include "otx2_ipsec_po_ops.h"
#include "otx2_mbox.h"
#include "otx2_sec_idev.h"
#include "otx2_security.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#define METABUF_POOL_CACHE_SIZE 512

static uint64_t otx2_fpm_iova[CPT_EC_ID_PMAX];

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
        snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}

static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
                                struct otx2_cpt_qp *qp, uint8_t qp_id,
                                unsigned int nb_elements)
{
        char mempool_name[RTE_MEMPOOL_NAMESIZE];
        struct cpt_qp_meta_info *meta_info;
        int lcore_cnt = rte_lcore_count();
        int ret, max_mlen, mb_pool_sz;
        struct rte_mempool *pool;
        int asym_mlen = 0;
        int lb_mlen = 0;
        int sg_mlen = 0;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

                /* Get meta len for scatter gather mode */
                sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

                /* Extra 32B reserved for future use */
                sg_mlen += 4 * sizeof(uint64_t);

                /* Get meta len for linear buffer (direct) mode */
                lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

                /* Extra 32B reserved for future use */
                lb_mlen += 4 * sizeof(uint64_t);
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

                /* Get meta len required for asymmetric operations */
                asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
        }

        /*
         * Check max requirement for meta buffer to
         * support crypto op of any type (sym/asym).
         */
        max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

        /* Allocate mempool */

        snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
                 dev->data->dev_id, qp_id);

        mb_pool_sz = nb_elements;

        /* For poll mode, the core that enqueues and the core that dequeues
         * can be different. For event mode, all cores are allowed to use the
         * same crypto queue pair.
         */
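        /*
         * e.g. nb_elements = 8192 on a 4-lcore run sizes the pool at
         * 8192 + 4 * 512 = 10240 metabufs, so the per-lcore mempool
         * caches cannot starve the shared pool.
         */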
        mb_pool_sz += (RTE_MAX(2, lcore_cnt) * METABUF_POOL_CACHE_SIZE);

        pool = rte_mempool_create_empty(mempool_name, mb_pool_sz, max_mlen,
                                        METABUF_POOL_CACHE_SIZE, 0,
                                        rte_socket_id(), 0);

        if (pool == NULL) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                return rte_errno;
        }

        ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
                                         NULL);
        if (ret) {
                CPT_LOG_ERR("Could not set mempool ops");
                goto mempool_free;
        }

        ret = rte_mempool_populate_default(pool);
        if (ret <= 0) {
                CPT_LOG_ERR("Could not populate metabuf pool");
                goto mempool_free;
        }

        meta_info = &qp->meta_info;

        meta_info->pool = pool;
        meta_info->lb_mlen = lb_mlen;
        meta_info->sg_mlen = sg_mlen;

        return 0;

mempool_free:
        rte_mempool_free(pool);
        return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
        struct cpt_qp_meta_info *meta_info = &qp->meta_info;

        rte_mempool_free(meta_info->pool);

        meta_info->pool = NULL;
        meta_info->lb_mlen = 0;
        meta_info->sg_mlen = 0;
}

static int
otx2_cpt_qp_inline_cfg(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
        static rte_atomic16_t port_offset = RTE_ATOMIC16_INIT(-1);
        uint16_t port_id, nb_ethport = rte_eth_dev_count_avail();
        int i, ret;

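        /* Round-robin across available ethdevs to find a security-capable
         * port for the inline IPsec path.
         */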
        for (i = 0; i < nb_ethport; i++) {
                port_id = rte_atomic16_add_return(&port_offset, 1) % nb_ethport;
                if (otx2_eth_dev_is_sec_capable(&rte_eth_devices[port_id]))
                        break;
        }

        if (i >= nb_ethport)
                return 0;

        ret = otx2_cpt_qp_ethdev_bind(dev, qp, port_id);
        if (ret)
                return ret;

        /* Publish inline Tx QP to eth dev security */
        ret = otx2_sec_idev_tx_cpt_qp_add(port_id, qp);
        if (ret)
                return ret;

        return 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint8_t group)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        uint64_t pg_sz = sysconf(_SC_PAGESIZE);
        const struct rte_memzone *lf_mem;
        uint32_t len, iq_len, size_div40;
        char name[RTE_MEMZONE_NAMESIZE];
        uint64_t used_len, iova;
        struct otx2_cpt_qp *qp;
        uint64_t lmtline;
        uint8_t *va;
        int ret;

        /* Allocate queue pair */
        qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
                                OTX2_ALIGN, 0);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not allocate queue pair");
                return NULL;
        }

        /*
         * Pending queue updates assume that the queue size is a power
         * of 2.
         */
        RTE_BUILD_BUG_ON(!RTE_IS_POWER_OF_2(OTX2_CPT_DEFAULT_CMD_QLEN));

        iq_len = OTX2_CPT_DEFAULT_CMD_QLEN;

        /*
         * Queue size must be a multiple of 40, and the effective queue size
         * available to software is (size_div40 - 1) * 40.
         */
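        /*
         * e.g. if iq_len were 8192: size_div40 = (8192 + 39) / 40 + 1 = 206,
         * for an effective queue size of 205 * 40 = 8200 entries.
         */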
        size_div40 = (iq_len + 40 - 1) / 40 + 1;

        /* For pending queue */
        len = iq_len * RTE_ALIGN(sizeof(qp->pend_q.rid_queue[0]), 8);

        /* Space for instruction group memory */
        len += size_div40 * 16;

        /* So that instruction queues start page-size aligned */
        len = RTE_ALIGN(len, pg_sz);

        /* For instruction queues */
        len += OTX2_CPT_DEFAULT_CMD_QLEN * sizeof(union cpt_inst_s);

        /* Pad to page size; space after the instruction queues is unused */
        len = RTE_ALIGN(len, pg_sz);
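
        /*
         * Resulting memzone layout:
         * [pending queue ring][instruction group memory][pad to pg_sz]
         * [instruction queue][pad to pg_sz]
         */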

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp_id);

        lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
                        RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
                        RTE_CACHE_LINE_SIZE);
        if (lf_mem == NULL) {
                CPT_LOG_ERR("Could not allocate reserved memzone");
                goto qp_free;
        }

        va = lf_mem->addr;
        iova = lf_mem->iova;

        memset(va, 0, len);

        ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
        if (ret) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                goto lf_mem_free;
        }

        /* Initialize pending queue */
        qp->pend_q.rid_queue = (void **)va;
        qp->pend_q.tail = 0;
        qp->pend_q.head = 0;

        used_len = iq_len * RTE_ALIGN(sizeof(qp->pend_q.rid_queue[0]), 8);
        used_len += size_div40 * 16;
        used_len = RTE_ALIGN(used_len, pg_sz);
        iova += used_len;

        qp->iq_dma_addr = iova;
        qp->id = qp_id;
        qp->blkaddr = vf->lf_blkaddr[qp_id];
        qp->base = OTX2_CPT_LF_BAR2(vf, qp->blkaddr, qp_id);

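        /*
         * LMTLINE BAR2 offset encoding: the RVU block address is placed at
         * bit 20 and the LF slot (here qp_id) at bit 12, giving each queue
         * pair its own LMTLINE window.
         */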
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
                  OTX2_LMT_LF_LMTLINE(0);

        qp->lmtline = (void *)lmtline;

        qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

        ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
        if (ret && (ret != -ENOENT)) {
                CPT_LOG_ERR("Could not delete inline configuration");
                goto mempool_destroy;
        }

        otx2_cpt_iq_disable(qp);

        ret = otx2_cpt_qp_inline_cfg(dev, qp);
        if (ret) {
                CPT_LOG_ERR("Could not configure queue for inline IPsec");
                goto mempool_destroy;
        }

        ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
                                 size_div40);
        if (ret) {
                CPT_LOG_ERR("Could not enable instruction queue");
                goto mempool_destroy;
        }

        return qp;

mempool_destroy:
        otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
        rte_memzone_free(lf_mem);
qp_free:
        rte_free(qp);
        return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
        const struct rte_memzone *lf_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;

        ret = otx2_sec_idev_tx_cpt_qp_remove(qp);
        if (ret && (ret != -ENOENT)) {
                CPT_LOG_ERR("Could not delete inline configuration");
                return ret;
        }

        otx2_cpt_iq_disable(qp);

        otx2_cpt_metabuf_mempool_destroy(qp);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp->id);

        lf_mem = rte_memzone_lookup(name);

        ret = rte_memzone_free(lf_mem);
        if (ret)
                return ret;

        rte_free(qp);

        return 0;
}

static int
sym_xform_verify(struct rte_crypto_sym_xform *xform)
{
        if (xform->next) {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT &&
                    (xform->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC ||
                     xform->next->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC))
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    (xform->cipher.algo != RTE_CRYPTO_CIPHER_AES_CBC ||
                     xform->next->auth.algo != RTE_CRYPTO_AUTH_SHA1_HMAC))
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->next->auth.algo == RTE_CRYPTO_AUTH_SHA1)
                        return -ENOTSUP;

                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->auth.algo == RTE_CRYPTO_AUTH_SHA1 &&
                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                    xform->next->cipher.algo == RTE_CRYPTO_CIPHER_3DES_CBC)
                        return -ENOTSUP;

        } else {
                if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                    xform->auth.algo == RTE_CRYPTO_AUTH_NULL &&
                    xform->auth.op == RTE_CRYPTO_AUTH_OP_VERIFY)
                        return -ENOTSUP;
        }
        return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
{
        struct rte_crypto_sym_xform *temp_xform = xform;
        struct cpt_sess_misc *misc;
        vq_cmd_word3_t vq_cmd_w3;
        void *priv;
        int ret;

        ret = sym_xform_verify(xform);
        if (unlikely(ret))
                return ret;

        if (unlikely(rte_mempool_get(pool, &priv))) {
                CPT_LOG_ERR("Could not allocate session private data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cpt_sess_misc) +
                        offsetof(struct cpt_ctx, mc_ctx));

        misc = priv;

        for ( ; xform != NULL; xform = xform->next) {
                switch (xform->type) {
                case RTE_CRYPTO_SYM_XFORM_AEAD:
                        ret = fill_sess_aead(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_CIPHER:
                        ret = fill_sess_cipher(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_AUTH:
                        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
                                ret = fill_sess_gmac(xform, misc);
                        else
                                ret = fill_sess_auth(xform, misc);
                        break;
                default:
                        ret = -1;
                }

                if (ret)
                        goto priv_put;
        }

        if ((GET_SESS_FC_TYPE(misc) == HASH_HMAC) &&
                        cpt_mac_len_verify(&temp_xform->auth)) {
                CPT_LOG_ERR("MAC length is not supported");
                struct cpt_ctx *ctx = SESS_PRIV(misc);
                if (ctx->auth_key != NULL) {
                        rte_free(ctx->auth_key);
                        ctx->auth_key = NULL;
                }
                ret = -ENOTSUP;
                goto priv_put;
        }

        set_sym_session_private_data(sess, driver_id, misc);

        misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
                             sizeof(struct cpt_sess_misc);

        vq_cmd_w3.u64 = 0;
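        /* cptr: IOVA of the microcode context (mc_ctx) within the session */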
        vq_cmd_w3.s.cptr = misc->ctx_dma_addr + offsetof(struct cpt_ctx,
                                                         mc_ctx);

        /*
         * IE engines support IPsec operations.
         * SE engines support IPsec operations, Chacha-Poly and
         * Air-Crypto operations.
         */
        if (misc->zsk_flag || misc->chacha_poly)
                vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE;
        else
                vq_cmd_w3.s.grp = OTX2_CPT_EGRP_SE_IE;

        misc->cpt_inst_w7 = vq_cmd_w3.u64;

        return 0;

priv_put:
        rte_mempool_put(pool, priv);

        return -ENOTSUP;
}

static __rte_always_inline int32_t __rte_hot
otx2_ca_enqueue_req(const struct otx2_cpt_qp *qp,
                    struct cpt_request_info *req,
                    void *lmtline,
                    struct rte_crypto_op *op,
                    uint64_t cpt_inst_w7)
{
        union rte_event_crypto_metadata *m_data;
        union cpt_inst_s inst;
        uint64_t lmt_status;

        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION) {
                m_data = rte_cryptodev_sym_session_get_user_data(
                                                op->sym->session);
                if (m_data == NULL) {
                        rte_pktmbuf_free(op->sym->m_src);
                        rte_crypto_op_free(op);
                        rte_errno = EINVAL;
                        return -EINVAL;
                }
        } else if (op->sess_type == RTE_CRYPTO_OP_SESSIONLESS &&
                   op->private_data_offset) {
                m_data = (union rte_event_crypto_metadata *)
                         ((uint8_t *)op +
                          op->private_data_offset);
        } else {
                return -EINVAL;
        }

        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
        inst.u[3] = 0;

        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
        inst.s9x.ei3 = cpt_inst_w7;

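        /*
         * Word 2 encodes the completion event for the event device:
         * the event type (CRYPTODEV) in bits 28-31 with flow_id below it,
         * sched_type at bit 32 and queue_id at bit 34.
         */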
        inst.u[2] = (((RTE_EVENT_TYPE_CRYPTODEV << 28) |
                      m_data->response_info.flow_id) |
                     ((uint64_t)m_data->response_info.sched_type << 32) |
                     ((uint64_t)m_data->response_info.queue_id << 34));
        inst.u[3] = 1 | (((uint64_t)req >> 3) << 3);
        req->qp = qp;

        do {
                /* Copy CPT command to LMTLINE */
                memcpy(lmtline, &inst, sizeof(inst));

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        return 0;
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                     struct pending_queue *pend_q,
                     struct cpt_request_info *req,
                     struct rte_crypto_op *op,
                     uint64_t cpt_inst_w7,
                     unsigned int burst_index)
{
        void *lmtline = qp->lmtline;
        union cpt_inst_s inst;
        uint64_t lmt_status;

        if (qp->ca_enable)
                return otx2_ca_enqueue_req(qp, req, lmtline, op, cpt_inst_w7);

        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
        inst.u[3] = 0;

        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
        inst.s9x.ei3 = cpt_inst_w7;

        req->time_out = rte_get_timer_cycles() +
                        DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        do {
                /* Copy CPT command to LMTLINE */
                memcpy(lmtline, &inst, sizeof(inst));

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_io_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        pending_queue_push(pend_q, req, burst_index, OTX2_CPT_DEFAULT_CMD_QLEN);

        return 0;
}

static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
                      struct rte_crypto_op *op,
                      struct pending_queue *pend_q,
                      unsigned int burst_index)
{
        struct cpt_qp_meta_info *minfo = &qp->meta_info;
        struct rte_crypto_asym_op *asym_op = op->asym;
        struct asym_op_params params = {0};
        struct cpt_asym_sess_misc *sess;
        uintptr_t *cop;
        void *mdata;
        int ret;

        if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
                CPT_LOG_ERR("Could not allocate meta buffer for request");
                return -ENOMEM;
        }

        sess = get_asym_session_private_data(asym_op->session,
                                             otx2_cryptodev_driver_id);

        /* Store IO address of the mdata to meta_buf */
        params.meta_buf = rte_mempool_virt2iova(mdata);

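        /*
         * Metabuf header layout, consumed again at dequeue time:
         * cop[0] = metabuf, cop[1] = crypto op, cop[2..3] = response words.
         */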
        cop = mdata;
        cop[0] = (uintptr_t)mdata;
        cop[1] = (uintptr_t)op;
        cop[2] = cop[3] = 0ULL;

        params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
        params.req->op = cop;

        /* Adjust meta_buf to point to end of cpt_request_info structure */
        params.meta_buf += (4 * sizeof(uintptr_t)) +
                            sizeof(struct cpt_request_info);
        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                ret = cpt_modex_prep(&params, &sess->mod_ctx);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                ret = cpt_enqueue_rsa_op(op, &params, sess);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                ret = cpt_enqueue_ecdsa_op(op, &params, sess, otx2_fpm_iova);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                ret = cpt_ecpm_prep(&asym_op->ecpm, &params,
                                    sess->ec_ctx.curveid);
                if (unlikely(ret))
                        goto req_fail;
                break;
        default:
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                ret = -EINVAL;
                goto req_fail;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, params.req, op,
                                   sess->cpt_inst_w7, burst_index);
        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Could not enqueue crypto req");
                goto req_fail;
        }

        return 0;

req_fail:
        free_op_meta(mdata, minfo->pool);

        return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q, unsigned int burst_index)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cpt_request_info *req;
        struct cpt_sess_misc *sess;
        uint64_t cpt_op;
        void *mdata;
        int ret;

        sess = get_sym_session_private_data(sym_op->session,
                                            otx2_cryptodev_driver_id);

        cpt_op = sess->cpt_op;

        if (cpt_op & CPT_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
                                     (void **)&req);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
                                         (void **)&req);

        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
                                op, (unsigned int)cpt_op, ret);
                return ret;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7,
                                   burst_index);
        if (unlikely(ret)) {
                /* Free buffer allocated by fill params routines */
                free_op_meta(mdata, qp->meta_info.pool);
        }

        return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sec(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q,
                     const unsigned int burst_index)
{
        uint32_t winsz, esn_low = 0, esn_hi = 0, seql = 0, seqh = 0;
        struct rte_mbuf *m_src = op->sym->m_src;
        struct otx2_sec_session_ipsec_lp *sess;
        struct otx2_ipsec_po_sa_ctl *ctl_wrd;
        struct otx2_ipsec_po_in_sa *sa;
        struct otx2_sec_session *priv;
        struct cpt_request_info *req;
        uint64_t seq_in_sa, seq = 0;
        uint8_t esn;
        int ret;

        priv = get_sec_session_private_data(op->sym->sec_session);
        sess = &priv->ipsec.lp;
        sa = &sess->in_sa;

        ctl_wrd = &sa->ctl;
        esn = ctl_wrd->esn_en;
        winsz = sa->replay_win_sz;

        if (ctl_wrd->direction == OTX2_IPSEC_PO_SA_DIRECTION_OUTBOUND)
                ret = process_outb_sa(op, sess, &qp->meta_info, (void **)&req);
        else {
                if (winsz) {
                        esn_low = rte_be_to_cpu_32(sa->esn_low);
                        esn_hi = rte_be_to_cpu_32(sa->esn_hi);
                        seql = *rte_pktmbuf_mtod_offset(m_src, uint32_t *,
                                sizeof(struct rte_ipv4_hdr) + 4);
                        seql = rte_be_to_cpu_32(seql);

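                        /*
                         * With ESN enabled, rebuild the full 64-bit sequence
                         * number: the high word is inferred from the SA's
                         * ESN state, the low word is the value on the wire.
                         */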
                        if (!esn)
                                seq = (uint64_t)seql;
                        else {
                                seqh = anti_replay_get_seqh(winsz, seql, esn_hi,
                                                esn_low);
                                seq = ((uint64_t)seqh << 32) | seql;
                        }

                        if (unlikely(seq == 0))
                                return IPSEC_ANTI_REPLAY_FAILED;

                        ret = anti_replay_check(sa->replay, seq, winsz);
                        if (unlikely(ret)) {
                                otx2_err("Anti replay check failed");
                                return IPSEC_ANTI_REPLAY_FAILED;
                        }
                }

                ret = process_inb_sa(op, sess, &qp->meta_info, (void **)&req);
        }

        if (unlikely(ret)) {
                otx2_err("Crypto req : op %p, ret 0x%x", op, ret);
                return ret;
        }

        ret = otx2_cpt_enqueue_req(qp, pend_q, req, op, sess->cpt_inst_w7,
                                   burst_index);

        if (winsz && esn) {
                seq_in_sa = ((uint64_t)esn_hi << 32) | esn_low;
                if (seq > seq_in_sa) {
                        sa->esn_low = rte_cpu_to_be_32(seql);
                        sa->esn_hi = rte_cpu_to_be_32(seqh);
                }
        }

        return ret;
}

static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                              struct pending_queue *pend_q,
                              unsigned int burst_index)
{
        const int driver_id = otx2_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        int ret;

        /* Create temporary session */
        sess = rte_cryptodev_sym_session_create(qp->sess_mp);
        if (sess == NULL)
                return -ENOMEM;

        ret = sym_session_configure(driver_id, sym_op->xform, sess,
                                    qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        sym_op->session = sess;

        ret = otx2_cpt_enqueue_sym(qp, op, pend_q, burst_index);

        if (unlikely(ret))
                goto priv_put;

        return 0;

priv_put:
        sym_session_clear(driver_id, sess);
sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return ret;
}

static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint16_t nb_allowed, count = 0;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct rte_crypto_op *op;
        int ret;

        pend_q = &qp->pend_q;

        nb_allowed = pending_queue_free_slots(pend_q,
                                OTX2_CPT_DEFAULT_CMD_QLEN, 0);
        nb_ops = RTE_MIN(nb_ops, nb_allowed);

        for (count = 0; count < nb_ops; count++) {
                op = ops[count];
                if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION)
                                ret = otx2_cpt_enqueue_sec(qp, op, pend_q,
                                                           count);
                        else if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_sym(qp, op, pend_q,
                                                           count);
                        else
                                ret = otx2_cpt_enqueue_sym_sessless(qp, op,
                                                pend_q, count);
                } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_asym(qp, op, pend_q,
                                                                count);
                        else
                                break;
                } else
                        break;

                if (unlikely(ret))
                        break;
        }

        if (unlikely(!qp->ca_enable))
                pending_queue_commit(pend_q, count, OTX2_CPT_DEFAULT_CMD_QLEN);

        return count;
}

static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
                     struct rte_crypto_rsa_xform *rsa_ctx)
{
        struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

        switch (rsa->op_type) {
        case RTE_CRYPTO_ASYM_OP_ENCRYPT:
                rsa->cipher.length = rsa_ctx->n.length;
                memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
                break;
        case RTE_CRYPTO_ASYM_OP_DECRYPT:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->message.length = rsa_ctx->n.length;
                        memcpy(rsa->message.data, req->rptr,
                               rsa->message.length);
                } else {
                        /* Get length of decrypted output */
                        rsa->message.length = rte_be_to_cpu_16
                                             (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy decrypted data.
                         */
                        memcpy(rsa->message.data, req->rptr + 2,
                               rsa->message.length);
                }
                break;
        case RTE_CRYPTO_ASYM_OP_SIGN:
                rsa->sign.length = rsa_ctx->n.length;
                memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                break;
        case RTE_CRYPTO_ASYM_OP_VERIFY:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->sign.length = rsa_ctx->n.length;
                        memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                } else {
                        /* Get length of signed output */
                        rsa->sign.length = rte_be_to_cpu_16
                                          (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy signed data.
                         */
                        memcpy(rsa->sign.data, req->rptr + 2,
                               rsa->sign.length);
                }
                if (memcmp(rsa->sign.data, rsa->message.data,
                           rsa->message.length)) {
                        CPT_LOG_DP_ERR("RSA verification failed");
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid RSA operation type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecdsa_op(struct rte_crypto_ecdsa_op_param *ecdsa,
                               struct cpt_request_info *req,
                               struct cpt_asym_ec_ctx *ec)
{
        int prime_len = ec_grp[ec->curveid].prime.length;

        if (ecdsa->op_type == RTE_CRYPTO_ASYM_OP_VERIFY)
                return;

        /* Separate out sign r and s components */
        memcpy(ecdsa->r.data, req->rptr, prime_len);
        memcpy(ecdsa->s.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
               prime_len);
        ecdsa->r.length = prime_len;
        ecdsa->s.length = prime_len;
}

static __rte_always_inline void
otx2_cpt_asym_dequeue_ecpm_op(struct rte_crypto_ecpm_op_param *ecpm,
                             struct cpt_request_info *req,
                             struct cpt_asym_ec_ctx *ec)
{
        int prime_len = ec_grp[ec->curveid].prime.length;

        memcpy(ecpm->r.x.data, req->rptr, prime_len);
        memcpy(ecpm->r.y.data, req->rptr + RTE_ALIGN_CEIL(prime_len, 8),
               prime_len);
        ecpm->r.x.length = prime_len;
        ecpm->r.y.length = prime_len;
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
                           struct cpt_request_info *req)
{
        struct rte_crypto_asym_op *op = cop->asym;
        struct cpt_asym_sess_misc *sess;

        sess = get_asym_session_private_data(op->session,
                                             otx2_cryptodev_driver_id);

        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                op->modex.result.length = sess->mod_ctx.modulus.length;
                memcpy(op->modex.result.data, req->rptr,
                       op->modex.result.length);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECDSA:
                otx2_cpt_asym_dequeue_ecdsa_op(&op->ecdsa, req, &sess->ec_ctx);
                break;
        case RTE_CRYPTO_ASYM_XFORM_ECPM:
                otx2_cpt_asym_dequeue_ecpm_op(&op->ecpm, req, &sess->ec_ctx);
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid crypto xform type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static void
otx2_cpt_sec_post_process(struct rte_crypto_op *cop, uintptr_t *rsp)
{
        struct cpt_request_info *req = (struct cpt_request_info *)rsp[2];
        vq_cmd_word0_t *word0 = (vq_cmd_word0_t *)&req->ist.ei0;
        struct rte_crypto_sym_op *sym_op = cop->sym;
        struct rte_mbuf *m = sym_op->m_src;
        struct rte_ipv6_hdr *ip6;
        struct rte_ipv4_hdr *ip;
        uint16_t m_len = 0;
        int mdata_len;
        char *data;

        mdata_len = (int)rsp[3];
        rte_pktmbuf_trim(m, mdata_len);

        if (word0->s.opcode.major == OTX2_IPSEC_PO_PROCESS_IPSEC_INB) {
                data = rte_pktmbuf_mtod(m, char *);

                if (rsp[4] == OTX2_IPSEC_PO_TRANSPORT ||
                    rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV4) {
                        ip = (struct rte_ipv4_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip->total_length);
                } else if (rsp[4] == OTX2_IPSEC_PO_TUNNEL_IPV6) {
                        ip6 = (struct rte_ipv6_hdr *)(data +
                                OTX2_IPSEC_PO_INB_RPTR_HDR);
                        m_len = rte_be_to_cpu_16(ip6->payload_len) +
                                sizeof(struct rte_ipv6_hdr);
                }

                m->data_len = m_len;
                m->pkt_len = m_len;
                m->data_off += OTX2_IPSEC_PO_INB_RPTR_HDR;
        }
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
                              uintptr_t *rsp, uint8_t cc)
{
        unsigned int sz;

        if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                if (cop->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        if (likely(cc == OTX2_IPSEC_PO_CC_SUCCESS)) {
                                otx2_cpt_sec_post_process(cop, rsp);
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        } else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;

                        return;
                }

                if (likely(cc == NO_ERR)) {
                        /* Verify authentication data if required */
                        if (unlikely(rsp[2]))
                                compl_auth_verify(cop, (uint8_t *)rsp[2],
                                                  rsp[3]);
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        if (cc == ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }

                if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                        sym_session_clear(otx2_cryptodev_driver_id,
                                          cop->sym->session);
                        sz = rte_cryptodev_sym_get_existing_header_session_size(
                                        cop->sym->session);
                        memset(cop->sym->session, 0, sz);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }

        if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                if (likely(cc == NO_ERR)) {
                        cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        /*
                         * Pass cpt_req_info stored in metabuf during
                         * enqueue.
                         */
                        rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
                        otx2_cpt_asym_post_process(cop,
                                        (struct cpt_request_info *)rsp);
                } else
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        int i, nb_pending, nb_completed;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_request_info *req;
        struct rte_crypto_op *cop;
        uint8_t cc[nb_ops];
        uintptr_t *rsp;
        void *metabuf;

        pend_q = &qp->pend_q;

        nb_pending = pending_queue_level(pend_q, OTX2_CPT_DEFAULT_CMD_QLEN);

        /* Ensure pcount isn't read before data lands */
        rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

        nb_ops = RTE_MIN(nb_ops, nb_pending);

        for (i = 0; i < nb_ops; i++) {
                pending_queue_peek(pend_q, (void **)&req,
                        OTX2_CPT_DEFAULT_CMD_QLEN, 0);

                cc[i] = otx2_cpt_compcode_get(req);

                if (unlikely(cc[i] == ERR_REQ_PENDING))
                        break;

                ops[i] = req->op;

                pending_queue_pop(pend_q, OTX2_CPT_DEFAULT_CMD_QLEN);
        }

        nb_completed = i;

        for (i = 0; i < nb_completed; i++) {
                rsp = (void *)ops[i];

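                /*
                 * rsp points at the metabuf header written at enqueue:
                 * word 0 is the metabuf, word 1 the crypto op.
                 */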
                metabuf = (void *)rsp[0];
                cop = (void *)rsp[1];

                ops[i] = cop;

                otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

                free_op_meta(metabuf, qp->meta_info.pool);
        }

        return nb_completed;
}

void
otx2_cpt_set_enqdeq_fns(struct rte_cryptodev *dev)
{
        dev->enqueue_burst = otx2_cpt_enqueue_burst;
        dev->dequeue_burst = otx2_cpt_dequeue_burst;

        rte_mb();
}

/* PMD ops */

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
                    struct rte_cryptodev_config *conf)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int ret;

        if (conf->nb_queue_pairs > vf->max_queues) {
                CPT_LOG_ERR("Invalid number of queue pairs requested");
                return -EINVAL;
        }

        dev->feature_flags = otx2_cpt_default_ff_get() & ~conf->ff_disable;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {
                /* Initialize shared FPM table */
                ret = cpt_fpm_init(otx2_fpm_iova);
                if (ret)
                        return ret;
        }

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret) {
                        CPT_LOG_ERR("Could not detach CPT queues");
                        return ret;
                }
        }

        /* Attach queues */
        ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
        if (ret) {
                CPT_LOG_ERR("Could not attach CPT queues");
                return -ENODEV;
        }

        ret = otx2_cpt_msix_offsets_get(dev);
        if (ret) {
                CPT_LOG_ERR("Could not get MSI-X offsets");
                goto queues_detach;
        }

        /* Register error interrupts */
        ret = otx2_cpt_err_intr_register(dev);
        if (ret) {
                CPT_LOG_ERR("Could not register error interrupts");
                goto queues_detach;
        }

        ret = otx2_cpt_inline_init(dev);
        if (ret) {
                CPT_LOG_ERR("Could not enable inline IPsec");
                goto intr_unregister;
        }

        otx2_cpt_set_enqdeq_fns(dev);

        return 0;

intr_unregister:
        otx2_cpt_err_intr_unregister(dev);
queues_detach:
        otx2_cpt_queues_detach(dev);
        return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        CPT_PMD_INIT_FUNC_TRACE();

        return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
        CPT_PMD_INIT_FUNC_TRACE();

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO)
                cpt_fpm_clear();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int i, ret = 0;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = otx2_cpt_queue_pair_release(dev, i);
                if (ret)
                        return ret;
        }

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret)
                        CPT_LOG_ERR("Could not detach CPT queues");
        }

        return ret;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;

        if (info != NULL) {
                info->max_nb_queue_pairs = vf->max_queues;
                info->feature_flags = otx2_cpt_default_ff_get();
                info->capabilities = otx2_cpt_capabilities_get();
                info->sym.max_nb_sessions = 0;
                info->driver_id = otx2_cryptodev_driver_id;
                info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
                info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
        }
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                          const struct rte_cryptodev_qp_conf *conf,
                          int socket_id __rte_unused)
{
        uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
        struct rte_pci_device *pci_dev;
        struct otx2_cpt_qp *qp;

        CPT_PMD_INIT_FUNC_TRACE();

        if (dev->data->queue_pairs[qp_id] != NULL)
                otx2_cpt_queue_pair_release(dev, qp_id);

        if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
                CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
                            conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[2].addr == NULL) {
                CPT_LOG_ERR("Invalid PCI mem address");
                return -EIO;
        }

        qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not create queue pair %d", qp_id);
                return -ENOMEM;
        }

        qp->sess_mp = conf->mp_session;
        qp->sess_mp_priv = conf->mp_session_private;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (qp == NULL)
                return -EINVAL;

        CPT_LOG_INFO("Releasing queue pair %d", qp_id);

        ret = otx2_cpt_qp_destroy(dev, qp);
        if (ret) {
                CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
                return ret;
        }

        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
                               struct rte_crypto_sym_xform *xform,
                               struct rte_cryptodev_sym_session *sess,
                               struct rte_mempool *pool)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_sym_session *sess)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                          struct rte_crypto_asym_xform *xform,
                          struct rte_cryptodev_asym_session *sess,
                          struct rte_mempool *pool)
{
        struct cpt_asym_sess_misc *priv;
        vq_cmd_word3_t vq_cmd_w3;
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(pool, (void **)&priv)) {
                CPT_LOG_ERR("Could not allocate session_private_data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

        ret = cpt_fill_asym_session_parameters(priv, xform);
        if (ret) {
                CPT_LOG_ERR("Could not configure session parameters");

                /* Return session to mempool */
                rte_mempool_put(pool, priv);
                return ret;
        }

        vq_cmd_w3.u64 = 0;
        vq_cmd_w3.s.grp = OTX2_CPT_EGRP_AE;
        priv->cpt_inst_w7 = vq_cmd_w3.u64;

        set_asym_session_private_data(sess, dev->driver_id, priv);

        return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
                            struct rte_cryptodev_asym_session *sess)
{
        struct cpt_asym_sess_misc *priv;
        struct rte_mempool *sess_mp;

        CPT_PMD_INIT_FUNC_TRACE();

        priv = get_asym_session_private_data(sess, dev->driver_id);
        if (priv == NULL)
                return;

        /* Free resources allocated in session_cfg */
        cpt_free_asym_session_parameters(priv);

        /* Reset and free object back to pool */
        memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
        sess_mp = rte_mempool_from_obj(priv);
        set_asym_session_private_data(sess, dev->driver_id, NULL);
        rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
        /* Device control ops */
        .dev_configure = otx2_cpt_dev_config,
        .dev_start = otx2_cpt_dev_start,
        .dev_stop = otx2_cpt_dev_stop,
        .dev_close = otx2_cpt_dev_close,
        .dev_infos_get = otx2_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = otx2_cpt_queue_pair_setup,
        .queue_pair_release = otx2_cpt_queue_pair_release,

        /* Symmetric crypto ops */
        .sym_session_get_size = otx2_cpt_sym_session_get_size,
        .sym_session_configure = otx2_cpt_sym_session_configure,
        .sym_session_clear = otx2_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = otx2_cpt_asym_session_size_get,
        .asym_session_configure = otx2_cpt_asym_session_cfg,
        .asym_session_clear = otx2_cpt_asym_session_clear,

};