dpdk.git / drivers/crypto/octeontx2/otx2_cryptodev_ops.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Marvell International Ltd.
 */

#include <unistd.h>

#include <rte_cryptodev_pmd.h>
#include <rte_errno.h>

#include "otx2_cryptodev.h"
#include "otx2_cryptodev_capabilities.h"
#include "otx2_cryptodev_hw_access.h"
#include "otx2_cryptodev_mbox.h"
#include "otx2_cryptodev_ops.h"
#include "otx2_mbox.h"

#include "cpt_hw_types.h"
#include "cpt_pmd_logs.h"
#include "cpt_pmd_ops_helper.h"
#include "cpt_ucode.h"
#include "cpt_ucode_asym.h"

#define METABUF_POOL_CACHE_SIZE 512

/* Forward declarations */

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id);

static void
qp_memzone_name_get(char *name, int size, int dev_id, int qp_id)
{
        snprintf(name, size, "otx2_cpt_lf_mem_%u:%u", dev_id, qp_id);
}

static int
otx2_cpt_metabuf_mempool_create(const struct rte_cryptodev *dev,
                                struct otx2_cpt_qp *qp, uint8_t qp_id,
                                int nb_elements)
{
        char mempool_name[RTE_MEMPOOL_NAMESIZE];
        struct cpt_qp_meta_info *meta_info;
        struct rte_mempool *pool;
        int ret, max_mlen;
        int asym_mlen = 0;
        int lb_mlen = 0;
        int sg_mlen = 0;

        if (dev->feature_flags & RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO) {

                /* Get meta len for scatter gather mode */
                sg_mlen = cpt_pmd_ops_helper_get_mlen_sg_mode();

                /* Extra 32B saved for future considerations */
                sg_mlen += 4 * sizeof(uint64_t);

                /* Get meta len for linear buffer (direct) mode */
                lb_mlen = cpt_pmd_ops_helper_get_mlen_direct_mode();

                /* Extra 32B saved for future considerations */
                lb_mlen += 4 * sizeof(uint64_t);
        }

        if (dev->feature_flags & RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO) {

                /* Get meta len required for asymmetric operations */
                asym_mlen = cpt_pmd_ops_helper_asym_get_mlen();
        }

        /*
         * Check max requirement for meta buffer to
         * support crypto op of any type (sym/asym).
         */
        max_mlen = RTE_MAX(RTE_MAX(lb_mlen, sg_mlen), asym_mlen);

        /* Allocate mempool */

        snprintf(mempool_name, RTE_MEMPOOL_NAMESIZE, "otx2_cpt_mb_%u:%u",
                 dev->data->dev_id, qp_id);

        pool = rte_mempool_create_empty(mempool_name, nb_elements, max_mlen,
                                        METABUF_POOL_CACHE_SIZE, 0,
                                        rte_socket_id(), 0);

        if (pool == NULL) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                return rte_errno;
        }

        ret = rte_mempool_set_ops_byname(pool, RTE_MBUF_DEFAULT_MEMPOOL_OPS,
                                         NULL);
        if (ret) {
                CPT_LOG_ERR("Could not set mempool ops");
                goto mempool_free;
        }

        ret = rte_mempool_populate_default(pool);
        if (ret <= 0) {
                CPT_LOG_ERR("Could not populate metabuf pool");
                goto mempool_free;
        }

        meta_info = &qp->meta_info;

        meta_info->pool = pool;
        meta_info->lb_mlen = lb_mlen;
        meta_info->sg_mlen = sg_mlen;

        return 0;

mempool_free:
        rte_mempool_free(pool);
        return ret;
}

static void
otx2_cpt_metabuf_mempool_destroy(struct otx2_cpt_qp *qp)
{
        struct cpt_qp_meta_info *meta_info = &qp->meta_info;

        rte_mempool_free(meta_info->pool);

        meta_info->pool = NULL;
        meta_info->lb_mlen = 0;
        meta_info->sg_mlen = 0;
}

static struct otx2_cpt_qp *
otx2_cpt_qp_create(const struct rte_cryptodev *dev, uint16_t qp_id,
                   uint8_t group)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        uint64_t pg_sz = sysconf(_SC_PAGESIZE);
        const struct rte_memzone *lf_mem;
        uint32_t len, iq_len, size_div40;
        char name[RTE_MEMZONE_NAMESIZE];
        uint64_t used_len, iova;
        struct otx2_cpt_qp *qp;
        uint64_t lmtline;
        uint8_t *va;
        int ret;

        /* Allocate queue pair */
        qp = rte_zmalloc_socket("OCTEON TX2 Crypto PMD Queue Pair", sizeof(*qp),
                                OTX2_ALIGN, 0);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not allocate queue pair");
                return NULL;
        }

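        /*
         * The LF memzone reserved below is laid out as: the pending queue
         * (rid array), the instruction group memory, and then the
         * page-aligned instruction queue itself.
         */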
        iq_len = OTX2_CPT_IQ_LEN;

        /*
         * Queue size must be a multiple of 40 and effective queue size to
         * software is (size_div40 - 1) * 40
         */
        size_div40 = (iq_len + 40 - 1) / 40 + 1;

        /* For pending queue */
        len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);

        /* Space for instruction group memory */
        len += size_div40 * 16;

        /* So that instruction queues start as pg size aligned */
        len = RTE_ALIGN(len, pg_sz);

        /* For instruction queues */
        len += OTX2_CPT_IQ_LEN * sizeof(union cpt_inst_s);

        /* Wastage after instruction queues */
        len = RTE_ALIGN(len, pg_sz);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp_id);

        lf_mem = rte_memzone_reserve_aligned(name, len, vf->otx2_dev.node,
                        RTE_MEMZONE_SIZE_HINT_ONLY | RTE_MEMZONE_256MB,
                        RTE_CACHE_LINE_SIZE);
        if (lf_mem == NULL) {
                CPT_LOG_ERR("Could not allocate reserved memzone");
                goto qp_free;
        }

        va = lf_mem->addr;
        iova = lf_mem->iova;

        memset(va, 0, len);

        ret = otx2_cpt_metabuf_mempool_create(dev, qp, qp_id, iq_len);
        if (ret) {
                CPT_LOG_ERR("Could not create mempool for metabuf");
                goto lf_mem_free;
        }

        /* Initialize pending queue */
        qp->pend_q.rid_queue = (struct rid *)va;
        qp->pend_q.enq_tail = 0;
        qp->pend_q.deq_head = 0;
        qp->pend_q.pending_count = 0;

        used_len = iq_len * RTE_ALIGN(sizeof(struct rid), 8);
        used_len += size_div40 * 16;
        used_len = RTE_ALIGN(used_len, pg_sz);
        iova += used_len;

        qp->iq_dma_addr = iova;
        qp->id = qp_id;
        qp->base = OTX2_CPT_LF_BAR2(vf, qp_id);

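        /*
         * The LMT line used for LMTST-based submission lives in the LMT
         * block region of BAR2; derive its VA from the block address and
         * the queue (LF) index.
         */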
        lmtline = vf->otx2_dev.bar2 +
                  (RVU_BLOCK_ADDR_LMT << 20 | qp_id << 12) +
                  OTX2_LMT_LF_LMTLINE(0);

        qp->lmtline = (void *)lmtline;

        qp->lf_nq_reg = qp->base + OTX2_CPT_LF_NQ(0);

        otx2_cpt_iq_disable(qp);

        ret = otx2_cpt_iq_enable(dev, qp, group, OTX2_CPT_QUEUE_HI_PRIO,
                                 size_div40);
        if (ret) {
                CPT_LOG_ERR("Could not enable instruction queue");
                goto mempool_destroy;
        }

        return qp;

mempool_destroy:
        otx2_cpt_metabuf_mempool_destroy(qp);
lf_mem_free:
        rte_memzone_free(lf_mem);
qp_free:
        rte_free(qp);
        return NULL;
}

static int
otx2_cpt_qp_destroy(const struct rte_cryptodev *dev, struct otx2_cpt_qp *qp)
{
        const struct rte_memzone *lf_mem;
        char name[RTE_MEMZONE_NAMESIZE];
        int ret;

        otx2_cpt_iq_disable(qp);

        otx2_cpt_metabuf_mempool_destroy(qp);

        qp_memzone_name_get(name, RTE_MEMZONE_NAMESIZE, dev->data->dev_id,
                            qp->id);

        lf_mem = rte_memzone_lookup(name);

        ret = rte_memzone_free(lf_mem);
        if (ret)
                return ret;

        rte_free(qp);

        return 0;
}

static int
sym_session_configure(int driver_id, struct rte_crypto_sym_xform *xform,
                      struct rte_cryptodev_sym_session *sess,
                      struct rte_mempool *pool)
{
        struct cpt_sess_misc *misc;
        void *priv;
        int ret;

        if (unlikely(cpt_is_algo_supported(xform))) {
                CPT_LOG_ERR("Crypto xform not supported");
                return -ENOTSUP;
        }

        if (unlikely(rte_mempool_get(pool, &priv))) {
                CPT_LOG_ERR("Could not allocate session private data");
                return -ENOMEM;
        }

        misc = priv;

        for ( ; xform != NULL; xform = xform->next) {
                switch (xform->type) {
                case RTE_CRYPTO_SYM_XFORM_AEAD:
                        ret = fill_sess_aead(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_CIPHER:
                        ret = fill_sess_cipher(xform, misc);
                        break;
                case RTE_CRYPTO_SYM_XFORM_AUTH:
                        if (xform->auth.algo == RTE_CRYPTO_AUTH_AES_GMAC)
                                ret = fill_sess_gmac(xform, misc);
                        else
                                ret = fill_sess_auth(xform, misc);
                        break;
                default:
                        ret = -1;
                }

                if (ret)
                        goto priv_put;
        }

        set_sym_session_private_data(sess, driver_id, misc);

        misc->ctx_dma_addr = rte_mempool_virt2iova(misc) +
                             sizeof(struct cpt_sess_misc);

        /*
         * IE engines support IPsec operations
         * SE engines support IPsec operations and Air-Crypto operations
         */
        if (misc->zsk_flag)
                misc->egrp = OTX2_CPT_EGRP_SE;
        else
                misc->egrp = OTX2_CPT_EGRP_SE_IE;

        return 0;

priv_put:
        rte_mempool_put(pool, priv);

        CPT_LOG_ERR("Crypto xform not supported");
        return -ENOTSUP;
}

static void
sym_session_clear(int driver_id, struct rte_cryptodev_sym_session *sess)
{
        void *priv = get_sym_session_private_data(sess, driver_id);
        struct rte_mempool *pool;

        if (priv == NULL)
                return;

        memset(priv, 0, cpt_get_session_size());

        pool = rte_mempool_from_obj(priv);

        set_sym_session_private_data(sess, driver_id, NULL);

        rte_mempool_put(pool, priv);
}

static __rte_always_inline int32_t __hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
                     struct pending_queue *pend_q,
                     struct cpt_request_info *req)
{
        void *lmtline = qp->lmtline;
        union cpt_inst_s inst;
        uint64_t lmt_status;

        if (unlikely(pend_q->pending_count >= OTX2_CPT_DEFAULT_CMD_QLEN))
                return -EAGAIN;

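        /*
         * Fill CPT_INST_S: word 0 is cleared (no done interrupt requested),
         * the result address points at the completion memory polled at
         * dequeue, and ei0-ei3 carry the command words prepared by the
         * microcode helpers.
         */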
        inst.u[0] = 0;
        inst.s9x.res_addr = req->comp_baddr;
        inst.u[2] = 0;
        inst.u[3] = 0;

        inst.s9x.ei0 = req->ist.ei0;
        inst.s9x.ei1 = req->ist.ei1;
        inst.s9x.ei2 = req->ist.ei2;
        inst.s9x.ei3 = req->ist.ei3;

        req->time_out = rte_get_timer_cycles() +
                        DEFAULT_COMMAND_TIMEOUT * rte_get_timer_hz();

        do {
                /* Copy CPT command to LMTLINE */
                memcpy(lmtline, &inst, sizeof(inst));

                /*
                 * Make sure compiler does not reorder memcpy and ldeor.
                 * LMTST transactions are always flushed from the write
                 * buffer immediately, a DMB is not required to push out
                 * LMTSTs.
                 */
                rte_cio_wmb();
                lmt_status = otx2_lmt_submit(qp->lf_nq_reg);
        } while (lmt_status == 0);

        pend_q->rid_queue[pend_q->enq_tail].rid = (uintptr_t)req;

        /* We will use soft queue length here to limit requests */
        MOD_INC(pend_q->enq_tail, OTX2_CPT_DEFAULT_CMD_QLEN);
        pend_q->pending_count += 1;

        return 0;
}

static __rte_always_inline int32_t __hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
                      struct rte_crypto_op *op,
                      struct pending_queue *pend_q)
{
        struct cpt_qp_meta_info *minfo = &qp->meta_info;
        struct rte_crypto_asym_op *asym_op = op->asym;
        struct asym_op_params params = {0};
        struct cpt_asym_sess_misc *sess;
        vq_cmd_word3_t *w3;
        uintptr_t *cop;
        void *mdata;
        int ret;

        if (unlikely(rte_mempool_get(minfo->pool, &mdata) < 0)) {
                CPT_LOG_ERR("Could not allocate meta buffer for request");
                return -ENOMEM;
        }

        sess = get_asym_session_private_data(asym_op->session,
                                             otx2_cryptodev_driver_id);

        /* Store IO address of the mdata to meta_buf */
        params.meta_buf = rte_mempool_virt2iova(mdata);

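        /*
         * Metabuf layout: the first four pointer-sized words hold the
         * metabuf and crypto op pointers (read back at dequeue), followed
         * by cpt_request_info and then scratch space for the request.
         */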
        cop = mdata;
        cop[0] = (uintptr_t)mdata;
        cop[1] = (uintptr_t)op;
        cop[2] = cop[3] = 0ULL;

        params.req = RTE_PTR_ADD(cop, 4 * sizeof(uintptr_t));
        params.req->op = cop;

        /* Adjust meta_buf to point to end of cpt_request_info structure */
        params.meta_buf += (4 * sizeof(uintptr_t)) +
                            sizeof(struct cpt_request_info);
        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                ret = cpt_modex_prep(&params, &sess->mod_ctx);
                if (unlikely(ret))
                        goto req_fail;
                break;
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                ret = cpt_enqueue_rsa_op(op, &params, sess);
                if (unlikely(ret))
                        goto req_fail;
                break;
        default:
                op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                ret = -EINVAL;
                goto req_fail;
        }

        /* Set engine group of AE */
        w3 = (vq_cmd_word3_t *)&params.req->ist.ei3;
        w3->s.grp = OTX2_CPT_EGRP_AE;

        ret = otx2_cpt_enqueue_req(qp, pend_q, params.req);

        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Could not enqueue crypto req");
                goto req_fail;
        }

        return 0;

req_fail:
        free_op_meta(mdata, minfo->pool);

        return ret;
}

static __rte_always_inline int __hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                     struct pending_queue *pend_q)
{
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct cpt_request_info *req;
        struct cpt_sess_misc *sess;
        vq_cmd_word3_t *w3;
        uint64_t cpt_op;
        void *mdata;
        int ret;

        sess = get_sym_session_private_data(sym_op->session,
                                            otx2_cryptodev_driver_id);

        cpt_op = sess->cpt_op;

        if (cpt_op & CPT_OP_CIPHER_MASK)
                ret = fill_fc_params(op, sess, &qp->meta_info, &mdata,
                                     (void **)&req);
        else
                ret = fill_digest_params(op, sess, &qp->meta_info, &mdata,
                                         (void **)&req);

        if (unlikely(ret)) {
                CPT_LOG_DP_ERR("Crypto req : op %p, cpt_op 0x%x ret 0x%x",
                                op, (unsigned int)cpt_op, ret);
                return ret;
        }

        w3 = ((vq_cmd_word3_t *)(&req->ist.ei3));
        w3->s.grp = sess->egrp;

        ret = otx2_cpt_enqueue_req(qp, pend_q, req);

        if (unlikely(ret)) {
                /* Free buffer allocated by fill params routines */
                free_op_meta(mdata, qp->meta_info.pool);
        }

        return ret;
}

static __rte_always_inline int __hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
                              struct pending_queue *pend_q)
{
        const int driver_id = otx2_cryptodev_driver_id;
        struct rte_crypto_sym_op *sym_op = op->sym;
        struct rte_cryptodev_sym_session *sess;
        int ret;

        /* Create temporary session */

        if (rte_mempool_get(qp->sess_mp, (void **)&sess))
                return -ENOMEM;

        ret = sym_session_configure(driver_id, sym_op->xform, sess,
                                    qp->sess_mp_priv);
        if (ret)
                goto sess_put;

        sym_op->session = sess;

        ret = otx2_cpt_enqueue_sym(qp, op, pend_q);

        if (unlikely(ret))
                goto priv_put;

        return 0;

priv_put:
        sym_session_clear(driver_id, sess);
sess_put:
        rte_mempool_put(qp->sess_mp, sess);
        return ret;
}

static uint16_t
otx2_cpt_enqueue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        uint16_t nb_allowed, count = 0;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct rte_crypto_op *op;
        int ret;

        pend_q = &qp->pend_q;

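        /* Clamp the burst to the free slots left in the soft pending queue. */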
        nb_allowed = OTX2_CPT_DEFAULT_CMD_QLEN - pend_q->pending_count;
        if (nb_ops > nb_allowed)
                nb_ops = nb_allowed;

        for (count = 0; count < nb_ops; count++) {
                op = ops[count];
                if (op->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_sym(qp, op, pend_q);
                        else
                                ret = otx2_cpt_enqueue_sym_sessless(qp, op,
                                                                    pend_q);
                } else if (op->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                        if (op->sess_type == RTE_CRYPTO_OP_WITH_SESSION)
                                ret = otx2_cpt_enqueue_asym(qp, op, pend_q);
                        else
                                break;
                } else
                        break;

                if (unlikely(ret))
                        break;
        }

        return count;
}

static __rte_always_inline void
otx2_cpt_asym_rsa_op(struct rte_crypto_op *cop, struct cpt_request_info *req,
                     struct rte_crypto_rsa_xform *rsa_ctx)
{
        struct rte_crypto_rsa_op_param *rsa = &cop->asym->rsa;

        switch (rsa->op_type) {
        case RTE_CRYPTO_ASYM_OP_ENCRYPT:
                rsa->cipher.length = rsa_ctx->n.length;
                memcpy(rsa->cipher.data, req->rptr, rsa->cipher.length);
                break;
        case RTE_CRYPTO_ASYM_OP_DECRYPT:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->message.length = rsa_ctx->n.length;
                        memcpy(rsa->message.data, req->rptr,
                               rsa->message.length);
                } else {
                        /* Get length of decrypted output */
                        rsa->message.length = rte_cpu_to_be_16
                                             (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy decrypted data.
                         */
                        memcpy(rsa->message.data, req->rptr + 2,
                               rsa->message.length);
                }
                break;
        case RTE_CRYPTO_ASYM_OP_SIGN:
                rsa->sign.length = rsa_ctx->n.length;
                memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                break;
        case RTE_CRYPTO_ASYM_OP_VERIFY:
                if (rsa->pad == RTE_CRYPTO_RSA_PADDING_NONE) {
                        rsa->sign.length = rsa_ctx->n.length;
                        memcpy(rsa->sign.data, req->rptr, rsa->sign.length);
                } else {
                        /* Get length of signed output */
                        rsa->sign.length = rte_cpu_to_be_16
                                          (*((uint16_t *)req->rptr));
                        /*
                         * Offset output data pointer by length field
                         * (2 bytes) and copy signed data.
                         */
                        memcpy(rsa->sign.data, req->rptr + 2,
                               rsa->sign.length);
                }
                if (memcmp(rsa->sign.data, rsa->message.data,
                           rsa->message.length)) {
                        CPT_LOG_DP_ERR("RSA verification failed");
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid RSA operation type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static void
otx2_cpt_asym_post_process(struct rte_crypto_op *cop,
                           struct cpt_request_info *req)
{
        struct rte_crypto_asym_op *op = cop->asym;
        struct cpt_asym_sess_misc *sess;

        sess = get_asym_session_private_data(op->session,
                                             otx2_cryptodev_driver_id);

        switch (sess->xfrm_type) {
        case RTE_CRYPTO_ASYM_XFORM_RSA:
                otx2_cpt_asym_rsa_op(cop, req, &sess->rsa_ctx);
                break;
        case RTE_CRYPTO_ASYM_XFORM_MODEX:
                op->modex.result.length = sess->mod_ctx.modulus.length;
                memcpy(op->modex.result.data, req->rptr,
                       op->modex.result.length);
                break;
        default:
                CPT_LOG_DP_DEBUG("Invalid crypto xform type");
                cop->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS;
                break;
        }
}

static inline void
otx2_cpt_dequeue_post_process(struct otx2_cpt_qp *qp, struct rte_crypto_op *cop,
                              uintptr_t *rsp, uint8_t cc)
{
        if (cop->type == RTE_CRYPTO_OP_TYPE_SYMMETRIC) {
                if (likely(cc == NO_ERR)) {
                        /* Verify authentication data if required */
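                        /*
                         * rsp[2]/rsp[3] carry the generated MAC address and
                         * its length when the digest is to be compared in
                         * software (set up by the enqueue-side fill routine).
                         */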
                        if (unlikely(rsp[2]))
                                compl_auth_verify(cop, (uint8_t *)rsp[2],
                                                 rsp[3]);
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        if (cc == ERR_GC_ICV_MISCOMPARE)
                                cop->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED;
                        else
                                cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }

                if (unlikely(cop->sess_type == RTE_CRYPTO_OP_SESSIONLESS)) {
                        sym_session_clear(otx2_cryptodev_driver_id,
                                          cop->sym->session);
                        rte_mempool_put(qp->sess_mp, cop->sym->session);
                        cop->sym->session = NULL;
                }
        }

        if (cop->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                if (likely(cc == NO_ERR)) {
                        cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                        /*
                         * Pass cpt_req_info stored in metabuf during
                         * enqueue.
                         */
                        rsp = RTE_PTR_ADD(rsp, 4 * sizeof(uintptr_t));
                        otx2_cpt_asym_post_process(cop,
                                        (struct cpt_request_info *)rsp);
                } else
                        cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

static __rte_always_inline uint8_t
otx2_cpt_compcode_get(struct cpt_request_info *req)
{
        volatile struct cpt_res_s_9s *res;
        uint8_t ret;

        res = (volatile struct cpt_res_s_9s *)req->completion_addr;

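        /*
         * The completion word is written by hardware; treat NOTDONE as
         * still pending until the request's timeout deadline passes.
         */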
        if (unlikely(res->compcode == CPT_9X_COMP_E_NOTDONE)) {
                if (rte_get_timer_cycles() < req->time_out)
                        return ERR_REQ_PENDING;

                CPT_LOG_DP_ERR("Request timed out");
                return ERR_REQ_TIMEOUT;
        }

        if (likely(res->compcode == CPT_9X_COMP_E_GOOD)) {
                ret = NO_ERR;
                if (unlikely(res->uc_compcode)) {
                        ret = res->uc_compcode;
                        CPT_LOG_DP_DEBUG("Request failed with microcode error");
                        CPT_LOG_DP_DEBUG("MC completion code 0x%x",
                                         res->uc_compcode);
                }
        } else {
                CPT_LOG_DP_DEBUG("HW completion code 0x%x", res->compcode);

                ret = res->compcode;
                switch (res->compcode) {
                case CPT_9X_COMP_E_INSTERR:
                        CPT_LOG_DP_ERR("Request failed with instruction error");
                        break;
                case CPT_9X_COMP_E_FAULT:
                        CPT_LOG_DP_ERR("Request failed with DMA fault");
                        break;
                case CPT_9X_COMP_E_HWERR:
                        CPT_LOG_DP_ERR("Request failed with hardware error");
                        break;
                default:
                        CPT_LOG_DP_ERR("Request failed with unknown completion code");
                }
        }

        return ret;
}

static uint16_t
otx2_cpt_dequeue_burst(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops)
{
        int i, nb_pending, nb_completed;
        struct otx2_cpt_qp *qp = qptr;
        struct pending_queue *pend_q;
        struct cpt_request_info *req;
        struct rte_crypto_op *cop;
        uint8_t cc[nb_ops];
        struct rid *rid;
        uintptr_t *rsp;
        void *metabuf;

        pend_q = &qp->pend_q;

        nb_pending = pend_q->pending_count;

        if (nb_ops > nb_pending)
                nb_ops = nb_pending;

        for (i = 0; i < nb_ops; i++) {
                rid = &pend_q->rid_queue[pend_q->deq_head];
                req = (struct cpt_request_info *)(rid->rid);

                cc[i] = otx2_cpt_compcode_get(req);

                if (unlikely(cc[i] == ERR_REQ_PENDING))
                        break;

                ops[i] = req->op;

                MOD_INC(pend_q->deq_head, OTX2_CPT_DEFAULT_CMD_QLEN);
                pend_q->pending_count -= 1;
        }

        nb_completed = i;

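        /*
         * req->op points at the metabuf, whose first two words were set at
         * enqueue time to the metabuf and crypto op pointers.
         */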
        for (i = 0; i < nb_completed; i++) {
                rsp = (void *)ops[i];

                metabuf = (void *)rsp[0];
                cop = (void *)rsp[1];

                ops[i] = cop;

                otx2_cpt_dequeue_post_process(qp, cop, rsp, cc[i]);

                free_op_meta(metabuf, qp->meta_info.pool);
        }

        return nb_completed;
}

/* PMD ops */

static int
otx2_cpt_dev_config(struct rte_cryptodev *dev,
                    struct rte_cryptodev_config *conf)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int ret;

        if (conf->nb_queue_pairs > vf->max_queues) {
                CPT_LOG_ERR("Invalid number of queue pairs requested");
                return -EINVAL;
        }

        dev->feature_flags &= ~conf->ff_disable;

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret) {
                        CPT_LOG_ERR("Could not detach CPT queues");
                        return ret;
                }
        }

        /* Attach queues */
        ret = otx2_cpt_queues_attach(dev, conf->nb_queue_pairs);
        if (ret) {
                CPT_LOG_ERR("Could not attach CPT queues");
                return -ENODEV;
        }

        ret = otx2_cpt_msix_offsets_get(dev);
        if (ret) {
                CPT_LOG_ERR("Could not get MSI-X offsets");
                goto queues_detach;
        }

        /* Register error interrupts */
        ret = otx2_cpt_err_intr_register(dev);
        if (ret) {
                CPT_LOG_ERR("Could not register error interrupts");
                goto queues_detach;
        }

        dev->enqueue_burst = otx2_cpt_enqueue_burst;
        dev->dequeue_burst = otx2_cpt_dequeue_burst;

        rte_mb();
        return 0;

queues_detach:
        otx2_cpt_queues_detach(dev);
        return ret;
}

static int
otx2_cpt_dev_start(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        CPT_PMD_INIT_FUNC_TRACE();

        return 0;
}

static void
otx2_cpt_dev_stop(struct rte_cryptodev *dev)
{
        RTE_SET_USED(dev);

        CPT_PMD_INIT_FUNC_TRACE();
}

static int
otx2_cpt_dev_close(struct rte_cryptodev *dev)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;
        int i, ret = 0;

        for (i = 0; i < dev->data->nb_queue_pairs; i++) {
                ret = otx2_cpt_queue_pair_release(dev, i);
                if (ret)
                        return ret;
        }

        /* Unregister error interrupts */
        if (vf->err_intr_registered)
                otx2_cpt_err_intr_unregister(dev);

        /* Detach queues */
        if (vf->nb_queues) {
                ret = otx2_cpt_queues_detach(dev);
                if (ret)
                        CPT_LOG_ERR("Could not detach CPT queues");
        }

        return ret;
}

static void
otx2_cpt_dev_info_get(struct rte_cryptodev *dev,
                      struct rte_cryptodev_info *info)
{
        struct otx2_cpt_vf *vf = dev->data->dev_private;

        if (info != NULL) {
                info->max_nb_queue_pairs = vf->max_queues;
                info->feature_flags = dev->feature_flags;
                info->capabilities = otx2_cpt_capabilities_get();
                info->sym.max_nb_sessions = 0;
                info->driver_id = otx2_cryptodev_driver_id;
                info->min_mbuf_headroom_req = OTX2_CPT_MIN_HEADROOM_REQ;
                info->min_mbuf_tailroom_req = OTX2_CPT_MIN_TAILROOM_REQ;
        }
}

static int
otx2_cpt_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                          const struct rte_cryptodev_qp_conf *conf,
                          int socket_id __rte_unused)
{
        uint8_t grp_mask = OTX2_CPT_ENG_GRPS_MASK;
        struct rte_pci_device *pci_dev;
        struct otx2_cpt_qp *qp;

        CPT_PMD_INIT_FUNC_TRACE();

        if (dev->data->queue_pairs[qp_id] != NULL)
                otx2_cpt_queue_pair_release(dev, qp_id);

        if (conf->nb_descriptors > OTX2_CPT_DEFAULT_CMD_QLEN) {
                CPT_LOG_ERR("Could not setup queue pair for %u descriptors",
                            conf->nb_descriptors);
                return -EINVAL;
        }

        pci_dev = RTE_DEV_TO_PCI(dev->device);

        if (pci_dev->mem_resource[2].addr == NULL) {
                CPT_LOG_ERR("Invalid PCI mem address");
                return -EIO;
        }

        qp = otx2_cpt_qp_create(dev, qp_id, grp_mask);
        if (qp == NULL) {
                CPT_LOG_ERR("Could not create queue pair %d", qp_id);
                return -ENOMEM;
        }

        qp->sess_mp = conf->mp_session;
        qp->sess_mp_priv = conf->mp_session_private;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

static int
otx2_cpt_queue_pair_release(struct rte_cryptodev *dev, uint16_t qp_id)
{
        struct otx2_cpt_qp *qp = dev->data->queue_pairs[qp_id];
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (qp == NULL)
                return -EINVAL;

        CPT_LOG_INFO("Releasing queue pair %d", qp_id);

        ret = otx2_cpt_qp_destroy(dev, qp);
        if (ret) {
                CPT_LOG_ERR("Could not destroy queue pair %d", qp_id);
                return ret;
        }

        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

static unsigned int
otx2_cpt_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        return cpt_get_session_size();
}

static int
otx2_cpt_sym_session_configure(struct rte_cryptodev *dev,
                               struct rte_crypto_sym_xform *xform,
                               struct rte_cryptodev_sym_session *sess,
                               struct rte_mempool *pool)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_configure(dev->driver_id, xform, sess, pool);
}

static void
otx2_cpt_sym_session_clear(struct rte_cryptodev *dev,
                           struct rte_cryptodev_sym_session *sess)
{
        CPT_PMD_INIT_FUNC_TRACE();

        return sym_session_clear(dev->driver_id, sess);
}

static unsigned int
otx2_cpt_asym_session_size_get(struct rte_cryptodev *dev __rte_unused)
{
        return sizeof(struct cpt_asym_sess_misc);
}

static int
otx2_cpt_asym_session_cfg(struct rte_cryptodev *dev,
                          struct rte_crypto_asym_xform *xform,
                          struct rte_cryptodev_asym_session *sess,
                          struct rte_mempool *pool)
{
        struct cpt_asym_sess_misc *priv;
        int ret;

        CPT_PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(pool, (void **)&priv)) {
                CPT_LOG_ERR("Could not allocate session_private_data");
                return -ENOMEM;
        }

        memset(priv, 0, sizeof(struct cpt_asym_sess_misc));

        ret = cpt_fill_asym_session_parameters(priv, xform);
        if (ret) {
                CPT_LOG_ERR("Could not configure session parameters");

                /* Return session to mempool */
                rte_mempool_put(pool, priv);
                return ret;
        }

        set_asym_session_private_data(sess, dev->driver_id, priv);
        return 0;
}

static void
otx2_cpt_asym_session_clear(struct rte_cryptodev *dev,
                            struct rte_cryptodev_asym_session *sess)
{
        struct cpt_asym_sess_misc *priv;
        struct rte_mempool *sess_mp;

        CPT_PMD_INIT_FUNC_TRACE();

        priv = get_asym_session_private_data(sess, dev->driver_id);
        if (priv == NULL)
                return;

        /* Free resources allocated in session_cfg */
        cpt_free_asym_session_parameters(priv);

        /* Reset and free object back to pool */
        memset(priv, 0, otx2_cpt_asym_session_size_get(dev));
        sess_mp = rte_mempool_from_obj(priv);
        set_asym_session_private_data(sess, dev->driver_id, NULL);
        rte_mempool_put(sess_mp, priv);
}

struct rte_cryptodev_ops otx2_cpt_ops = {
        /* Device control ops */
        .dev_configure = otx2_cpt_dev_config,
        .dev_start = otx2_cpt_dev_start,
        .dev_stop = otx2_cpt_dev_stop,
        .dev_close = otx2_cpt_dev_close,
        .dev_infos_get = otx2_cpt_dev_info_get,

        .stats_get = NULL,
        .stats_reset = NULL,
        .queue_pair_setup = otx2_cpt_queue_pair_setup,
        .queue_pair_release = otx2_cpt_queue_pair_release,
        .queue_pair_count = NULL,

        /* Symmetric crypto ops */
        .sym_session_get_size = otx2_cpt_sym_session_get_size,
        .sym_session_configure = otx2_cpt_sym_session_configure,
        .sym_session_clear = otx2_cpt_sym_session_clear,

        /* Asymmetric crypto ops */
        .asym_session_get_size = otx2_cpt_asym_session_size_get,
        .asym_session_configure = otx2_cpt_asym_session_cfg,
        .asym_session_clear = otx2_cpt_asym_session_clear,

};