crypto/dpaa_sec: optimize virtual address conversion
[dpdk.git] drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

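/*
 * Per-lcore staging area used during a dequeue burst: the DQRR callback
 * stores finished ops here, and dpaa_sec_deq() points it at the caller's
 * ops array before polling.
 */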
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }

        /* report op status to sym->op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
        struct dpaa_sec_op_ctx *ctx;
        int retval;

        retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
        if (!ctx || retval) {
                PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, so it is called four times
         * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
         * each packet, and memset() is costlier than dcbz_64().
         */
        dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

        ctx->ctx_pool = ses->ctx_pool;
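        /*
         * Cache the virtual-to-IOVA offset of this ctx so that addresses
         * inside the ctx (SG entries, digest scratch area) can later be
         * converted with a single subtraction in dpaa_mem_vtop_ctx()
         * instead of a memseg table walk.
         */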
        ctx->vtop_offset = (uint64_t)ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

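/*
 * Generic (slow-path) virtual to IOVA conversion: linearly scan the EAL
 * memseg table for the segment containing vaddr; returns 0 if no segment
 * matches.
 */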
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        uint64_t vaddr_64, paddr;
        int i;

        vaddr_64 = (uint64_t)vaddr;
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
                        paddr = memseg[i].iova +
                                (vaddr_64 - memseg[i].addr_64);

                        return (rte_iova_t)paddr;
                }
        }
        return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
        return (uint64_t)vaddr - ctx->vtop_offset;
}

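/* Reverse conversion: translate an IOVA back to a virtual address by
 * scanning the same memseg table; returns NULL if not found.
 */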
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        int i;

        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (paddr >= memseg[i].iova &&
                    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
                        return (void *)(memseg[i].addr_64 +
                                        (paddr - memseg[i].iova));
        }
        return NULL;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
                   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with CAAM as the destination channel so that
 * all the packets in this queue can be dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
                QMAN_FQ_FLAG_TO_DCPORTAL;

        ret = qman_create_fq(0, flags, fq_in);
        if (unlikely(ret != 0)) {
                PMD_INIT_LOG(ERR, "qman_create_fq failed");
                return ret;
        }

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                PMD_INIT_LOG(ERR, "qman_init_fq failed");

        return ret;
}

/* Frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output
         * sg[1] for input
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* CAAM results are put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
        }
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype = OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype = OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
        }
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
        }
}


/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        uint32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->qp->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_cipher_only(ses)) {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (uint64_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_blkcipher(
                                                cdb->sh_desc, true,
                                                swap, &alginfo_c,
                                                NULL,
                                                ses->iv.length,
                                                ses->dir);
        } else if (is_auth_only(ses)) {
                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (uint64_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                                                   swap, &alginfo_a,
                                                   !ses->dir,
                                                   ses->digest_length);
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported aead alg\n");
                        return -ENOTSUP;
                }
                alginfo.key = (uint64_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (uint64_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (uint64_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (uint64_t)dpaa_mem_vtop(
                                                        (void *)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1 << 1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (uint64_t)dpaa_mem_vtop(
                                                        (void *)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;

                /* auth_only_len is set to 0 here; it is overwritten
                 * in the FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, &alginfo_c, &alginfo_a,
                                ses->iv.length, 0,
                                ses->digest_length, ses->dir);
        }
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

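/*
 * Pull up to 'len' frames from 'fq' with a volatile dequeue command and
 * poll the DQRR until the VDQCR completes; returns the number of frames
 * actually processed.
 */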
static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
        unsigned int pkts = 0;
        int ret;
        struct qm_mcr_queryfq_np np;
        enum qman_fq_state state;
        uint32_t flags;
        uint32_t vdqcr;

        qman_query_fq_np(fq, &np);
        if (np.frm_cnt) {
                vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
                if (exact)
                        vdqcr |= QM_VDQCR_EXACT;
                ret = qman_volatile_dequeue(fq, 0, vdqcr);
                if (ret)
                        return 0;
                do {
                        pkts += qman_poll_dqrr(len);
                        qman_fq_state(fq, &state, &flags);
                } while (flags & QMAN_FQ_STATE_VDQCR);
        }
        return pkts;
}

/* The qp is lockless; it must be accessed by only one thread. */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;

        fq = &qp->outq;
        dpaa_sec_op_nb = 0;
        dpaa_sec_ops = ops;

        if (unlikely(nb_ops > DPAA_SEC_BURST))
                nb_ops = DPAA_SEC_BURST;

        return dpaa_volatile_deq(fq, nb_ops, 1);
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);

                sg = &cf->sg[2];
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);

                /* let the hardware verify the digest */
                start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }

        return cf;
}

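/*
 * Build a compound frame for a cipher-only op:
 * sg[0] is the output buffer, sg[1] is an extension entry pointing to
 * sg[2..3], which carry the IV and the input data.
 */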
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

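/*
 * Build a compound frame for an AEAD (AES-GCM) op. The input side chains
 * IV, optional AAD and data (plus the received digest when decrypting);
 * the output side chains data and, when encrypting, the generated digest.
 */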
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

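/*
 * Build a compound frame for a chained cipher+auth op. The input side
 * chains IV and data (plus the received digest when decrypting); the
 * output side chains the ciphered data and, when encrypting, the digest.
 */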
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;

                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

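/*
 * Prepare the CDB for the session on first use, build the job for this op,
 * wrap it in a compound FD and enqueue it on the qp's SEC input FQ.
 */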
static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
        struct dpaa_sec_job *cf;
        dpaa_sec_session *ses;
        struct qm_fd fd;
        int ret;
        uint32_t auth_only_len = op->sym->auth.data.length -
                                op->sym->cipher.data.length;

        ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
                                        cryptodev_driver_id);

        if (unlikely(!qp->ses || qp->ses != ses)) {
                qp->ses = ses;
                ses->qp = qp;
                ret = dpaa_sec_prep_cdb(ses);
                if (ret)
                        return ret;
        }

        /*
         * Segmented buffers are not supported.
         */
        if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
                op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                return -ENOTSUP;
        }
        if (is_auth_only(ses)) {
                cf = build_auth_only(op, ses);
        } else if (is_cipher_only(ses)) {
                cf = build_cipher_only(op, ses);
        } else if (is_aead(ses)) {
                cf = build_cipher_auth_gcm(op, ses);
                auth_only_len = ses->auth_only_len;
        } else if (is_auth_cipher(ses)) {
                cf = build_cipher_auth(op, ses);
        } else {
                PMD_TX_LOG(ERR, "not supported sec op");
                return -ENOTSUP;
        }
        if (unlikely(!cf))
                return -ENOMEM;

        memset(&fd, 0, sizeof(struct qm_fd));
        qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
        fd._format1 = qm_fd_compound;
        fd.length29 = 2 * sizeof(struct qm_sg_entry);
        /* auth_only_len is set to 0 in the descriptor and is overwritten
         * here in fd.cmd, which updates the DPOVRD register.
         */
        if (auth_only_len)
                fd.cmd = 0x80000000 | auth_only_len;
        do {
                ret = qman_enqueue(&qp->inq, &fd, 0);
        } while (ret != 0);

        return 0;
}

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        /* Transmit the frames to the given device and queue pair */
        uint32_t loop;
        int32_t ret;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
        uint16_t num_tx = 0;

        if (unlikely(nb_ops == 0))
                return 0;

        /* Prepare each packet to be sent */
        for (loop = 0; loop < nb_ops; loop++) {
                if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
                        PMD_TX_LOG(ERR, "sessionless crypto op not supported");
                        return 0;
                }
                ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
                if (!ret)
                        num_tx++;
        }
        dpaa_qp->tx_pkts += num_tx;
        dpaa_qp->tx_errs += nb_ops - num_tx;

        return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        uint16_t num_rx;
        struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

        num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

        dpaa_qp->rx_pkts += num_rx;
        dpaa_qp->rx_errs += nb_ops - num_rx;

        PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

        return num_rx;
}

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
                            uint16_t qp_id)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        PMD_INIT_FUNC_TRACE();

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                PMD_INIT_LOG(ERR, "Max supported qpid %d",
                             internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = NULL;
        dev->data->queue_pairs[qp_id] = NULL;

        return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
                __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
                __rte_unused int socket_id,
                __rte_unused struct rte_mempool *session_pool)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp = NULL;

        PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
                     dev, qp_id, qp_conf);

        internals = dev->data->dev_private;
        if (qp_id >= internals->max_nb_queue_pairs) {
                PMD_INIT_LOG(ERR, "Max supported qpid %d",
                             internals->max_nb_queue_pairs);
                return -EINVAL;
        }

        qp = &internals->qps[qp_id];
        qp->internals = internals;
        dev->data->queue_pairs[qp_id] = qp;

        return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
                          __rte_unused uint16_t queue_pair_id)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
                         __rte_unused uint16_t queue_pair_id)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
        PMD_INIT_FUNC_TRACE();

        return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
                     struct rte_crypto_sym_xform *xform,
                     dpaa_sec_session *session)
{
        session->cipher_alg = xform->cipher.algo;
        session->iv.length = xform->cipher.iv.length;
        session->iv.offset = xform->cipher.iv.offset;
        session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
                                               RTE_CACHE_LINE_SIZE);
        if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
                return -ENOMEM;
        }
        session->cipher_key.length = xform->cipher.key.length;

        memcpy(session->cipher_key.data, xform->cipher.key.data,
               xform->cipher.key.length);
        session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->auth_alg = xform->auth.algo;
        session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for auth key\n");
                return -ENOMEM;
        }
        session->auth_key.length = xform->auth.key.length;
        session->digest_length = xform->auth.digest_length;

        memcpy(session->auth_key.data, xform->auth.key.data,
               xform->auth.key.length);
        session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
                   struct rte_crypto_sym_xform *xform,
                   dpaa_sec_session *session)
{
        session->aead_alg = xform->aead.algo;
        session->iv.length = xform->aead.iv.length;
        session->iv.offset = xform->aead.iv.offset;
        session->auth_only_len = xform->aead.aad_length;
        session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
                                             RTE_CACHE_LINE_SIZE);
        if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
                PMD_INIT_LOG(ERR, "No Memory for aead key\n");
                return -ENOMEM;
        }
        session->aead_key.length = xform->aead.key.length;
        session->digest_length = xform->aead.digest_length;

        memcpy(session->aead_key.data, xform->aead.key.data,
               xform->aead.key.length);
        session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
                        DIR_ENC : DIR_DEC;

        return 0;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
        dpaa_sec_session *sess = ses;
        struct dpaa_sec_qp *qp;

        PMD_INIT_FUNC_TRACE();

        qp = dev->data->queue_pairs[qp_id];
        if (qp->ses != NULL) {
                PMD_INIT_LOG(ERR, "qp in use by another session\n");
                return -EBUSY;
        }

        qp->ses = sess;
        sess->qp = qp;

        return dpaa_sec_prep_cdb(sess);
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev, uint16_t qp_id, void *ses)
{
        dpaa_sec_session *sess = ses;
        struct dpaa_sec_qp *qp;

        PMD_INIT_FUNC_TRACE();

        qp = dev->data->queue_pairs[qp_id];
        if (qp->ses != NULL) {
                qp->ses = NULL;
                sess->qp = NULL;
                return 0;
        }

        PMD_DRV_LOG(ERR, "No session attached to qp");
        return -EINVAL;
}

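/*
 * Parse the xform chain and fill in the session: cipher-only, auth-only,
 * cipher+auth (in the supported order) or AEAD.
 */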
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
                            struct rte_crypto_sym_xform *xform, void *sess)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;
        dpaa_sec_session *session = sess;

        PMD_INIT_FUNC_TRACE();

        if (unlikely(sess == NULL)) {
                RTE_LOG(ERR, PMD, "invalid session struct\n");
                return -EINVAL;
        }

        /* Default IV length = 0 */
        session->iv.length = 0;

        /* Cipher Only */
        if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
                session->auth_alg = RTE_CRYPTO_AUTH_NULL;
                dpaa_sec_cipher_init(dev, xform, session);

        /* Authentication Only */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next == NULL) {
                session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
                dpaa_sec_auth_init(dev, xform, session);

        /* Cipher then Authenticate */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
                if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
                        dpaa_sec_cipher_init(dev, xform, session);
                        dpaa_sec_auth_init(dev, xform->next, session);
                } else {
                        PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* Authenticate then Cipher */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
                   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
                if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
                        dpaa_sec_auth_init(dev, xform, session);
                        dpaa_sec_cipher_init(dev, xform->next, session);
                } else {
                        PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
                        return -EINVAL;
                }

        /* AEAD operation for AES-GCM kind of Algorithms */
        } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
                   xform->next == NULL) {
                dpaa_sec_aead_init(dev, xform, session);

        } else {
                PMD_DRV_LOG(ERR, "Invalid crypto type");
                return -EINVAL;
        }
        session->ctx_pool = internals->ctx_pool;

        return 0;
}

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
                struct rte_crypto_sym_xform *xform,
                struct rte_cryptodev_sym_session *sess,
                struct rte_mempool *mempool)
{
        void *sess_private_data;
        int ret;

        PMD_INIT_FUNC_TRACE();

        if (rte_mempool_get(mempool, &sess_private_data)) {
                CDEV_LOG_ERR(
                        "Couldn't get object from session mempool");
                return -ENOMEM;
        }

        ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
                                "session parameters");

                /* Return session to mempool */
                rte_mempool_put(mempool, sess_private_data);
                return ret;
        }

        set_session_private_data(sess, dev->driver_id,
                        sess_private_data);

        return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
                struct rte_cryptodev_sym_session *sess)
{
        PMD_INIT_FUNC_TRACE();
        uint8_t index = dev->driver_id;
        void *sess_priv = get_session_private_data(sess, index);
        dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

        if (sess_priv) {
                rte_free(s->cipher_key.data);
                rte_free(s->auth_key.data);
                memset(s, 0, sizeof(dpaa_sec_session));
                struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
                set_session_private_data(sess, index, NULL);
                rte_mempool_put(sess_mp, sess_priv);
        }
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
                       struct rte_cryptodev_config *config __rte_unused)
{
        PMD_INIT_FUNC_TRACE();

        return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
        PMD_INIT_FUNC_TRACE();
        return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
                       struct rte_cryptodev_info *info)
{
        struct dpaa_sec_dev_private *internals = dev->data->dev_private;

        PMD_INIT_FUNC_TRACE();
        if (info != NULL) {
                info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
                info->feature_flags = dev->feature_flags;
                info->capabilities = dpaa_sec_capabilities;
                info->sym.max_nb_sessions = internals->max_nb_sessions;
                info->sym.max_nb_sessions_per_qp =
                        RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS / RTE_MAX_NB_SEC_QPS;
                info->driver_id = cryptodev_driver_id;
        }
}

static struct rte_cryptodev_ops crypto_ops = {
        .dev_configure        = dpaa_sec_dev_configure,
        .dev_start            = dpaa_sec_dev_start,
        .dev_stop             = dpaa_sec_dev_stop,
        .dev_close            = dpaa_sec_dev_close,
        .dev_infos_get        = dpaa_sec_dev_infos_get,
        .queue_pair_setup     = dpaa_sec_queue_pair_setup,
        .queue_pair_release   = dpaa_sec_queue_pair_release,
        .queue_pair_start     = dpaa_sec_queue_pair_start,
        .queue_pair_stop      = dpaa_sec_queue_pair_stop,
        .queue_pair_count     = dpaa_sec_queue_pair_count,
        .session_get_size     = dpaa_sec_session_get_size,
        .session_configure    = dpaa_sec_session_configure,
        .session_clear        = dpaa_sec_session_clear,
        .qp_attach_session    = dpaa_sec_qp_attach_sess,
        .qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
        struct dpaa_sec_dev_private *internals;

        if (dev == NULL)
                return -ENODEV;

        /* Do not touch dev->data before the NULL check above */
        internals = dev->data->dev_private;

        rte_mempool_free(internals->ctx_pool);
        rte_free(internals);

        PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
                     dev->data->name, rte_socket_id());

        return 0;
}

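/*
 * Per-device initialization: hook up the ops table and burst functions,
 * create the QMan frame queues for every queue pair and allocate the
 * per-device op ctx mempool.
 */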
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
        struct dpaa_sec_dev_private *internals;
        struct dpaa_sec_qp *qp;
        uint32_t i;
        int ret;
        char str[20];

        PMD_INIT_FUNC_TRACE();

        cryptodev->driver_id = cryptodev_driver_id;
        cryptodev->dev_ops = &crypto_ops;

        cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
        cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
        cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
                        RTE_CRYPTODEV_FF_HW_ACCELERATED |
                        RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

        internals = cryptodev->data->dev_private;
        internals->max_nb_queue_pairs = RTE_MAX_NB_SEC_QPS;
        internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

        for (i = 0; i < internals->max_nb_queue_pairs; i++) {
                /* init qman fq for queue pair */
                qp = &internals->qps[i];
                ret = dpaa_sec_init_tx(&qp->outq);
                if (ret) {
                        PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
                        goto init_error;
                }
                ret = dpaa_sec_init_rx(&qp->inq, dpaa_mem_vtop(&qp->cdb),
                                       qman_fq_fqid(&qp->outq));
                if (ret) {
                        PMD_INIT_LOG(ERR, "config rx of queue pair %d", i);
                        goto init_error;
                }
        }

        sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
        internals->ctx_pool = rte_mempool_create((const char *)str,
                        CTX_POOL_NUM_BUFS,
                        CTX_POOL_BUF_SIZE,
                        CTX_POOL_CACHE_SIZE, 0,
                        NULL, NULL, NULL, NULL,
                        SOCKET_ID_ANY, 0);
        if (!internals->ctx_pool) {
                RTE_LOG(ERR, PMD, "%s create failed\n", str);
                goto init_error;
        }

        PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
        return 0;

init_error:
        PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

        dpaa_sec_uninit(cryptodev);
        return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
                                struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

        int retval;

        sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

        cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
        if (cryptodev == NULL)
                return -ENOMEM;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                cryptodev->data->dev_private = rte_zmalloc_socket(
                                        "cryptodev private structure",
                                        sizeof(struct dpaa_sec_dev_private),
                                        RTE_CACHE_LINE_SIZE,
                                        rte_socket_id());

                if (cryptodev->data->dev_private == NULL)
                        rte_panic("Cannot allocate memzone for private "
                                        "device data");
        }

        dpaa_dev->crypto_dev = cryptodev;
        cryptodev->device = &dpaa_dev->device;
        cryptodev->device->driver = &dpaa_drv->driver;

        /* init user callbacks */
        TAILQ_INIT(&(cryptodev->link_intr_cbs));

        /* if sec device version is not configured */
        if (!rta_get_sec_era()) {
                const struct device_node *caam_node;

                for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
                        const uint32_t *prop = of_get_property(caam_node,
                                        "fsl,sec-era",
                                        NULL);
                        if (prop) {
                                rta_set_sec_era(
                                        INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
                                break;
                        }
                }
        }

        /* Invoke PMD device initialization function */
        retval = dpaa_sec_dev_init(cryptodev);
        if (retval == 0)
                return 0;

        /* In case of error, cleanup is done */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(cryptodev->data->dev_private);

        rte_cryptodev_pmd_release_device(cryptodev);

        return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
        struct rte_cryptodev *cryptodev;
        int ret;

        cryptodev = dpaa_dev->crypto_dev;
        if (cryptodev == NULL)
                return -ENODEV;

        ret = dpaa_sec_uninit(cryptodev);
        if (ret)
                return ret;

        return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
        .drv_type = FSL_DPAA_CRYPTO,
        .driver = {
                .name = "DPAA SEC PMD"
        },
        .probe = cryptodev_dpaa_sec_probe,
        .remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
                cryptodev_driver_id);