crypto/dpaa_sec: support multiple sessions per queue pair
drivers/crypto/dpaa_sec/dpaa_sec.c
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}

	/* report op status to sym->op and then free the ctx memory */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
	struct dpaa_sec_op_ctx *ctx;
	int retval;

	retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
	if (!ctx || retval) {
		PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
		return NULL;
	}
	/*
	 * Clear SG memory. There are 16 SG entries of 16 bytes each.
	 * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
	 * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
	 * for each packet, dcbz_64() is cheaper than memset().
	 */
	dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
	dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

	ctx->ctx_pool = ses->ctx_pool;
	ctx->vtop_offset = (uint64_t) ctx
				- rte_mempool_virt2iova(ctx);

	return ctx;
}

static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	uint64_t vaddr_64, paddr;
	int i;

	vaddr_64 = (uint64_t)vaddr;
	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (vaddr_64 >= memseg[i].addr_64 &&
		    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
			paddr = memseg[i].iova +
				(vaddr_64 - memseg[i].addr_64);

			return (rte_iova_t)paddr;
		}
	}
	return (rte_iova_t)(NULL);
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
	return (uint64_t)vaddr - ctx->vtop_offset;
}

static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
	const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
	int i;

	for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
		if (paddr >= memseg[i].iova &&
		    (char *)paddr < (char *)memseg[i].iova + memseg[i].len)
			return (void *)(memseg[i].addr_64 +
					(paddr - memseg[i].iova));
	}
	return NULL;
}
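
/*
 * Illustrative sketch, not part of the driver: dpaa_mem_vtop() and
 * dpaa_mem_ptov() are inverses for any address inside a registered
 * memseg, so a round trip should return the original pointer. The
 * helper below is a hypothetical debug aid, compiled out.
 */
#if 0
static inline int
dpaa_mem_check_roundtrip(void *vaddr)
{
	rte_iova_t iova = dpaa_mem_vtop(vaddr);

	if (iova == (rte_iova_t)NULL)
		return -1;	/* vaddr is not backed by a DPDK memseg */
	return dpaa_mem_ptov(iova) == vaddr ? 0 : -1;
}
#endif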

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
		   struct qman_fq *fq,
		   const struct qm_mr_entry *msg)
{
	RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
		   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with dest chan as the caam chan so that
 * all the packets in this queue can be dispatched to caam.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
		 uint32_t fqid_out)
{
	struct qm_mcc_initfq fq_opts;
	uint32_t flags;
	int ret = -1;

	/* Clear FQ options */
	memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

	flags = QMAN_INITFQ_FLAG_SCHED;
	fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
			  QM_INITFQ_WE_CONTEXTB;

	qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
	fq_opts.fqd.context_b = fqid_out;
	fq_opts.fqd.dest.channel = qm_channel_caam;
	fq_opts.fqd.dest.wq = 0;

	fq_in->cb.ern  = ern_sec_fq_handler;

	PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

	ret = qman_init_fq(fq_in, flags, &fq_opts);
	if (unlikely(ret != 0))
		PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

	return ret;
}

/* Frames are enqueued on in_fq and caam puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
		  struct qman_fq *fq __always_unused,
		  const struct qm_dqrr_entry *dqrr)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;

	if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
		return qman_cb_dqrr_defer;

	if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
		return qman_cb_dqrr_consume;

	fd = &dqrr->fd;
	/* sg is embedded in an op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
	job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
	dpaa_sec_op_ending(ctx);

	return qman_cb_dqrr_consume;
}

/* The caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
	int ret;
	struct qm_mcc_initfq opts;
	uint32_t flags;

	flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
		QMAN_FQ_FLAG_DYNAMIC_FQID;

	ret = qman_create_fq(0, flags, fq);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "qman_create_fq failed");
		return ret;
	}

	memset(&opts, 0, sizeof(opts));
	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

	/* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

	fq->cb.dqrr = dqrr_out_fq_cb_rx;
	fq->cb.ern  = ern_sec_fq_handler;

	ret = qman_init_fq(fq, 0, &opts);
	if (unlikely(ret)) {
		PMD_INIT_LOG(ERR, "unable to init caam source fq!");
		return ret;
	}

	return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg == 0) &&
		(ses->auth_alg == 0) &&
		(ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
	return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
		(ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_encode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
	return ses->dir == DIR_DEC;
}

static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
	switch (ses->auth_alg) {
	case RTE_CRYPTO_AUTH_NULL:
		ses->digest_length = 0;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_MD5;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA1;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA224;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA256;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA384;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		alginfo_a->algtype = OP_ALG_ALGSEL_SHA512;
		alginfo_a->algmode = OP_ALG_AAI_HMAC;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
	}
}

static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
	switch (ses->cipher_alg) {
	case RTE_CRYPTO_CIPHER_NULL:
		break;
	case RTE_CRYPTO_CIPHER_AES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		alginfo_c->algtype = OP_ALG_ALGSEL_3DES;
		alginfo_c->algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		alginfo_c->algtype = OP_ALG_ALGSEL_AES;
		alginfo_c->algmode = OP_ALG_AAI_CTR;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
	}
}

static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
	switch (ses->aead_alg) {
	case RTE_CRYPTO_AEAD_AES_GCM:
		alginfo->algtype = OP_ALG_ALGSEL_AES;
		alginfo->algmode = OP_ALG_AAI_GCM;
		break;
	default:
		PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
	}
}

/* prepare the command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
	struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
	uint32_t shared_desc_len = 0;
	struct sec_cdb *cdb = &ses->cdb;
	int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
	int swap = false;
#else
	int swap = true;
#endif

	memset(cdb, 0, sizeof(struct sec_cdb));

	if (is_cipher_only(ses)) {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_blkcipher(
						cdb->sh_desc, true,
						swap, &alginfo_c,
						NULL,
						ses->iv.length,
						ses->dir);
	} else if (is_auth_only(ses)) {
		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
						   swap, &alginfo_a,
						   !ses->dir,
						   ses->digest_length);
	} else if (is_aead(ses)) {
		caam_aead_alg(ses, &alginfo);
		if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported aead alg\n");
			return -ENOTSUP;
		}
		alginfo.key = (uint64_t)ses->aead_key.data;
		alginfo.keylen = ses->aead_key.length;
		alginfo.key_enc_flags = 0;
		alginfo.key_type = RTA_DATA_IMM;

		if (ses->dir == DIR_ENC)
			shared_desc_len = cnstr_shdsc_gcm_encap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
		else
			shared_desc_len = cnstr_shdsc_gcm_decap(
					cdb->sh_desc, true, swap,
					&alginfo,
					ses->iv.length,
					ses->digest_length);
	} else {
		caam_cipher_alg(ses, &alginfo_c);
		if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported cipher alg\n");
			return -ENOTSUP;
		}

		alginfo_c.key = (uint64_t)ses->cipher_key.data;
		alginfo_c.keylen = ses->cipher_key.length;
		alginfo_c.key_enc_flags = 0;
		alginfo_c.key_type = RTA_DATA_IMM;

		caam_auth_alg(ses, &alginfo_a);
		if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
			PMD_TX_LOG(ERR, "unsupported auth alg\n");
			return -ENOTSUP;
		}

		alginfo_a.key = (uint64_t)ses->auth_key.data;
		alginfo_a.keylen = ses->auth_key.length;
		alginfo_a.key_enc_flags = 0;
		alginfo_a.key_type = RTA_DATA_IMM;

		cdb->sh_desc[0] = alginfo_c.keylen;
		cdb->sh_desc[1] = alginfo_a.keylen;
		err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
				       MIN_JOB_DESC_SIZE,
				       (unsigned int *)cdb->sh_desc,
				       &cdb->sh_desc[2], 2);

		if (err < 0) {
			PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
			return err;
		}
		if (cdb->sh_desc[2] & 1)
			alginfo_c.key_type = RTA_DATA_IMM;
		else {
			alginfo_c.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_c.key);
			alginfo_c.key_type = RTA_DATA_PTR;
		}
		if (cdb->sh_desc[2] & (1<<1))
			alginfo_a.key_type = RTA_DATA_IMM;
		else {
			alginfo_a.key = (uint64_t)dpaa_mem_vtop(
							(void *)alginfo_a.key);
			alginfo_a.key_type = RTA_DATA_PTR;
		}
		cdb->sh_desc[0] = 0;
		cdb->sh_desc[1] = 0;
		cdb->sh_desc[2] = 0;

		/* auth_only_len is set to 0 here; it will be overwritten
		 * in the fd for each packet.
		 */
		shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
				true, swap, &alginfo_c, &alginfo_a,
				ses->iv.length, 0,
				ses->digest_length, ses->dir);
	}
	cdb->sh_hdr.hi.field.idlen = shared_desc_len;
	cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
	cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

	return 0;
}

static inline unsigned int
dpaa_volatile_deq(struct qman_fq *fq, unsigned int len, bool exact)
{
	unsigned int pkts = 0;
	int ret;
	struct qm_mcr_queryfq_np np;
	enum qman_fq_state state;
	uint32_t flags;
	uint32_t vdqcr;

	qman_query_fq_np(fq, &np);
	if (np.frm_cnt) {
		vdqcr = QM_VDQCR_NUMFRAMES_SET(len);
		if (exact)
			vdqcr |= QM_VDQCR_EXACT;
		ret = qman_volatile_dequeue(fq, 0, vdqcr);
		if (ret)
			return 0;
		do {
			pkts += qman_poll_dqrr(len);
			qman_fq_state(fq, &state, &flags);
		} while (flags & QMAN_FQ_STATE_VDQCR);
	}
	return pkts;
}

/* qp is lockless; it should be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
	struct qman_fq *fq;

	fq = &qp->outq;
	dpaa_sec_op_nb = 0;
	dpaa_sec_ops = ops;

	if (unlikely(nb_ops > DPAA_SEC_BURST))
		nb_ops = DPAA_SEC_BURST;

	return dpaa_volatile_deq(fq, nb_ops, 1);
}

/**
 * packet looks like:
 *		|<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *		^
 *		|
 *	   mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct rte_mbuf *mbuf = sym->m_src;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t start_addr;
	uint8_t *old_digest;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;
	old_digest = ctx->digest;

	start_addr = rte_pktmbuf_iova(mbuf);
	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
	sg->length = ses->digest_length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];
	if (is_decode(ses)) {
		/* need to extend the input to a compound frame */
		sg->extension = 1;
		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
		sg->length = sym->auth.data.length + ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);

		sg = &cf->sg[2];
		/* hash result or digest: save the digest first */
		rte_memcpy(old_digest, sym->auth.digest.data,
			   ses->digest_length);
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		cpu_to_hw_sg(sg);

		/* let the hw verify the saved digest */
		start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
		sg++;
		qm_sg_entry_set64(sg, start_addr);
		sg->length = ses->digest_length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	src_start_addr = rte_pktmbuf_iova(sym->m_src);

	if (sym->m_dst)
		dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
	else
		dst_start_addr = src_start_addr;

	/* output */
	sg = &cf->sg[0];
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length + ses->iv.length;
	cpu_to_hw_sg(sg);

	/* input */
	sg = &cf->sg[1];

	/* need to extend the input to a compound frame */
	sg->extension = 1;
	sg->final = 1;
	sg->length = sym->cipher.data.length + ses->iv.length;
	qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
	cpu_to_hw_sg(sg);

	sg = &cf->sg[2];
	qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
	sg->length = ses->iv.length;
	cpu_to_hw_sg(sg);

	sg++;
	qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return cf;
}
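
/*
 * For reference, an assumed view of the compound frame assembled by
 * build_cipher_only() above (single contiguous mbuf):
 *
 *   cf->sg[0]  output            -> dst + cipher.data.offset,
 *                                   length = cipher text + IV
 *   cf->sg[1]  input (extension) -> points to cf->sg[2]
 *   cf->sg[2]  IV                -> length = ses->iv.length
 *   cf->sg[3]  payload (final)   -> src + cipher.data.offset,
 *                                   length = cipher.data.length
 */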

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	uint32_t length = 0;
	rte_iova_t src_start_addr, dst_start_addr;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		if (ses->auth_only_len) {
			qm_sg_entry_set64(sg,
					  dpaa_mem_vtop(sym->aead.aad.data));
			sg->length = ses->auth_only_len;
			length += sg->length;
			cpu_to_hw_sg(sg);
			sg++;
		}
		qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
		sg->length = sym->aead.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->aead.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg,
		dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
	sg->length = sym->aead.data.length + ses->auth_only_len;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
	struct rte_crypto_sym_op *sym = op->sym;
	struct dpaa_sec_job *cf;
	struct dpaa_sec_op_ctx *ctx;
	struct qm_sg_entry *sg;
	rte_iova_t src_start_addr, dst_start_addr;
	uint32_t length = 0;
	uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
			ses->iv.offset);

	src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
	if (sym->m_dst)
		dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
	else
		dst_start_addr = src_start_addr;

	ctx = dpaa_sec_alloc_ctx(ses);
	if (!ctx)
		return NULL;

	cf = &ctx->job;
	ctx->op = op;

	/* input */
	rte_prefetch0(cf->sg);
	sg = &cf->sg[2];
	qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
	if (is_encode(ses)) {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;
		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	} else {
		qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
		sg->length = ses->iv.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		sg++;

		qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
		sg->length = sym->auth.data.length;
		length += sg->length;
		cpu_to_hw_sg(sg);

		memcpy(ctx->digest, sym->auth.digest.data,
		       ses->digest_length);
		sg++;

		qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
		sg->length = ses->digest_length;
		length += sg->length;
		sg->final = 1;
		cpu_to_hw_sg(sg);
	}
	/* input compound frame */
	cf->sg[1].length = length;
	cf->sg[1].extension = 1;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);

	/* output */
	sg++;
	qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
	qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
	sg->length = sym->cipher.data.length;
	length = sg->length;
	if (is_encode(ses)) {
		cpu_to_hw_sg(sg);
		/* set auth output */
		sg++;
		qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
		sg->length = ses->digest_length;
		length += sg->length;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	/* output compound frame */
	cf->sg[0].length = length;
	cf->sg[0].extension = 1;
	cpu_to_hw_sg(&cf->sg[0]);

	return cf;
}

static int
dpaa_sec_enqueue_op(struct rte_crypto_op *op, struct dpaa_sec_qp *qp)
{
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	struct qm_fd fd;
	int ret;
	uint32_t auth_only_len = op->sym->auth.data.length -
				op->sym->cipher.data.length;

	ses = (dpaa_sec_session *)get_session_private_data(op->sym->session,
					cryptodev_driver_id);

	if (unlikely(!ses->qp || ses->qp != qp)) {
		PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p", ses->qp, qp);
		if (dpaa_sec_attach_sess_q(qp, ses))
			return -1;
	}

	/*
	 * Segmented buffers are not supported.
	 */
	if (!rte_pktmbuf_is_contiguous(op->sym->m_src)) {
		op->status = RTE_CRYPTO_OP_STATUS_ERROR;
		return -ENOTSUP;
	}
	if (is_auth_only(ses)) {
		cf = build_auth_only(op, ses);
	} else if (is_cipher_only(ses)) {
		cf = build_cipher_only(op, ses);
	} else if (is_aead(ses)) {
		cf = build_cipher_auth_gcm(op, ses);
		auth_only_len = ses->auth_only_len;
	} else if (is_auth_cipher(ses)) {
		cf = build_cipher_auth(op, ses);
	} else {
		PMD_TX_LOG(ERR, "unsupported sec op");
		return -ENOTSUP;
	}
	if (unlikely(!cf))
		return -ENOMEM;

	memset(&fd, 0, sizeof(struct qm_fd));
	qm_fd_addr_set64(&fd, dpaa_mem_vtop(cf->sg));
	fd._format1 = qm_fd_compound;
	fd.length29 = 2 * sizeof(struct qm_sg_entry);
	/* auth_only_len is set to 0 in the descriptor; it is overwritten
	 * here in fd.cmd, which updates the DPOVRD register.
	 */
	if (auth_only_len)
		fd.cmd = 0x80000000 | auth_only_len;
	do {
		ret = qman_enqueue(ses->inq, &fd, 0);
	} while (ret != 0);

	return 0;
}
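
/*
 * Worked example for the DPOVRD override above (assumed field split:
 * bit 31 = override valid, low bits = auth-only length). For an op
 * with auth.data.length = 96 and cipher.data.length = 80,
 * auth_only_len = 16, so fd.cmd = 0x80000000 | 16 = 0x80000010.
 */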

static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to the given device and queue pair */
	uint32_t loop;
	int32_t ret;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;

	if (unlikely(nb_ops == 0))
		return 0;

	/* Prepare each packet to be sent */
	for (loop = 0; loop < nb_ops; loop++) {
		if (ops[loop]->sess_type != RTE_CRYPTO_OP_WITH_SESSION) {
			PMD_TX_LOG(ERR, "sessionless crypto op not supported");
			return 0;
		}
		ret = dpaa_sec_enqueue_op(ops[loop], dpaa_qp);
		if (!ret)
			num_tx++;
	}
	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}

static uint16_t
dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	uint16_t num_rx;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;

	num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);

	dpaa_qp->rx_pkts += num_rx;
	dpaa_qp->rx_errs += nb_ops - num_rx;

	PMD_RX_LOG(DEBUG, "SEC Received %d Packets\n", num_rx);

	return num_rx;
}
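
/*
 * Minimal application-side usage sketch for the two burst hooks above
 * (assumptions: dev_id/qp_id are configured and ops[] holds prepared
 * symmetric ops with sessions attached). Compiled out; illustration
 * only.
 */
#if 0
static void
example_process_burst(uint8_t dev_id, uint16_t qp_id,
		      struct rte_crypto_op **ops, uint16_t n)
{
	uint16_t sent = 0, recvd = 0;

	while (sent < n)
		sent += rte_cryptodev_enqueue_burst(dev_id, qp_id,
						    &ops[sent], n - sent);
	while (recvd < n)
		recvd += rte_cryptodev_dequeue_burst(dev_id, qp_id,
						     &ops[recvd], n - recvd);
}
#endif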

/** Release queue pair */
static int
dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
			    uint16_t qp_id)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_FUNC_TRACE();

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d", dev, qp_id);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = NULL;
	dev->data->queue_pairs[qp_id] = NULL;

	return 0;
}

/** Setup a queue pair */
static int
dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
		__rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
		__rte_unused int socket_id,
		__rte_unused struct rte_mempool *session_pool)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp = NULL;

	PMD_INIT_LOG(DEBUG, "dev =%p, queue =%d, conf =%p",
		     dev, qp_id, qp_conf);

	internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Max supported qpid %d",
			     internals->max_nb_queue_pairs);
		return -EINVAL;
	}

	qp = &internals->qps[qp_id];
	qp->internals = internals;
	dev->data->queue_pairs[qp_id] = qp;

	return 0;
}

/** Start queue pair */
static int
dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
			  __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Stop queue pair */
static int
dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
			 __rte_unused uint16_t queue_pair_id)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

/** Return the number of allocated queue pairs */
static uint32_t
dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	return dev->data->nb_queue_pairs;
}

/** Return the size of the session structure */
static unsigned int
dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}

static int
dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
		     struct rte_crypto_sym_xform *xform,
		     dpaa_sec_session *session)
{
	session->cipher_alg = xform->cipher.algo;
	session->iv.length = xform->cipher.iv.length;
	session->iv.offset = xform->cipher.iv.offset;
	session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
					       RTE_CACHE_LINE_SIZE);
	if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
		PMD_INIT_LOG(ERR, "No memory for cipher key\n");
		return -ENOMEM;
	}
	session->cipher_key.length = xform->cipher.key.length;

	memcpy(session->cipher_key.data, xform->cipher.key.data,
	       xform->cipher.key.length);
	session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->auth_alg = xform->auth.algo;
	session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
		PMD_INIT_LOG(ERR, "No memory for auth key\n");
		return -ENOMEM;
	}
	session->auth_key.length = xform->auth.key.length;
	session->digest_length = xform->auth.digest_length;

	memcpy(session->auth_key.data, xform->auth.key.data,
	       xform->auth.key.length);
	session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
			DIR_ENC : DIR_DEC;

	return 0;
}

static int
dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
		   struct rte_crypto_sym_xform *xform,
		   dpaa_sec_session *session)
{
	session->aead_alg = xform->aead.algo;
	session->iv.length = xform->aead.iv.length;
	session->iv.offset = xform->aead.iv.offset;
	session->auth_only_len = xform->aead.aad_length;
	session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
					     RTE_CACHE_LINE_SIZE);
	if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
		PMD_INIT_LOG(ERR, "No memory for aead key\n");
		return -ENOMEM;
	}
	session->aead_key.length = xform->aead.key.length;
	session->digest_length = xform->aead.digest_length;

	memcpy(session->aead_key.data, xform->aead.key.data,
	       xform->aead.key.length);
	session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
			DIR_ENC : DIR_DEC;

	return 0;
}
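
/*
 * Sketch of the application-side AEAD xform consumed by
 * dpaa_sec_aead_init() above. Values are illustrative (AES-128-GCM,
 * 12-byte IV, 16-byte tag); the IV offset follows the common DPDK
 * convention of placing the IV right after the crypto op. Compiled
 * out; illustration only.
 */
#if 0
static void
example_fill_gcm_xform(struct rte_crypto_sym_xform *xform, uint8_t *key)
{
	memset(xform, 0, sizeof(*xform));
	xform->type = RTE_CRYPTO_SYM_XFORM_AEAD;
	xform->next = NULL;
	xform->aead.algo = RTE_CRYPTO_AEAD_AES_GCM;
	xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT;
	xform->aead.key.data = key;
	xform->aead.key.length = 16;
	xform->aead.iv.offset = sizeof(struct rte_crypto_op) +
				sizeof(struct rte_crypto_sym_op);
	xform->aead.iv.length = 12;
	xform->aead.digest_length = 16;
	xform->aead.aad_length = 0;
}
#endif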

static struct qman_fq *
dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (qi->inq_attach[i] == 0) {
			qi->inq_attach[i] = 1;
			return &qi->inq[i];
		}
	}
	PMD_DRV_LOG(ERR, "All %u session queues in use", qi->max_nb_sessions);

	return NULL;
}

static int
dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
{
	unsigned int i;

	for (i = 0; i < qi->max_nb_sessions; i++) {
		if (&qi->inq[i] == fq) {
			qi->inq_attach[i] = 0;
			return 0;
		}
	}
	return -1;
}

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
		return -1;
	}

	ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		PMD_DRV_LOG(ERR, "Unable to init sec queue");

	return ret;
}

static int
dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
			uint16_t qp_id __rte_unused,
			void *ses __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static int
dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
			uint16_t qp_id __rte_unused,
			void *ses)
{
	dpaa_sec_session *sess = ses;
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (sess->inq)
		dpaa_sec_detach_rxq(qi, sess->inq);
	sess->inq = NULL;

	sess->qp = NULL;

	return 0;
}

static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		RTE_LOG(ERR, PMD, "invalid session struct\n");
		return -EINVAL;
	}

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			dpaa_sec_cipher_init(dev, xform, session);
			dpaa_sec_auth_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Cipher then Auth with decrypt");
			return -EINVAL;
		}

	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			dpaa_sec_auth_init(dev, xform, session);
			dpaa_sec_cipher_init(dev, xform->next, session);
		} else {
			PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher with encrypt");
			return -EINVAL;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		dpaa_sec_aead_init(dev, xform, session);

	} else {
		PMD_DRV_LOG(ERR, "Invalid crypto type");
		return -EINVAL;
	}
	session->ctx_pool = internals->ctx_pool;
	session->inq = dpaa_sec_attach_rxq(internals);
	if (session->inq == NULL) {
		PMD_DRV_LOG(ERR, "unable to attach sec queue");
		goto err1;
	}

	return 0;

err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));

	return -EINVAL;
}
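
/*
 * Sketch of the xform chaining accepted above for encrypt-then-MAC:
 * cipher xform first, auth xform linked via ->next. Hypothetical
 * helper, compiled out; illustration only.
 */
#if 0
static void
example_chain_enc_then_mac(struct rte_crypto_sym_xform *cipher,
			   struct rte_crypto_sym_xform *auth)
{
	cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
	cipher->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
	cipher->next = auth;

	auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
	auth->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
	auth->next = NULL;
}
#endif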

static int
dpaa_sec_session_configure(struct rte_cryptodev *dev,
		struct rte_crypto_sym_xform *xform,
		struct rte_cryptodev_sym_session *sess,
		struct rte_mempool *mempool)
{
	void *sess_private_data;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (rte_mempool_get(mempool, &sess_private_data)) {
		CDEV_LOG_ERR(
			"Couldn't get object from session mempool");
		return -ENOMEM;
	}

	ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
				"session parameters");

		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_session_private_data(sess, dev->driver_id,
			sess_private_data);

	return 0;
}

/** Clear the memory of session so it doesn't leave key material behind */
static void
dpaa_sec_session_clear(struct rte_cryptodev *dev,
		struct rte_cryptodev_sym_session *sess)
{
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);

	PMD_INIT_FUNC_TRACE();

	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	if (sess_priv) {
		struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);

		if (s->inq)
			dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
		set_session_private_data(sess, index, NULL);
		rte_mempool_put(sess_mp, sess_priv);
	}
}

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->sym.max_nb_sessions_per_qp =
			RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
			RTE_DPAA_MAX_NB_SEC_QPS;
		info->driver_id = cryptodev_driver_id;
	}
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa_sec_dev_configure,
	.dev_start            = dpaa_sec_dev_start,
	.dev_stop             = dpaa_sec_dev_stop,
	.dev_close            = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.queue_pair_start     = dpaa_sec_queue_pair_start,
	.queue_pair_stop      = dpaa_sec_queue_pair_stop,
	.queue_pair_count     = dpaa_sec_queue_pair_count,
	.session_get_size     = dpaa_sec_session_get_size,
	.session_configure    = dpaa_sec_session_configure,
	.session_clear        = dpaa_sec_session_clear,
	.qp_attach_session    = dpaa_sec_qp_attach_sess,
	.qp_detach_session    = dpaa_sec_qp_detach_sess,
};

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_mempool_free(internals->ctx_pool);
	rte_free(internals);

	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
		     dev->data->name, rte_socket_id());

	return 0;
}

static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;
	char str[20];

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			PMD_INIT_LOG(ERR, "config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < internals->max_nb_sessions; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
			goto init_error;
		}
	}

	sprintf(str, "ctx_pool_%d", cryptodev->data->dev_id);
	internals->ctx_pool = rte_mempool_create((const char *)str,
			CTX_POOL_NUM_BUFS,
			CTX_POOL_BUF_SIZE,
			CTX_POOL_CACHE_SIZE, 0,
			NULL, NULL, NULL, NULL,
			SOCKET_ID_ANY, 0);
	if (!internals->ctx_pool) {
		RTE_LOG(ERR, PMD, "%s create failed\n", str);
		goto init_error;
	}

	PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
	return 0;

init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);

	dpaa_sec_uninit(cryptodev);
	return -EFAULT;
}

static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];

	int retval;

	sprintf(cryptodev_name, "dpaa_sec-%d", dpaa_dev->id.dev_id);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;
	cryptodev->device->driver = &dpaa_drv->driver;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return -ENXIO;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
		cryptodev_driver_id);