21abd82bc3469b4541a27a11320d6e56d04b1db7
[dpdk.git] / drivers / crypto / dpaa_sec / dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39
40 enum rta_sec_era rta_sec_era;
41
42 static uint8_t cryptodev_driver_id;
43
44 static __thread struct rte_crypto_op **dpaa_sec_ops;
45 static __thread int dpaa_sec_op_nb;
46
47 static int
48 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
49
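/* Report the SEC completion status on the op and return its context to the
 * per-session ctx mempool.
 */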
50 static inline void
51 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
52 {
53         if (!ctx->fd_status) {
54                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
55         } else {
56                 PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
57                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
58         }
59
60         /* report op status to the crypto op and then free the ctx memory */
61         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
62 }
63
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
66 {
67         struct dpaa_sec_op_ctx *ctx;
68         int retval;
69
70         retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
71         if (!ctx || retval) {
72                 PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
73                 return NULL;
74         }
75         /*
76          * Clear SG memory. There are 16 SG entries of 16 bytes each.
77          * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
78          * to clear all the SG entries. Since dpaa_sec_alloc_ctx() is called
79          * for each packet, memset() would be costlier than dcbz_64().
80          */
81         dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
82         dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
83         dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
84         dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
85
86         ctx->ctx_pool = ses->ctx_pool;
87         ctx->vtop_offset = (size_t) ctx
88                                 - rte_mempool_virt2iova(ctx);
89
90         return ctx;
91 }
92
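/* Generic virtual-to-IOVA translation using a memseg lookup; used on slow
 * paths such as descriptor preparation. The ctx variant below avoids the
 * lookup on the per-packet path.
 */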
93 static inline rte_iova_t
94 dpaa_mem_vtop(void *vaddr)
95 {
96         const struct rte_memseg *ms;
97
98         ms = rte_mem_virt2memseg(vaddr, NULL);
99         if (ms)
100                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
101         return (size_t)NULL;
102 }
103
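/* The precomputed vtop_offset satisfies vaddr - vtop_offset == iova(vaddr)
 * for any vaddr inside the ctx object, since dpaa_sec_alloc_ctx() sets
 * vtop_offset = ctx_vaddr - ctx_iova.
 */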
104 /* virtual address conversion when mempool support is available for ctx */
105 static inline phys_addr_t
106 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
107 {
108         return (size_t)vaddr - ctx->vtop_offset;
109 }
110
111 static inline void *
112 dpaa_mem_ptov(rte_iova_t paddr)
113 {
114         return rte_mem_iova2virt(paddr);
115 }
116
117 static void
118 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
119                    struct qman_fq *fq,
120                    const struct qm_mr_entry *msg)
121 {
122         RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
123                    fq->fqid, msg->ern.rc, msg->ern.seqnum);
124 }
125
126 /* Initialize the queue with dest chan as the CAAM channel so that
127  * all the packets in this queue can be dispatched to CAAM
128  */
129 static int
130 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
131                  uint32_t fqid_out)
132 {
133         struct qm_mcc_initfq fq_opts;
134         uint32_t flags;
135         int ret = -1;
136
137         /* Clear FQ options */
138         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
139
140         flags = QMAN_INITFQ_FLAG_SCHED;
141         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
142                           QM_INITFQ_WE_CONTEXTB;
143
144         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
145         fq_opts.fqd.context_b = fqid_out;
146         fq_opts.fqd.dest.channel = qm_channel_caam;
147         fq_opts.fqd.dest.wq = 0;
148
149         fq_in->cb.ern  = ern_sec_fq_handler;
150
151         PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
152
153         ret = qman_init_fq(fq_in, flags, &fq_opts);
154         if (unlikely(ret != 0))
155                 PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
156
157         return ret;
158 }
159
160 /* frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
161 static enum qman_cb_dqrr_result
162 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
163                   struct qman_fq *fq __always_unused,
164                   const struct qm_dqrr_entry *dqrr)
165 {
166         const struct qm_fd *fd;
167         struct dpaa_sec_job *job;
168         struct dpaa_sec_op_ctx *ctx;
169
170         if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
171                 return qman_cb_dqrr_defer;
172
173         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
174                 return qman_cb_dqrr_consume;
175
176         fd = &dqrr->fd;
177         /* sg is embedded in an op ctx,
178          * sg[0] is for output
179          * sg[1] for input
180          */
181         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
182
183         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
184         ctx->fd_status = fd->status;
185         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
186                 struct qm_sg_entry *sg_out;
187                 uint32_t len;
188
189                 sg_out = &job->sg[0];
190                 hw_sg_to_cpu(sg_out);
191                 len = sg_out->length;
192                 ctx->op->sym->m_src->pkt_len = len;
193                 ctx->op->sym->m_src->data_len = len;
194         }
195         dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
196         dpaa_sec_op_ending(ctx);
197
198         return qman_cb_dqrr_consume;
199 }
200
201 /* the CAAM result is delivered to this queue */
202 static int
203 dpaa_sec_init_tx(struct qman_fq *fq)
204 {
205         int ret;
206         struct qm_mcc_initfq opts;
207         uint32_t flags;
208
209         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
210                 QMAN_FQ_FLAG_DYNAMIC_FQID;
211
212         ret = qman_create_fq(0, flags, fq);
213         if (unlikely(ret)) {
214                 PMD_INIT_LOG(ERR, "qman_create_fq failed");
215                 return ret;
216         }
217
218         memset(&opts, 0, sizeof(opts));
219         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
220                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
221
222         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
223
224         fq->cb.dqrr = dqrr_out_fq_cb_rx;
225         fq->cb.ern  = ern_sec_fq_handler;
226
227         ret = qman_init_fq(fq, 0, &opts);
228         if (unlikely(ret)) {
229                 PMD_INIT_LOG(ERR, "unable to init caam source fq!");
230                 return ret;
231         }
232
233         return ret;
234 }
235
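/* Helpers classifying a session by the combination of cipher, auth, AEAD
 * and protocol-offload algorithms configured on it.
 */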
236 static inline int is_cipher_only(dpaa_sec_session *ses)
237 {
238         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
239                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
240 }
241
242 static inline int is_auth_only(dpaa_sec_session *ses)
243 {
244         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
245                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
246 }
247
248 static inline int is_aead(dpaa_sec_session *ses)
249 {
250         return ((ses->cipher_alg == 0) &&
251                 (ses->auth_alg == 0) &&
252                 (ses->aead_alg != 0));
253 }
254
255 static inline int is_auth_cipher(dpaa_sec_session *ses)
256 {
257         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
258                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
259                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
260 }
261
262 static inline int is_proto_ipsec(dpaa_sec_session *ses)
263 {
264         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
265 }
266
267 static inline int is_encode(dpaa_sec_session *ses)
268 {
269         return ses->dir == DIR_ENC;
270 }
271
272 static inline int is_decode(dpaa_sec_session *ses)
273 {
274         return ses->dir == DIR_DEC;
275 }
276
277 static inline void
278 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
279 {
280         switch (ses->auth_alg) {
281         case RTE_CRYPTO_AUTH_NULL:
282                 ses->digest_length = 0;
283                 break;
284         case RTE_CRYPTO_AUTH_MD5_HMAC:
285                 alginfo_a->algtype =
286                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
287                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
288                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
289                 break;
290         case RTE_CRYPTO_AUTH_SHA1_HMAC:
291                 alginfo_a->algtype =
292                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
293                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
294                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
295                 break;
296         case RTE_CRYPTO_AUTH_SHA224_HMAC:
297                 alginfo_a->algtype =
298                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
299                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
300                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
301                 break;
302         case RTE_CRYPTO_AUTH_SHA256_HMAC:
303                 alginfo_a->algtype =
304                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
305                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
306                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
307                 break;
308         case RTE_CRYPTO_AUTH_SHA384_HMAC:
309                 alginfo_a->algtype =
310                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
311                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
312                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
313                 break;
314         case RTE_CRYPTO_AUTH_SHA512_HMAC:
315                 alginfo_a->algtype =
316                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
317                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
318                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
319                 break;
320         default:
321                 PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
322         }
323 }
324
325 static inline void
326 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
327 {
328         switch (ses->cipher_alg) {
329         case RTE_CRYPTO_CIPHER_NULL:
330                 break;
331         case RTE_CRYPTO_CIPHER_AES_CBC:
332                 alginfo_c->algtype =
333                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
334                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
335                 alginfo_c->algmode = OP_ALG_AAI_CBC;
336                 break;
337         case RTE_CRYPTO_CIPHER_3DES_CBC:
338                 alginfo_c->algtype =
339                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
340                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
341                 alginfo_c->algmode = OP_ALG_AAI_CBC;
342                 break;
343         case RTE_CRYPTO_CIPHER_AES_CTR:
344                 alginfo_c->algtype =
345                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
346                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
347                 alginfo_c->algmode = OP_ALG_AAI_CTR;
348                 break;
349         default:
350                 PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
351         }
352 }
353
354 static inline void
355 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
356 {
357         switch (ses->aead_alg) {
358         case RTE_CRYPTO_AEAD_AES_GCM:
359                 alginfo->algtype = OP_ALG_ALGSEL_AES;
360                 alginfo->algmode = OP_ALG_AAI_GCM;
361                 break;
362         default:
363                 PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
364         }
365 }
366
367
368 /* prepare command block of the session */
369 static int
370 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
371 {
372         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
373         uint32_t shared_desc_len = 0;
374         struct sec_cdb *cdb = &ses->cdb;
375         int err;
376 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
377         int swap = false;
378 #else
379         int swap = true;
380 #endif
381
382         memset(cdb, 0, sizeof(struct sec_cdb));
383
384         if (is_cipher_only(ses)) {
385                 caam_cipher_alg(ses, &alginfo_c);
386                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
387                         PMD_TX_LOG(ERR, "unsupported cipher alg\n");
388                         return -ENOTSUP;
389                 }
390
391                 alginfo_c.key = (size_t)ses->cipher_key.data;
392                 alginfo_c.keylen = ses->cipher_key.length;
393                 alginfo_c.key_enc_flags = 0;
394                 alginfo_c.key_type = RTA_DATA_IMM;
395
396                 shared_desc_len = cnstr_shdsc_blkcipher(
397                                                 cdb->sh_desc, true,
398                                                 swap, &alginfo_c,
399                                                 NULL,
400                                                 ses->iv.length,
401                                                 ses->dir);
402         } else if (is_auth_only(ses)) {
403                 caam_auth_alg(ses, &alginfo_a);
404                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
405                         PMD_TX_LOG(ERR, "unsupported auth alg\n");
406                         return -ENOTSUP;
407                 }
408
409                 alginfo_a.key = (size_t)ses->auth_key.data;
410                 alginfo_a.keylen = ses->auth_key.length;
411                 alginfo_a.key_enc_flags = 0;
412                 alginfo_a.key_type = RTA_DATA_IMM;
413
414                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
415                                                    swap, &alginfo_a,
416                                                    !ses->dir,
417                                                    ses->digest_length);
418         } else if (is_aead(ses)) {
419                 caam_aead_alg(ses, &alginfo);
420                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
421                         PMD_TX_LOG(ERR, "unsupported AEAD alg\n");
422                         return -ENOTSUP;
423                 }
424                 alginfo.key = (size_t)ses->aead_key.data;
425                 alginfo.keylen = ses->aead_key.length;
426                 alginfo.key_enc_flags = 0;
427                 alginfo.key_type = RTA_DATA_IMM;
428
429                 if (ses->dir == DIR_ENC)
430                         shared_desc_len = cnstr_shdsc_gcm_encap(
431                                         cdb->sh_desc, true, swap,
432                                         &alginfo,
433                                         ses->iv.length,
434                                         ses->digest_length);
435                 else
436                         shared_desc_len = cnstr_shdsc_gcm_decap(
437                                         cdb->sh_desc, true, swap,
438                                         &alginfo,
439                                         ses->iv.length,
440                                         ses->digest_length);
441         } else {
442                 caam_cipher_alg(ses, &alginfo_c);
443                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
444                         PMD_TX_LOG(ERR, "unsupported cipher alg\n");
445                         return -ENOTSUP;
446                 }
447
448                 alginfo_c.key = (size_t)ses->cipher_key.data;
449                 alginfo_c.keylen = ses->cipher_key.length;
450                 alginfo_c.key_enc_flags = 0;
451                 alginfo_c.key_type = RTA_DATA_IMM;
452
453                 caam_auth_alg(ses, &alginfo_a);
454                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
455                         PMD_TX_LOG(ERR, "unsupported auth alg\n");
456                         return -ENOTSUP;
457                 }
458
459                 alginfo_a.key = (size_t)ses->auth_key.data;
460                 alginfo_a.keylen = ses->auth_key.length;
461                 alginfo_a.key_enc_flags = 0;
462                 alginfo_a.key_type = RTA_DATA_IMM;
463
464                 cdb->sh_desc[0] = alginfo_c.keylen;
465                 cdb->sh_desc[1] = alginfo_a.keylen;
466                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
467                                        MIN_JOB_DESC_SIZE,
468                                        (unsigned int *)cdb->sh_desc,
469                                        &cdb->sh_desc[2], 2);
470
471                 if (err < 0) {
472                         PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
473                         return err;
474                 }
475                 if (cdb->sh_desc[2] & 1)
476                         alginfo_c.key_type = RTA_DATA_IMM;
477                 else {
478                         alginfo_c.key = (size_t)dpaa_mem_vtop(
479                                                 (void *)(size_t)alginfo_c.key);
480                         alginfo_c.key_type = RTA_DATA_PTR;
481                 }
482                 if (cdb->sh_desc[2] & (1<<1))
483                         alginfo_a.key_type = RTA_DATA_IMM;
484                 else {
485                         alginfo_a.key = (size_t)dpaa_mem_vtop(
486                                                 (void *)(size_t)alginfo_a.key);
487                         alginfo_a.key_type = RTA_DATA_PTR;
488                 }
489                 cdb->sh_desc[0] = 0;
490                 cdb->sh_desc[1] = 0;
491                 cdb->sh_desc[2] = 0;
492                 if (is_proto_ipsec(ses)) {
493                         if (ses->dir == DIR_ENC) {
494                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
495                                                 cdb->sh_desc,
496                                                 true, swap, &ses->encap_pdb,
497                                                 (uint8_t *)&ses->ip4_hdr,
498                                                 &alginfo_c, &alginfo_a);
499                         } else if (ses->dir == DIR_DEC) {
500                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
501                                                 cdb->sh_desc,
502                                                 true, swap, &ses->decap_pdb,
503                                                 &alginfo_c, &alginfo_a);
504                         }
505                 } else {
506                         /* Auth-only length is set to 0 here; it will be
507                          * overwritten in the fd of each packet.
508                          */
509                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
510                                         true, swap, &alginfo_c, &alginfo_a,
511                                         ses->iv.length, 0,
512                                         ses->digest_length, ses->dir);
513                 }
514         }
515         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
516         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
517         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
518
519         return 0;
520 }
521
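/* Pull up to nb_ops completed frames from the qp output queue with a
 * volatile dequeue (VDQCR) and convert them back into crypto ops.
 */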
522 /* qp is lockless; it must be accessed by only one thread at a time */
523 static int
524 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
525 {
526         struct qman_fq *fq;
527         unsigned int pkts = 0;
528         int ret;
529         struct qm_dqrr_entry *dq;
530
531         fq = &qp->outq;
532         ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
533                                 DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
534         if (ret)
535                 return 0;
536
537         do {
538                 const struct qm_fd *fd;
539                 struct dpaa_sec_job *job;
540                 struct dpaa_sec_op_ctx *ctx;
541                 struct rte_crypto_op *op;
542
543                 dq = qman_dequeue(fq);
544                 if (!dq)
545                         continue;
546
547                 fd = &dq->fd;
548                 /* sg is embedded in an op ctx,
549                  * sg[0] is for output
550                  * sg[1] for input
551                  */
552                 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
553
554                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
555                 ctx->fd_status = fd->status;
556                 op = ctx->op;
557                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
558                         struct qm_sg_entry *sg_out;
559                         uint32_t len;
560
561                         sg_out = &job->sg[0];
562                         hw_sg_to_cpu(sg_out);
563                         len = sg_out->length;
564                         op->sym->m_src->pkt_len = len;
565                         op->sym->m_src->data_len = len;
566                 }
567                 if (!ctx->fd_status) {
568                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
569                 } else {
570                         PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
571                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
572                 }
573                 ops[pkts++] = op;
574
575                 /* report op status to the crypto op and then free the ctx memory */
576                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
577
578                 qman_dqrr_consume(fq, dq);
579         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
580
581         return pkts;
582 }
583
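/* Build a compound frame for auth-only processing of a scatter-gather
 * mbuf: sg[0] carries the digest output and sg[1] is an extension over the
 * chained input segments, plus the saved digest on decode so that SEC can
 * verify it in hardware.
 */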
584 static inline struct dpaa_sec_job *
585 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
586 {
587         struct rte_crypto_sym_op *sym = op->sym;
588         struct rte_mbuf *mbuf = sym->m_src;
589         struct dpaa_sec_job *cf;
590         struct dpaa_sec_op_ctx *ctx;
591         struct qm_sg_entry *sg, *out_sg, *in_sg;
592         phys_addr_t start_addr;
593         uint8_t *old_digest, extra_segs;
594
595         if (is_decode(ses))
596                 extra_segs = 3;
597         else
598                 extra_segs = 2;
599
600         if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
601                 PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
602                                                                 MAX_SG_ENTRIES);
603                 return NULL;
604         }
605         ctx = dpaa_sec_alloc_ctx(ses);
606         if (!ctx)
607                 return NULL;
608
609         cf = &ctx->job;
610         ctx->op = op;
611         old_digest = ctx->digest;
612
613         /* output */
614         out_sg = &cf->sg[0];
615         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
616         out_sg->length = ses->digest_length;
617         cpu_to_hw_sg(out_sg);
618
619         /* input */
620         in_sg = &cf->sg[1];
621         /* need to extend the input to a compound frame */
622         in_sg->extension = 1;
623         in_sg->final = 1;
624         in_sg->length = sym->auth.data.length;
625         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
626
627         /* 1st seg */
628         sg = in_sg + 1;
629         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
630         sg->length = mbuf->data_len - sym->auth.data.offset;
631         sg->offset = sym->auth.data.offset;
632
633         /* Successive segs */
634         mbuf = mbuf->next;
635         while (mbuf) {
636                 cpu_to_hw_sg(sg);
637                 sg++;
638                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
639                 sg->length = mbuf->data_len;
640                 mbuf = mbuf->next;
641         }
642
643         if (is_decode(ses)) {
644                 /* Digest verification case */
645                 cpu_to_hw_sg(sg);
646                 sg++;
647                 rte_memcpy(old_digest, sym->auth.digest.data,
648                                 ses->digest_length);
649                 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
650                 qm_sg_entry_set64(sg, start_addr);
651                 sg->length = ses->digest_length;
652                 in_sg->length += ses->digest_length;
653         } else {
654                 /* Digest calculation case */
655                 sg->length -= ses->digest_length;
656         }
657         sg->final = 1;
658         cpu_to_hw_sg(sg);
659         cpu_to_hw_sg(in_sg);
660
661         return cf;
662 }
663
664 /**
665  * packet looks like:
666  *              |<----data_len------->|
667  *    |ip_header|ah_header|icv|payload|
668  *              ^
669  *              |
670  *         mbuf->pkt.data
671  */
672 static inline struct dpaa_sec_job *
673 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
674 {
675         struct rte_crypto_sym_op *sym = op->sym;
676         struct rte_mbuf *mbuf = sym->m_src;
677         struct dpaa_sec_job *cf;
678         struct dpaa_sec_op_ctx *ctx;
679         struct qm_sg_entry *sg;
680         rte_iova_t start_addr;
681         uint8_t *old_digest;
682
683         ctx = dpaa_sec_alloc_ctx(ses);
684         if (!ctx)
685                 return NULL;
686
687         cf = &ctx->job;
688         ctx->op = op;
689         old_digest = ctx->digest;
690
691         start_addr = rte_pktmbuf_iova(mbuf);
692         /* output */
693         sg = &cf->sg[0];
694         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
695         sg->length = ses->digest_length;
696         cpu_to_hw_sg(sg);
697
698         /* input */
699         sg = &cf->sg[1];
700         if (is_decode(ses)) {
701                 /* need to extend the input to a compound frame */
702                 sg->extension = 1;
703                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
704                 sg->length = sym->auth.data.length + ses->digest_length;
705                 sg->final = 1;
706                 cpu_to_hw_sg(sg);
707
708                 sg = &cf->sg[2];
709                 /* hash result or digest, save digest first */
710                 rte_memcpy(old_digest, sym->auth.digest.data,
711                            ses->digest_length);
712                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
713                 sg->length = sym->auth.data.length;
714                 cpu_to_hw_sg(sg);
715
716                 /* let's check digest by hw */
717                 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
718                 sg++;
719                 qm_sg_entry_set64(sg, start_addr);
720                 sg->length = ses->digest_length;
721                 sg->final = 1;
722                 cpu_to_hw_sg(sg);
723         } else {
724                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
725                 sg->length = sym->auth.data.length;
726                 sg->final = 1;
727                 cpu_to_hw_sg(sg);
728         }
729
730         return cf;
731 }
732
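/* Build a compound frame for cipher-only processing of a scatter-gather
 * mbuf: the input chain holds the IV followed by the payload segments and
 * the output chain covers the (possibly distinct) destination segments.
 */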
733 static inline struct dpaa_sec_job *
734 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
735 {
736         struct rte_crypto_sym_op *sym = op->sym;
737         struct dpaa_sec_job *cf;
738         struct dpaa_sec_op_ctx *ctx;
739         struct qm_sg_entry *sg, *out_sg, *in_sg;
740         struct rte_mbuf *mbuf;
741         uint8_t req_segs;
742         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
743                         ses->iv.offset);
744
745         if (sym->m_dst) {
746                 mbuf = sym->m_dst;
747                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
748         } else {
749                 mbuf = sym->m_src;
750                 req_segs = mbuf->nb_segs * 2 + 3;
751         }
752
753         if (req_segs > MAX_SG_ENTRIES) {
754                 PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
755                                                                 MAX_SG_ENTRIES);
756                 return NULL;
757         }
758
759         ctx = dpaa_sec_alloc_ctx(ses);
760         if (!ctx)
761                 return NULL;
762
763         cf = &ctx->job;
764         ctx->op = op;
765
766         /* output */
767         out_sg = &cf->sg[0];
768         out_sg->extension = 1;
769         out_sg->length = sym->cipher.data.length;
770         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
771         cpu_to_hw_sg(out_sg);
772
773         /* 1st seg */
774         sg = &cf->sg[2];
775         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
776         sg->length = mbuf->data_len - sym->cipher.data.offset;
777         sg->offset = sym->cipher.data.offset;
778
779         /* Successive segs */
780         mbuf = mbuf->next;
781         while (mbuf) {
782                 cpu_to_hw_sg(sg);
783                 sg++;
784                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
785                 sg->length = mbuf->data_len;
786                 mbuf = mbuf->next;
787         }
788         sg->final = 1;
789         cpu_to_hw_sg(sg);
790
791         /* input */
792         mbuf = sym->m_src;
793         in_sg = &cf->sg[1];
794         in_sg->extension = 1;
795         in_sg->final = 1;
796         in_sg->length = sym->cipher.data.length + ses->iv.length;
797
798         sg++;
799         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
800         cpu_to_hw_sg(in_sg);
801
802         /* IV */
803         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
804         sg->length = ses->iv.length;
805         cpu_to_hw_sg(sg);
806
807         /* 1st seg */
808         sg++;
809         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
810         sg->length = mbuf->data_len - sym->cipher.data.offset;
811         sg->offset = sym->cipher.data.offset;
812
813         /* Successive segs */
814         mbuf = mbuf->next;
815         while (mbuf) {
816                 cpu_to_hw_sg(sg);
817                 sg++;
818                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
819                 sg->length = mbuf->data_len;
820                 mbuf = mbuf->next;
821         }
822         sg->final = 1;
823         cpu_to_hw_sg(sg);
824
825         return cf;
826 }
827
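/* Contiguous-mbuf variant of the cipher-only job: one output entry and a
 * two-entry input chain (IV, then payload).
 */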
828 static inline struct dpaa_sec_job *
829 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
830 {
831         struct rte_crypto_sym_op *sym = op->sym;
832         struct dpaa_sec_job *cf;
833         struct dpaa_sec_op_ctx *ctx;
834         struct qm_sg_entry *sg;
835         rte_iova_t src_start_addr, dst_start_addr;
836         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
837                         ses->iv.offset);
838
839         ctx = dpaa_sec_alloc_ctx(ses);
840         if (!ctx)
841                 return NULL;
842
843         cf = &ctx->job;
844         ctx->op = op;
845
846         src_start_addr = rte_pktmbuf_iova(sym->m_src);
847
848         if (sym->m_dst)
849                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
850         else
851                 dst_start_addr = src_start_addr;
852
853         /* output */
854         sg = &cf->sg[0];
855         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
856         sg->length = sym->cipher.data.length + ses->iv.length;
857         cpu_to_hw_sg(sg);
858
859         /* input */
860         sg = &cf->sg[1];
861
862         /* need to extend the input to a compound frame */
863         sg->extension = 1;
864         sg->final = 1;
865         sg->length = sym->cipher.data.length + ses->iv.length;
866         qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
867         cpu_to_hw_sg(sg);
868
869         sg = &cf->sg[2];
870         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
871         sg->length = ses->iv.length;
872         cpu_to_hw_sg(sg);
873
874         sg++;
875         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
876         sg->length = sym->cipher.data.length;
877         sg->final = 1;
878         cpu_to_hw_sg(sg);
879
880         return cf;
881 }
882
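/* Build a compound frame for AEAD (GCM) processing of a scatter-gather
 * mbuf. The input chain is IV, optional AAD and payload; on encode the
 * output chain additionally receives the digest, while on decode the saved
 * digest is appended to the input for in-hardware verification.
 */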
883 static inline struct dpaa_sec_job *
884 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
885 {
886         struct rte_crypto_sym_op *sym = op->sym;
887         struct dpaa_sec_job *cf;
888         struct dpaa_sec_op_ctx *ctx;
889         struct qm_sg_entry *sg, *out_sg, *in_sg;
890         struct rte_mbuf *mbuf;
891         uint8_t req_segs;
892         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
893                         ses->iv.offset);
894
895         if (sym->m_dst) {
896                 mbuf = sym->m_dst;
897                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
898         } else {
899                 mbuf = sym->m_src;
900                 req_segs = mbuf->nb_segs * 2 + 4;
901         }
902
903         if (ses->auth_only_len)
904                 req_segs++;
905
906         if (req_segs > MAX_SG_ENTRIES) {
907                 PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
908                                 MAX_SG_ENTRIES);
909                 return NULL;
910         }
911
912         ctx = dpaa_sec_alloc_ctx(ses);
913         if (!ctx)
914                 return NULL;
915
916         cf = &ctx->job;
917         ctx->op = op;
918
919         rte_prefetch0(cf->sg);
920
921         /* output */
922         out_sg = &cf->sg[0];
923         out_sg->extension = 1;
924         if (is_encode(ses))
925                 out_sg->length = sym->aead.data.length + ses->auth_only_len
926                                                 + ses->digest_length;
927         else
928                 out_sg->length = sym->aead.data.length + ses->auth_only_len;
929
930         /* output sg entries */
931         sg = &cf->sg[2];
932         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
933         cpu_to_hw_sg(out_sg);
934
935         /* 1st seg */
936         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
937         sg->length = mbuf->data_len - sym->aead.data.offset +
938                                         ses->auth_only_len;
939         sg->offset = sym->aead.data.offset - ses->auth_only_len;
940
941         /* Successive segs */
942         mbuf = mbuf->next;
943         while (mbuf) {
944                 cpu_to_hw_sg(sg);
945                 sg++;
946                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
947                 sg->length = mbuf->data_len;
948                 mbuf = mbuf->next;
949         }
950         sg->length -= ses->digest_length;
951
952         if (is_encode(ses)) {
953                 cpu_to_hw_sg(sg);
954                 /* set auth output */
955                 sg++;
956                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
957                 sg->length = ses->digest_length;
958         }
959         sg->final = 1;
960         cpu_to_hw_sg(sg);
961
962         /* input */
963         mbuf = sym->m_src;
964         in_sg = &cf->sg[1];
965         in_sg->extension = 1;
966         in_sg->final = 1;
967         if (is_encode(ses))
968                 in_sg->length = ses->iv.length + sym->aead.data.length
969                                                         + ses->auth_only_len;
970         else
971                 in_sg->length = ses->iv.length + sym->aead.data.length
972                                 + ses->auth_only_len + ses->digest_length;
973
974         /* input sg entries */
975         sg++;
976         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
977         cpu_to_hw_sg(in_sg);
978
979         /* 1st seg IV */
980         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
981         sg->length = ses->iv.length;
982         cpu_to_hw_sg(sg);
983
984         /* 2nd seg auth only */
985         if (ses->auth_only_len) {
986                 sg++;
987                 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
988                 sg->length = ses->auth_only_len;
989                 cpu_to_hw_sg(sg);
990         }
991
992         /* 3rd seg */
993         sg++;
994         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
995         sg->length = mbuf->data_len - sym->aead.data.offset;
996         sg->offset = sym->aead.data.offset;
997
998         /* Successive segs */
999         mbuf = mbuf->next;
1000         while (mbuf) {
1001                 cpu_to_hw_sg(sg);
1002                 sg++;
1003                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1004                 sg->length = mbuf->data_len;
1005                 mbuf = mbuf->next;
1006         }
1007
1008         if (is_decode(ses)) {
1009                 cpu_to_hw_sg(sg);
1010                 sg++;
1011                 memcpy(ctx->digest, sym->aead.digest.data,
1012                         ses->digest_length);
1013                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1014                 sg->length = ses->digest_length;
1015         }
1016         sg->final = 1;
1017         cpu_to_hw_sg(sg);
1018
1019         return cf;
1020 }
1021
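/* Contiguous-mbuf variant of the AEAD (GCM) job. */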
1022 static inline struct dpaa_sec_job *
1023 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1024 {
1025         struct rte_crypto_sym_op *sym = op->sym;
1026         struct dpaa_sec_job *cf;
1027         struct dpaa_sec_op_ctx *ctx;
1028         struct qm_sg_entry *sg;
1029         uint32_t length = 0;
1030         rte_iova_t src_start_addr, dst_start_addr;
1031         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1032                         ses->iv.offset);
1033
1034         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1035
1036         if (sym->m_dst)
1037                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1038         else
1039                 dst_start_addr = src_start_addr;
1040
1041         ctx = dpaa_sec_alloc_ctx(ses);
1042         if (!ctx)
1043                 return NULL;
1044
1045         cf = &ctx->job;
1046         ctx->op = op;
1047
1048         /* input */
1049         rte_prefetch0(cf->sg);
1050         sg = &cf->sg[2];
1051         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1052         if (is_encode(ses)) {
1053                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1054                 sg->length = ses->iv.length;
1055                 length += sg->length;
1056                 cpu_to_hw_sg(sg);
1057
1058                 sg++;
1059                 if (ses->auth_only_len) {
1060                         qm_sg_entry_set64(sg,
1061                                           dpaa_mem_vtop(sym->aead.aad.data));
1062                         sg->length = ses->auth_only_len;
1063                         length += sg->length;
1064                         cpu_to_hw_sg(sg);
1065                         sg++;
1066                 }
1067                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1068                 sg->length = sym->aead.data.length;
1069                 length += sg->length;
1070                 sg->final = 1;
1071                 cpu_to_hw_sg(sg);
1072         } else {
1073                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1074                 sg->length = ses->iv.length;
1075                 length += sg->length;
1076                 cpu_to_hw_sg(sg);
1077
1078                 sg++;
1079                 if (ses->auth_only_len) {
1080                         qm_sg_entry_set64(sg,
1081                                           dpaa_mem_vtop(sym->aead.aad.data));
1082                         sg->length = ses->auth_only_len;
1083                         length += sg->length;
1084                         cpu_to_hw_sg(sg);
1085                         sg++;
1086                 }
1087                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1088                 sg->length = sym->aead.data.length;
1089                 length += sg->length;
1090                 cpu_to_hw_sg(sg);
1091
1092                 memcpy(ctx->digest, sym->aead.digest.data,
1093                        ses->digest_length);
1094                 sg++;
1095
1096                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1097                 sg->length = ses->digest_length;
1098                 length += sg->length;
1099                 sg->final = 1;
1100                 cpu_to_hw_sg(sg);
1101         }
1102         /* input compound frame */
1103         cf->sg[1].length = length;
1104         cf->sg[1].extension = 1;
1105         cf->sg[1].final = 1;
1106         cpu_to_hw_sg(&cf->sg[1]);
1107
1108         /* output */
1109         sg++;
1110         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1111         qm_sg_entry_set64(sg,
1112                 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1113         sg->length = sym->aead.data.length + ses->auth_only_len;
1114         length = sg->length;
1115         if (is_encode(ses)) {
1116                 cpu_to_hw_sg(sg);
1117                 /* set auth output */
1118                 sg++;
1119                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1120                 sg->length = ses->digest_length;
1121                 length += sg->length;
1122         }
1123         sg->final = 1;
1124         cpu_to_hw_sg(sg);
1125
1126         /* output compound frame */
1127         cf->sg[0].length = length;
1128         cf->sg[0].extension = 1;
1129         cpu_to_hw_sg(&cf->sg[0]);
1130
1131         return cf;
1132 }
1133
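/* Build a compound frame for chained cipher+auth processing of a
 * scatter-gather mbuf; the layout mirrors the GCM variant but without AAD.
 */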
1134 static inline struct dpaa_sec_job *
1135 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1136 {
1137         struct rte_crypto_sym_op *sym = op->sym;
1138         struct dpaa_sec_job *cf;
1139         struct dpaa_sec_op_ctx *ctx;
1140         struct qm_sg_entry *sg, *out_sg, *in_sg;
1141         struct rte_mbuf *mbuf;
1142         uint8_t req_segs;
1143         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1144                         ses->iv.offset);
1145
1146         if (sym->m_dst) {
1147                 mbuf = sym->m_dst;
1148                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1149         } else {
1150                 mbuf = sym->m_src;
1151                 req_segs = mbuf->nb_segs * 2 + 4;
1152         }
1153
1154         if (req_segs > MAX_SG_ENTRIES) {
1155                 PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
1156                                 MAX_SG_ENTRIES);
1157                 return NULL;
1158         }
1159
1160         ctx = dpaa_sec_alloc_ctx(ses);
1161         if (!ctx)
1162                 return NULL;
1163
1164         cf = &ctx->job;
1165         ctx->op = op;
1166
1167         rte_prefetch0(cf->sg);
1168
1169         /* output */
1170         out_sg = &cf->sg[0];
1171         out_sg->extension = 1;
1172         if (is_encode(ses))
1173                 out_sg->length = sym->auth.data.length + ses->digest_length;
1174         else
1175                 out_sg->length = sym->auth.data.length;
1176
1177         /* output sg entries */
1178         sg = &cf->sg[2];
1179         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1180         cpu_to_hw_sg(out_sg);
1181
1182         /* 1st seg */
1183         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1184         sg->length = mbuf->data_len - sym->auth.data.offset;
1185         sg->offset = sym->auth.data.offset;
1186
1187         /* Successive segs */
1188         mbuf = mbuf->next;
1189         while (mbuf) {
1190                 cpu_to_hw_sg(sg);
1191                 sg++;
1192                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1193                 sg->length = mbuf->data_len;
1194                 mbuf = mbuf->next;
1195         }
1196         sg->length -= ses->digest_length;
1197
1198         if (is_encode(ses)) {
1199                 cpu_to_hw_sg(sg);
1200                 /* set auth output */
1201                 sg++;
1202                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1203                 sg->length = ses->digest_length;
1204         }
1205         sg->final = 1;
1206         cpu_to_hw_sg(sg);
1207
1208         /* input */
1209         mbuf = sym->m_src;
1210         in_sg = &cf->sg[1];
1211         in_sg->extension = 1;
1212         in_sg->final = 1;
1213         if (is_encode(ses))
1214                 in_sg->length = ses->iv.length + sym->auth.data.length;
1215         else
1216                 in_sg->length = ses->iv.length + sym->auth.data.length
1217                                                 + ses->digest_length;
1218
1219         /* input sg entries */
1220         sg++;
1221         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1222         cpu_to_hw_sg(in_sg);
1223
1224         /* 1st seg IV */
1225         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1226         sg->length = ses->iv.length;
1227         cpu_to_hw_sg(sg);
1228
1229         /* 2nd seg */
1230         sg++;
1231         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1232         sg->length = mbuf->data_len - sym->auth.data.offset;
1233         sg->offset = sym->auth.data.offset;
1234
1235         /* Successive segs */
1236         mbuf = mbuf->next;
1237         while (mbuf) {
1238                 cpu_to_hw_sg(sg);
1239                 sg++;
1240                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1241                 sg->length = mbuf->data_len;
1242                 mbuf = mbuf->next;
1243         }
1244
1245         sg->length -= ses->digest_length;
1246         if (is_decode(ses)) {
1247                 cpu_to_hw_sg(sg);
1248                 sg++;
1249                 memcpy(ctx->digest, sym->auth.digest.data,
1250                         ses->digest_length);
1251                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1252                 sg->length = ses->digest_length;
1253         }
1254         sg->final = 1;
1255         cpu_to_hw_sg(sg);
1256
1257         return cf;
1258 }
1259
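/* Contiguous-mbuf variant of the chained cipher+auth job. */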
1260 static inline struct dpaa_sec_job *
1261 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1262 {
1263         struct rte_crypto_sym_op *sym = op->sym;
1264         struct dpaa_sec_job *cf;
1265         struct dpaa_sec_op_ctx *ctx;
1266         struct qm_sg_entry *sg;
1267         rte_iova_t src_start_addr, dst_start_addr;
1268         uint32_t length = 0;
1269         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1270                         ses->iv.offset);
1271
1272         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1273         if (sym->m_dst)
1274                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1275         else
1276                 dst_start_addr = src_start_addr;
1277
1278         ctx = dpaa_sec_alloc_ctx(ses);
1279         if (!ctx)
1280                 return NULL;
1281
1282         cf = &ctx->job;
1283         ctx->op = op;
1284
1285         /* input */
1286         rte_prefetch0(cf->sg);
1287         sg = &cf->sg[2];
1288         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1289         if (is_encode(ses)) {
1290                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1291                 sg->length = ses->iv.length;
1292                 length += sg->length;
1293                 cpu_to_hw_sg(sg);
1294
1295                 sg++;
1296                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1297                 sg->length = sym->auth.data.length;
1298                 length += sg->length;
1299                 sg->final = 1;
1300                 cpu_to_hw_sg(sg);
1301         } else {
1302                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1303                 sg->length = ses->iv.length;
1304                 length += sg->length;
1305                 cpu_to_hw_sg(sg);
1306
1307                 sg++;
1308
1309                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1310                 sg->length = sym->auth.data.length;
1311                 length += sg->length;
1312                 cpu_to_hw_sg(sg);
1313
1314                 memcpy(ctx->digest, sym->auth.digest.data,
1315                        ses->digest_length);
1316                 sg++;
1317
1318                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1319                 sg->length = ses->digest_length;
1320                 length += sg->length;
1321                 sg->final = 1;
1322                 cpu_to_hw_sg(sg);
1323         }
1324         /* input compound frame */
1325         cf->sg[1].length = length;
1326         cf->sg[1].extension = 1;
1327         cf->sg[1].final = 1;
1328         cpu_to_hw_sg(&cf->sg[1]);
1329
1330         /* output */
1331         sg++;
1332         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1333         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1334         sg->length = sym->cipher.data.length;
1335         length = sg->length;
1336         if (is_encode(ses)) {
1337                 cpu_to_hw_sg(sg);
1338                 /* set auth output */
1339                 sg++;
1340                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1341                 sg->length = ses->digest_length;
1342                 length += sg->length;
1343         }
1344         sg->final = 1;
1345         cpu_to_hw_sg(sg);
1346
1347         /* output compound frame */
1348         cf->sg[0].length = length;
1349         cf->sg[0].extension = 1;
1350         cpu_to_hw_sg(&cf->sg[0]);
1351
1352         return cf;
1353 }
1354
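/* Build a frame for full protocol (IPsec) offload: SEC consumes the whole
 * packet and writes the encap/decap result to the output buffer, so a
 * single input entry and a single output entry suffice.
 */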
1355 static inline struct dpaa_sec_job *
1356 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1357 {
1358         struct rte_crypto_sym_op *sym = op->sym;
1359         struct dpaa_sec_job *cf;
1360         struct dpaa_sec_op_ctx *ctx;
1361         struct qm_sg_entry *sg;
1362         phys_addr_t src_start_addr, dst_start_addr;
1363
1364         ctx = dpaa_sec_alloc_ctx(ses);
1365         if (!ctx)
1366                 return NULL;
1367         cf = &ctx->job;
1368         ctx->op = op;
1369
1370         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1371
1372         if (sym->m_dst)
1373                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1374         else
1375                 dst_start_addr = src_start_addr;
1376
1377         /* input */
1378         sg = &cf->sg[1];
1379         qm_sg_entry_set64(sg, src_start_addr);
1380         sg->length = sym->m_src->pkt_len;
1381         sg->final = 1;
1382         cpu_to_hw_sg(sg);
1383
1384         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1385         /* output */
1386         sg = &cf->sg[0];
1387         qm_sg_entry_set64(sg, dst_start_addr);
1388         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1389         cpu_to_hw_sg(sg);
1390
1391         return cf;
1392 }
1393
1394 static uint16_t
1395 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1396                        uint16_t nb_ops)
1397 {
1398         /* Function to transmit the frames to the given device and queue pair */
1399         uint32_t loop;
1400         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1401         uint16_t num_tx = 0;
1402         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1403         uint32_t frames_to_send;
1404         struct rte_crypto_op *op;
1405         struct dpaa_sec_job *cf;
1406         dpaa_sec_session *ses;
1407         struct dpaa_sec_op_ctx *ctx;
1408         uint32_t auth_only_len;
1409         struct qman_fq *inq[DPAA_SEC_BURST];
1410
1411         while (nb_ops) {
1412                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1413                                 DPAA_SEC_BURST : nb_ops;
1414                 for (loop = 0; loop < frames_to_send; loop++) {
1415                         op = *(ops++);
1416                         switch (op->sess_type) {
1417                         case RTE_CRYPTO_OP_WITH_SESSION:
1418                                 ses = (dpaa_sec_session *)
1419                                         get_session_private_data(
1420                                                         op->sym->session,
1421                                                         cryptodev_driver_id);
1422                                 break;
1423                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1424                                 ses = (dpaa_sec_session *)
1425                                         get_sec_session_private_data(
1426                                                         op->sym->sec_session);
1427                                 break;
1428                         default:
1429                                 PMD_TX_LOG(ERR,
1430                                         "sessionless crypto op not supported");
1431                                 frames_to_send = loop;
1432                                 nb_ops = loop;
1433                                 goto send_pkts;
1434                         }
1435                         if (unlikely(!ses->qp || ses->qp != qp)) {
1436                                 PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1437                                                 ses->qp, qp);
1438                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1439                                         frames_to_send = loop;
1440                                         nb_ops = loop;
1441                                         goto send_pkts;
1442                                 }
1443                         }
1444
1445                         auth_only_len = op->sym->auth.data.length -
1446                                                 op->sym->cipher.data.length;
1447                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1448                                 if (is_auth_only(ses)) {
1449                                         cf = build_auth_only(op, ses);
1450                                 } else if (is_cipher_only(ses)) {
1451                                         cf = build_cipher_only(op, ses);
1452                                 } else if (is_aead(ses)) {
1453                                         cf = build_cipher_auth_gcm(op, ses);
1454                                         auth_only_len = ses->auth_only_len;
1455                                 } else if (is_auth_cipher(ses)) {
1456                                         cf = build_cipher_auth(op, ses);
1457                                 } else if (is_proto_ipsec(ses)) {
1458                                         cf = build_proto(op, ses);
1459                                 } else {
1460                                         PMD_TX_LOG(ERR, "unsupported sec op");
1461                                         frames_to_send = loop;
1462                                         nb_ops = loop;
1463                                         goto send_pkts;
1464                                 }
1465                         } else {
1466                                 if (is_auth_only(ses)) {
1467                                         cf = build_auth_only_sg(op, ses);
1468                                 } else if (is_cipher_only(ses)) {
1469                                         cf = build_cipher_only_sg(op, ses);
1470                                 } else if (is_aead(ses)) {
1471                                         cf = build_cipher_auth_gcm_sg(op, ses);
1472                                         auth_only_len = ses->auth_only_len;
1473                                 } else if (is_auth_cipher(ses)) {
1474                                         cf = build_cipher_auth_sg(op, ses);
1475                                 } else {
1476                                         PMD_TX_LOG(ERR, "unsupported sec op");
1477                                         frames_to_send = loop;
1478                                         nb_ops = loop;
1479                                         goto send_pkts;
1480                                 }
1481                         }
1482                         if (unlikely(!cf)) {
1483                                 frames_to_send = loop;
1484                                 nb_ops = loop;
1485                                 goto send_pkts;
1486                         }
1487
1488                         fd = &fds[loop];
1489                         inq[loop] = ses->inq;
1490                         fd->opaque_addr = 0;
1491                         fd->cmd = 0;
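                             /*
                              * Build a compound frame descriptor: cf->sg holds
                              * an output SG entry followed by an input SG entry,
                              * and the FD carries the IOVA of that pair.
                              */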
1492                         ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1493                         qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1494                         fd->_format1 = qm_fd_compound;
1495                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1496                         /* auth_only_len is set to 0 in the descriptor and
1497                          * is overwritten here in fd.cmd, which updates the
1498                          * DPOVRD register.
1499                          */
1500                         if (auth_only_len)
1501                                 fd->cmd = 0x80000000 | auth_only_len;
1502
1503                 }
1504 send_pkts:
1505                 loop = 0;
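                     /*
                      * qman_enqueue_multi_fq() may accept fewer frames than
                      * requested when the portal is busy, so retry until the
                      * whole batch has been queued.
                      */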
1506                 while (loop < frames_to_send) {
1507                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1508                                         frames_to_send - loop);
1509                 }
1510                 nb_ops -= frames_to_send;
1511                 num_tx += frames_to_send;
1512         }
1513
1514         dpaa_qp->tx_pkts += num_tx;
1515         dpaa_qp->tx_errs += nb_ops - num_tx;
1516
1517         return num_tx;
1518 }
1519
1520 static uint16_t
1521 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1522                        uint16_t nb_ops)
1523 {
1524         uint16_t num_rx;
1525         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1526
1527         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1528
1529         dpaa_qp->rx_pkts += num_rx;
1530         dpaa_qp->rx_errs += nb_ops - num_rx;
1531
1532         PMD_RX_LOG(DEBUG, "SEC received %d packets\n", num_rx);
1533
1534         return num_rx;
1535 }
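     /*
      * Illustrative only (not part of this driver): a typical application
      * poll loop over these burst hooks through the cryptodev API. The
      * names dev_id, qp_id, "ops", "nb" and BURST_SZ are hypothetical.
      *
      *     struct rte_crypto_op *ops[BURST_SZ];
      *     uint16_t sent, got;
      *
      *     sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
      *     // ops[sent..nb-1] were not accepted; retry or free them
      *     got = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, BURST_SZ);
      *     // completed ops carry RTE_CRYPTO_OP_STATUS_SUCCESS or _ERROR
      */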
1536
1537 /** Release queue pair */
1538 static int
1539 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1540                             uint16_t qp_id)
1541 {
1542         struct dpaa_sec_dev_private *internals;
1543         struct dpaa_sec_qp *qp = NULL;
1544
1545         PMD_INIT_FUNC_TRACE();
1546
1547         PMD_INIT_LOG(DEBUG, "dev = %p, queue = %d", dev, qp_id);
1548
1549         internals = dev->data->dev_private;
1550         if (qp_id >= internals->max_nb_queue_pairs) {
1551                 PMD_INIT_LOG(ERR, "Invalid qp_id %u, max supported qpid %d",
1552                              qp_id, internals->max_nb_queue_pairs);
1553                 return -EINVAL;
1554         }
1555
1556         qp = &internals->qps[qp_id];
1557         qp->internals = NULL;
1558         dev->data->queue_pairs[qp_id] = NULL;
1559
1560         return 0;
1561 }
1562
1563 /** Setup a queue pair */
1564 static int
1565 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1566                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1567                 __rte_unused int socket_id,
1568                 __rte_unused struct rte_mempool *session_pool)
1569 {
1570         struct dpaa_sec_dev_private *internals;
1571         struct dpaa_sec_qp *qp = NULL;
1572
1573         PMD_INIT_LOG(DEBUG, "dev = %p, queue = %d, conf = %p",
1574                      dev, qp_id, qp_conf);
1575
1576         internals = dev->data->dev_private;
1577         if (qp_id >= internals->max_nb_queue_pairs) {
1578                 PMD_INIT_LOG(ERR, "Invalid qp_id %u, max supported qpid %d",
1579                              qp_id, internals->max_nb_queue_pairs);
1580                 return -EINVAL;
1581         }
1582
1583         qp = &internals->qps[qp_id];
1584         qp->internals = internals;
1585         dev->data->queue_pairs[qp_id] = qp;
1586
1587         return 0;
1588 }
1589
1590 /** Start queue pair */
1591 static int
1592 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1593                           __rte_unused uint16_t queue_pair_id)
1594 {
1595         PMD_INIT_FUNC_TRACE();
1596
1597         return 0;
1598 }
1599
1600 /** Stop queue pair */
1601 static int
1602 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1603                          __rte_unused uint16_t queue_pair_id)
1604 {
1605         PMD_INIT_FUNC_TRACE();
1606
1607         return 0;
1608 }
1609
1610 /** Return the number of allocated queue pairs */
1611 static uint32_t
1612 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1613 {
1614         PMD_INIT_FUNC_TRACE();
1615
1616         return dev->data->nb_queue_pairs;
1617 }
1618
1619 /** Returns the size of session structure */
1620 static unsigned int
1621 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1622 {
1623         PMD_INIT_FUNC_TRACE();
1624
1625         return sizeof(dpaa_sec_session);
1626 }
1627
1628 static int
1629 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1630                      struct rte_crypto_sym_xform *xform,
1631                      dpaa_sec_session *session)
1632 {
1633         session->cipher_alg = xform->cipher.algo;
1634         session->iv.length = xform->cipher.iv.length;
1635         session->iv.offset = xform->cipher.iv.offset;
1636         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1637                                                RTE_CACHE_LINE_SIZE);
1638         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1639                 PMD_INIT_LOG(ERR, "No memory for cipher key\n");
1640                 return -ENOMEM;
1641         }
1642         session->cipher_key.length = xform->cipher.key.length;
1643
1644         memcpy(session->cipher_key.data, xform->cipher.key.data,
1645                xform->cipher.key.length);
1646         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1647                         DIR_ENC : DIR_DEC;
1648
1649         return 0;
1650 }
1651
1652 static int
1653 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1654                    struct rte_crypto_sym_xform *xform,
1655                    dpaa_sec_session *session)
1656 {
1657         session->auth_alg = xform->auth.algo;
1658         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1659                                              RTE_CACHE_LINE_SIZE);
1660         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1661                 PMD_INIT_LOG(ERR, "No memory for auth key\n");
1662                 return -ENOMEM;
1663         }
1664         session->auth_key.length = xform->auth.key.length;
1665         session->digest_length = xform->auth.digest_length;
1666
1667         memcpy(session->auth_key.data, xform->auth.key.data,
1668                xform->auth.key.length);
1669         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1670                         DIR_ENC : DIR_DEC;
1671
1672         return 0;
1673 }
1674
1675 static int
1676 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1677                    struct rte_crypto_sym_xform *xform,
1678                    dpaa_sec_session *session)
1679 {
1680         session->aead_alg = xform->aead.algo;
1681         session->iv.length = xform->aead.iv.length;
1682         session->iv.offset = xform->aead.iv.offset;
1683         session->auth_only_len = xform->aead.aad_length;
1684         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1685                                              RTE_CACHE_LINE_SIZE);
1686         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1687                 PMD_INIT_LOG(ERR, "No memory for aead key\n");
1688                 return -ENOMEM;
1689         }
1690         session->aead_key.length = xform->aead.key.length;
1691         session->digest_length = xform->aead.digest_length;
1692
1693         memcpy(session->aead_key.data, xform->aead.key.data,
1694                xform->aead.key.length);
1695         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1696                         DIR_ENC : DIR_DEC;
1697
1698         return 0;
1699 }
1700
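     /*
      * Each session claims a dedicated Rx FQ from a fixed pool of
      * max_nb_sessions entries: attach marks a free slot as used, detach
      * retires the FQ, takes it out of service and frees the slot.
      */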
1701 static struct qman_fq *
1702 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1703 {
1704         unsigned int i;
1705
1706         for (i = 0; i < qi->max_nb_sessions; i++) {
1707                 if (qi->inq_attach[i] == 0) {
1708                         qi->inq_attach[i] = 1;
1709                         return &qi->inq[i];
1710                 }
1711         }
1712         PMD_DRV_LOG(ERR, "All %u session queues in use", qi->max_nb_sessions);
1713
1714         return NULL;
1715 }
1716
1717 static int
1718 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1719 {
1720         unsigned int i;
1721
1722         for (i = 0; i < qi->max_nb_sessions; i++) {
1723                 if (&qi->inq[i] == fq) {
1724                         qman_retire_fq(fq, NULL);
1725                         qman_oos_fq(fq);
1726                         qi->inq_attach[i] = 0;
1727                         return 0;
1728                 }
1729         }
1730         return -1;
1731 }
1732
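     /*
      * Bind a session to a queue pair: prepare the shared descriptor (CDB),
      * make sure the calling lcore has an affined QMan portal, and init the
      * session Rx FQ so that SEC output lands on the qp's out FQ.
      */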
1733 static int
1734 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1735 {
1736         int ret;
1737
1738         sess->qp = qp;
1739         ret = dpaa_sec_prep_cdb(sess);
1740         if (ret) {
1741                 PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1742                 return -1;
1743         }
1744         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1745                 ret = rte_dpaa_portal_init((void *)0);
1746                 if (ret) {
1747                         PMD_DRV_LOG(ERR, "Failure in affining portal");
1748                         return ret;
1749                 }
1750         }
1751         ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1752                                qman_fq_fqid(&qp->outq));
1753         if (ret)
1754                 PMD_DRV_LOG(ERR, "Unable to init sec queue");
1755
1756         return ret;
1757 }
1758
1759 static int
1760 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1761                         uint16_t qp_id __rte_unused,
1762                         void *ses __rte_unused)
1763 {
1764         PMD_INIT_FUNC_TRACE();
1765         return 0;
1766 }
1767
1768 static int
1769 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1770                         uint16_t qp_id  __rte_unused,
1771                         void *ses)
1772 {
1773         dpaa_sec_session *sess = ses;
1774         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1775
1776         PMD_INIT_FUNC_TRACE();
1777
1778         if (sess->inq)
1779                 dpaa_sec_detach_rxq(qi, sess->inq);
1780         sess->inq = NULL;
1781
1782         sess->qp = NULL;
1783
1784         return 0;
1785 }
1786
1787 static int
1788 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1789                             struct rte_crypto_sym_xform *xform, void *sess)
1790 {
1791         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1792         dpaa_sec_session *session = sess;
1793
1794         PMD_INIT_FUNC_TRACE();
1795
1796         if (unlikely(sess == NULL)) {
1797                 RTE_LOG(ERR, PMD, "invalid session struct\n");
1798                 return -EINVAL;
1799         }
1800
1801         /* Default IV length = 0 */
1802         session->iv.length = 0;
1803
1804         /* Cipher Only */
1805         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1806                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1807                 dpaa_sec_cipher_init(dev, xform, session);
1808
1809         /* Authentication Only */
1810         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1811                    xform->next == NULL) {
1812                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1813                 dpaa_sec_auth_init(dev, xform, session);
1814
1815         /* Cipher then Authenticate */
1816         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1817                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1818                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1819                         dpaa_sec_cipher_init(dev, xform, session);
1820                         dpaa_sec_auth_init(dev, xform->next, session);
1821                 } else {
1822                         PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1823                         return -EINVAL;
1824                 }
1825
1826         /* Authenticate then Cipher */
1827         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1828                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1829                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1830                         dpaa_sec_auth_init(dev, xform, session);
1831                         dpaa_sec_cipher_init(dev, xform->next, session);
1832                 } else {
1833                         PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1834                         return -EINVAL;
1835                 }
1836
1837         /* AEAD operation for AES-GCM kind of Algorithms */
1838         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1839                    xform->next == NULL) {
1840                 dpaa_sec_aead_init(dev, xform, session);
1841
1842         } else {
1843                 PMD_DRV_LOG(ERR, "Invalid crypto type");
1844                 return -EINVAL;
1845         }
1846         session->ctx_pool = internals->ctx_pool;
1847         session->inq = dpaa_sec_attach_rxq(internals);
1848         if (session->inq == NULL) {
1849                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
1850                 goto err1;
1851         }
1852
1853         return 0;
1854
1855 err1:
1856         rte_free(session->cipher_key.data);
1857         rte_free(session->auth_key.data);
1858         memset(session, 0, sizeof(dpaa_sec_session));
1859
1860         return -EINVAL;
1861 }
1862
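     /*
      * Illustrative only: a cipher-then-auth xform chain of the shape
      * accepted by dpaa_sec_set_session_parameters() above. The keys,
      * lengths and IV_OFFSET are placeholders.
      *
      *     struct rte_crypto_sym_xform auth = {
      *             .type = RTE_CRYPTO_SYM_XFORM_AUTH,
      *             .auth = {
      *                     .op = RTE_CRYPTO_AUTH_OP_GENERATE,
      *                     .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
      *                     .key = { .data = hmac_key, .length = 20 },
      *                     .digest_length = 20,
      *             },
      *     };
      *     struct rte_crypto_sym_xform cipher = {
      *             .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
      *             .next = &auth,
      *             .cipher = {
      *                     .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
      *                     .algo = RTE_CRYPTO_CIPHER_AES_CBC,
      *                     .key = { .data = aes_key, .length = 16 },
      *                     .iv = { .offset = IV_OFFSET, .length = 16 },
      *             },
      *     };
      */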
1863 static int
1864 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1865                 struct rte_crypto_sym_xform *xform,
1866                 struct rte_cryptodev_sym_session *sess,
1867                 struct rte_mempool *mempool)
1868 {
1869         void *sess_private_data;
1870         int ret;
1871
1872         PMD_INIT_FUNC_TRACE();
1873
1874         if (rte_mempool_get(mempool, &sess_private_data)) {
1875                 CDEV_LOG_ERR(
1876                         "Couldn't get object from session mempool");
1877                 return -ENOMEM;
1878         }
1879
1880         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1881         if (ret != 0) {
1882                 PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1883                                 "session parameters");
1884
1885                 /* Return session to mempool */
1886                 rte_mempool_put(mempool, sess_private_data);
1887                 return ret;
1888         }
1889
1890         set_session_private_data(sess, dev->driver_id,
1891                         sess_private_data);
1892
1893
1894         return 0;
1895 }
1896
1897 /** Clear the memory of session so it doesn't leave key material behind */
1898 static void
1899 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1900                 struct rte_cryptodev_sym_session *sess)
1901 {
1902         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1903         uint8_t index = dev->driver_id;
1904         void *sess_priv = get_session_private_data(sess, index);
1905
1906         PMD_INIT_FUNC_TRACE();
1907
1908         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1909
1910         if (sess_priv) {
1911                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1912
1913                 if (s->inq)
1914                         dpaa_sec_detach_rxq(qi, s->inq);
1915                 rte_free(s->cipher_key.data);
1916                 rte_free(s->auth_key.data);
1917                 memset(s, 0, sizeof(dpaa_sec_session));
1918                 set_session_private_data(sess, index, NULL);
1919                 rte_mempool_put(sess_mp, sess_priv);
1920         }
1921 }
1922
1923 static int
1924 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1925                            struct rte_security_session_conf *conf,
1926                            void *sess)
1927 {
1928         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1929         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1930         struct rte_crypto_auth_xform *auth_xform;
1931         struct rte_crypto_cipher_xform *cipher_xform;
1932         dpaa_sec_session *session = (dpaa_sec_session *)sess;
1933
1934         PMD_INIT_FUNC_TRACE();
1935
1936         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1937                 cipher_xform = &conf->crypto_xform->cipher;
1938                 auth_xform = &conf->crypto_xform->next->auth;
1939         } else {
1940                 auth_xform = &conf->crypto_xform->auth;
1941                 cipher_xform = &conf->crypto_xform->next->cipher;
1942         }
1943         session->proto_alg = conf->protocol;
1944         session->cipher_key.data = rte_zmalloc(NULL,
1945                                                cipher_xform->key.length,
1946                                                RTE_CACHE_LINE_SIZE);
1947         if (session->cipher_key.data == NULL &&
1948                         cipher_xform->key.length > 0) {
1949                 RTE_LOG(ERR, PMD, "No memory for cipher key\n");
1950                 return -ENOMEM;
1951         }
1952
1953         session->cipher_key.length = cipher_xform->key.length;
1954         session->auth_key.data = rte_zmalloc(NULL,
1955                                         auth_xform->key.length,
1956                                         RTE_CACHE_LINE_SIZE);
1957         if (session->auth_key.data == NULL &&
1958                         auth_xform->key.length > 0) {
1959                 RTE_LOG(ERR, PMD, "No memory for auth key\n");
1960                 rte_free(session->cipher_key.data);
1961                 return -ENOMEM;
1962         }
1963         session->auth_key.length = auth_xform->key.length;
1964         memcpy(session->cipher_key.data, cipher_xform->key.data,
1965                         cipher_xform->key.length);
1966         memcpy(session->auth_key.data, auth_xform->key.data,
1967                         auth_xform->key.length);
1968
1969         switch (auth_xform->algo) {
1970         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1971                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1972                 break;
1973         case RTE_CRYPTO_AUTH_MD5_HMAC:
1974                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1975                 break;
1976         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1977                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1978                 break;
1979         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1980                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1981                 break;
1982         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1983                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1984                 break;
1985         case RTE_CRYPTO_AUTH_AES_CMAC:
1986                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1987                 break;
1988         case RTE_CRYPTO_AUTH_NULL:
1989                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1990                 break;
1991         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1992         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1993         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1994         case RTE_CRYPTO_AUTH_SHA1:
1995         case RTE_CRYPTO_AUTH_SHA256:
1996         case RTE_CRYPTO_AUTH_SHA512:
1997         case RTE_CRYPTO_AUTH_SHA224:
1998         case RTE_CRYPTO_AUTH_SHA384:
1999         case RTE_CRYPTO_AUTH_MD5:
2000         case RTE_CRYPTO_AUTH_AES_GMAC:
2001         case RTE_CRYPTO_AUTH_KASUMI_F9:
2002         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2003         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2004                 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2005                         auth_xform->algo);
2006                 goto out;
2007         default:
2008                 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2009                         auth_xform->algo);
2010                 goto out;
2011         }
2012
2013         switch (cipher_xform->algo) {
2014         case RTE_CRYPTO_CIPHER_AES_CBC:
2015                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2016                 break;
2017         case RTE_CRYPTO_CIPHER_3DES_CBC:
2018                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2019                 break;
2020         case RTE_CRYPTO_CIPHER_AES_CTR:
2021                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2022                 break;
2023         case RTE_CRYPTO_CIPHER_NULL:
2024         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2025         case RTE_CRYPTO_CIPHER_3DES_ECB:
2026         case RTE_CRYPTO_CIPHER_AES_ECB:
2027         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2028                 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2029                         cipher_xform->algo);
2030                 goto out;
2031         default:
2032                 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2033                         cipher_xform->algo);
2034                 goto out;
2035         }
2036
2037         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2038                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2039                                 sizeof(session->ip4_hdr));
2040                 session->ip4_hdr.ip_v = IPVERSION;
2041                 session->ip4_hdr.ip_hl = 5;
2042                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2043                                                 sizeof(session->ip4_hdr));
2044                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2045                 session->ip4_hdr.ip_id = 0;
2046                 session->ip4_hdr.ip_off = 0;
2047                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2048                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2049                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2050                                 : IPPROTO_AH;
2051                 session->ip4_hdr.ip_sum = 0;
2052                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2053                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2054                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2055                                                 (void *)&session->ip4_hdr,
2056                                                 sizeof(struct ip));
2057
2058                 session->encap_pdb.options =
2059                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2060                         PDBOPTS_ESP_OIHI_PDB_INL |
2061                         PDBOPTS_ESP_IVSRC |
2062                         PDBHMO_ESP_ENCAP_DTTL;
2063                 session->encap_pdb.spi = ipsec_xform->spi;
2064                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2065
2066                 session->dir = DIR_ENC;
2067         } else if (ipsec_xform->direction ==
2068                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2069                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2070                 session->decap_pdb.options = sizeof(struct ip) << 16;
2071                 session->dir = DIR_DEC;
2072         } else
2073                 goto out;
2074         session->ctx_pool = internals->ctx_pool;
2075         session->inq = dpaa_sec_attach_rxq(internals);
2076         if (session->inq == NULL) {
2077                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
2078                 goto out;
2079         }
2080
2081
2082         return 0;
2083 out:
2084         rte_free(session->auth_key.data);
2085         rte_free(session->cipher_key.data);
2086         memset(session, 0, sizeof(dpaa_sec_session));
2087         return -1;
2088 }
2089
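     /*
      * Illustrative only: an egress IPsec tunnel conf of the shape parsed
      * by dpaa_sec_set_ipsec_session() above. The SPI value and the xform
      * chain are placeholders.
      *
      *     struct rte_security_session_conf conf = {
      *             .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
      *             .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
      *             .ipsec = {
      *                     .spi = 0x1000,
      *                     .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
      *                     .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
      *                     .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
      *                     .tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
      *             },
      *             .crypto_xform = &cipher_then_auth_xform,
      *     };
      */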
2090 static int
2091 dpaa_sec_security_session_create(void *dev,
2092                                  struct rte_security_session_conf *conf,
2093                                  struct rte_security_session *sess,
2094                                  struct rte_mempool *mempool)
2095 {
2096         void *sess_private_data;
2097         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2098         int ret;
2099
2100         if (rte_mempool_get(mempool, &sess_private_data)) {
2101                 CDEV_LOG_ERR(
2102                         "Couldn't get object from session mempool");
2103                 return -ENOMEM;
2104         }
2105
2106         switch (conf->protocol) {
2107         case RTE_SECURITY_PROTOCOL_IPSEC:
2108                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2109                                 sess_private_data);
2110                 break;
2111         case RTE_SECURITY_PROTOCOL_MACSEC:
2112                 ret = -ENOTSUP;
2113                 break;
2114         default:
2115                 ret = -EINVAL;
2115         }
2116         if (ret != 0) {
2117                 PMD_DRV_LOG(ERR,
2118                         "DPAA PMD: failed to configure session parameters");
2119
2120                 /* Return session to mempool */
2121                 rte_mempool_put(mempool, sess_private_data);
2122                 return ret;
2123         }
2124
2125         set_sec_session_private_data(sess, sess_private_data);
2126
2127         return ret;
2128 }
2129
2130 /** Clear the memory of session so it doesn't leave key material behind */
2131 static int
2132 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2133                 struct rte_security_session *sess)
2134 {
2135         PMD_INIT_FUNC_TRACE();
2136         void *sess_priv = get_sec_session_private_data(sess);
2137
2138         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2139
2140         if (sess_priv) {
2141                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2142
2143                 rte_free(s->cipher_key.data);
2144                 rte_free(s->auth_key.data);
2145                 memset(s, 0, sizeof(dpaa_sec_session));
2146                 set_sec_session_private_data(sess, NULL);
2147                 rte_mempool_put(sess_mp, sess_priv);
2148         }
2149         return 0;
2150 }
2151
2152
2153 static int
2154 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2155                        struct rte_cryptodev_config *config __rte_unused)
2156 {
2157         PMD_INIT_FUNC_TRACE();
2158
2159         return 0;
2160 }
2161
2162 static int
2163 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2164 {
2165         PMD_INIT_FUNC_TRACE();
2166         return 0;
2167 }
2168
2169 static void
2170 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2171 {
2172         PMD_INIT_FUNC_TRACE();
2173 }
2174
2175 static int
2176 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
2177 {
2178         PMD_INIT_FUNC_TRACE();
2179         return 0;
2180 }
2181
2182 static void
2183 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2184                        struct rte_cryptodev_info *info)
2185 {
2186         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2187
2188         PMD_INIT_FUNC_TRACE();
2189         if (info != NULL) {
2190                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2191                 info->feature_flags = dev->feature_flags;
2192                 info->capabilities = dpaa_sec_capabilities;
2193                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2194                 info->sym.max_nb_sessions_per_qp =
2195                         RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2196                         RTE_DPAA_MAX_NB_SEC_QPS;
2197                 info->driver_id = cryptodev_driver_id;
2198         }
2199 }
2200
2201 static struct rte_cryptodev_ops crypto_ops = {
2202         .dev_configure        = dpaa_sec_dev_configure,
2203         .dev_start            = dpaa_sec_dev_start,
2204         .dev_stop             = dpaa_sec_dev_stop,
2205         .dev_close            = dpaa_sec_dev_close,
2206         .dev_infos_get        = dpaa_sec_dev_infos_get,
2207         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2208         .queue_pair_release   = dpaa_sec_queue_pair_release,
2209         .queue_pair_start     = dpaa_sec_queue_pair_start,
2210         .queue_pair_stop      = dpaa_sec_queue_pair_stop,
2211         .queue_pair_count     = dpaa_sec_queue_pair_count,
2212         .session_get_size     = dpaa_sec_session_get_size,
2213         .session_configure    = dpaa_sec_session_configure,
2214         .session_clear        = dpaa_sec_session_clear,
2215         .qp_attach_session    = dpaa_sec_qp_attach_sess,
2216         .qp_detach_session    = dpaa_sec_qp_detach_sess,
2217 };
2218
2219 static const struct rte_security_capability *
2220 dpaa_sec_capabilities_get(void *device __rte_unused)
2221 {
2222         return dpaa_sec_security_cap;
2223 }
2224
2225 struct rte_security_ops dpaa_sec_security_ops = {
2226         .session_create = dpaa_sec_security_session_create,
2227         .session_update = NULL,
2228         .session_stats_get = NULL,
2229         .session_destroy = dpaa_sec_security_session_destroy,
2230         .set_pkt_metadata = NULL,
2231         .capabilities_get = dpaa_sec_capabilities_get
2232 };
2233
2234 static int
2235 dpaa_sec_uninit(struct rte_cryptodev *dev)
2236 {
2237         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2238
2239         if (dev == NULL)
2240                 return -ENODEV;
2241
2242         rte_free(dev->security_ctx);
2243
2244         rte_mempool_free(internals->ctx_pool);
2245         rte_free(internals);
2246
2247         PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u\n",
2248                      dev->data->name, rte_socket_id());
2249
2250         return 0;
2251 }
2252
2253 static int
2254 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2255 {
2256         struct dpaa_sec_dev_private *internals;
2257         struct rte_security_ctx *security_instance;
2258         struct dpaa_sec_qp *qp;
2259         uint32_t i, flags;
2260         int ret;
2261         char str[20];
2262
2263         PMD_INIT_FUNC_TRACE();
2264
2265         cryptodev->driver_id = cryptodev_driver_id;
2266         cryptodev->dev_ops = &crypto_ops;
2267
2268         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2269         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2270         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2271                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2272                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2273                         RTE_CRYPTODEV_FF_SECURITY |
2274                         RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2275
2276         internals = cryptodev->data->dev_private;
2277         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2278         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2279
2280         /*
2281          * For secondary processes, we don't initialise any further as primary
2282          * has already done this work. Only check we don't need a different
2283          * RX function
2284          */
2285         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2286                 PMD_INIT_LOG(DEBUG, "Device already initialized by primary process");
2287                 return 0;
2288         }
2289
2290         /* Initialize security_ctx only for primary process */
2291         security_instance = rte_malloc("rte_security_instances_ops",
2292                                 sizeof(struct rte_security_ctx), 0);
2293         if (security_instance == NULL)
2294                 return -ENOMEM;
2295         security_instance->device = (void *)cryptodev;
2296         security_instance->ops = &dpaa_sec_security_ops;
2297         security_instance->sess_cnt = 0;
2298         cryptodev->security_ctx = security_instance;
2299
2300         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2301                 /* init qman fq for queue pair */
2302                 qp = &internals->qps[i];
2303                 ret = dpaa_sec_init_tx(&qp->outq);
2304                 if (ret) {
2305                         PMD_INIT_LOG(ERR, "failed to config tx of queue pair %d", i);
2306                         goto init_error;
2307                 }
2308         }
2309
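             /*
              * Rx FQs are created locked for driver use, with QMan-allocated
              * (dynamic) FQIDs, and targeted at the SEC direct-connect portal.
              */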
2310         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2311                 QMAN_FQ_FLAG_TO_DCPORTAL;
2312         for (i = 0; i < internals->max_nb_sessions; i++) {
2313                 /* create rx qman fq for sessions */
2314                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2315                 if (unlikely(ret != 0)) {
2316                         PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
2317                         goto init_error;
2318                 }
2319         }
2320
2321         snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
2322         internals->ctx_pool = rte_mempool_create((const char *)str,
2323                         CTX_POOL_NUM_BUFS,
2324                         CTX_POOL_BUF_SIZE,
2325                         CTX_POOL_CACHE_SIZE, 0,
2326                         NULL, NULL, NULL, NULL,
2327                         SOCKET_ID_ANY, 0);
2328         if (!internals->ctx_pool) {
2329                 RTE_LOG(ERR, PMD, "%s create failed\n", str);
2330                 goto init_error;
2331         }
2332
2333         PMD_INIT_LOG(DEBUG, "driver %s: created\n", cryptodev->data->name);
2334         return 0;
2335
2336 init_error:
2337         PMD_INIT_LOG(ERR, "driver %s: create failed\n", cryptodev->data->name);
2338
2339         dpaa_sec_uninit(cryptodev);
2340         return -EFAULT;
2341 }
2342
2343 static int
2344 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2345                                 struct rte_dpaa_device *dpaa_dev)
2346 {
2347         struct rte_cryptodev *cryptodev;
2348         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2349
2350         int retval;
2351
2352         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d", dpaa_dev->id.dev_id);
2353
2354         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2355         if (cryptodev == NULL)
2356                 return -ENOMEM;
2357
2358         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2359                 cryptodev->data->dev_private = rte_zmalloc_socket(
2360                                         "cryptodev private structure",
2361                                         sizeof(struct dpaa_sec_dev_private),
2362                                         RTE_CACHE_LINE_SIZE,
2363                                         rte_socket_id());
2364
2365                 if (cryptodev->data->dev_private == NULL)
2366                         rte_panic("Cannot allocate memory for private "
2367                                         "device data");
2368         }
2369
2370         dpaa_dev->crypto_dev = cryptodev;
2371         cryptodev->device = &dpaa_dev->device;
2372         cryptodev->device->driver = &dpaa_drv->driver;
2373
2374         /* init user callbacks */
2375         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2376
2377         /* if sec device version is not configured */
2378         if (!rta_get_sec_era()) {
2379                 const struct device_node *caam_node;
2380
2381                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2382                         const uint32_t *prop = of_get_property(caam_node,
2383                                         "fsl,sec-era",
2384                                         NULL);
2385                         if (prop) {
2386                                 rta_set_sec_era(
2387                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2388                                 break;
2389                         }
2390                 }
2391         }
2392
2393         /* Invoke PMD device initialization function */
2394         retval = dpaa_sec_dev_init(cryptodev);
2395         if (retval == 0)
2396                 return 0;
2397
2398         /* In case of error, cleanup is done */
2399         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2400                 rte_free(cryptodev->data->dev_private);
2401
2402         rte_cryptodev_pmd_release_device(cryptodev);
2403
2404         return -ENXIO;
2405 }
2406
2407 static int
2408 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2409 {
2410         struct rte_cryptodev *cryptodev;
2411         int ret;
2412
2413         cryptodev = dpaa_dev->crypto_dev;
2414         if (cryptodev == NULL)
2415                 return -ENODEV;
2416
2417         ret = dpaa_sec_uninit(cryptodev);
2418         if (ret)
2419                 return ret;
2420
2421         return rte_cryptodev_pmd_destroy(cryptodev);
2422 }
2423
2424 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2425         .drv_type = FSL_DPAA_CRYPTO,
2426         .driver = {
2427                 .name = "DPAA SEC PMD"
2428         },
2429         .probe = cryptodev_dpaa_sec_probe,
2430         .remove = cryptodev_dpaa_sec_remove,
2431 };
2432
2433 static struct cryptodev_driver dpaa_sec_crypto_drv;
2434
2435 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2436 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2437                 cryptodev_driver_id);