crypto/dpaa_sec: use iova2virt instead of memseg iteration
drivers/crypto/dpaa_sec/dpaa_sec.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_security_driver.h>
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <of.h>

/* RTA header files */
#include <hw/desc/common.h>
#include <hw/desc/algo.h>
#include <hw/desc/ipsec.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_log.h>

enum rta_sec_era rta_sec_era;

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }

        /* report op status to sym->op and then free the ctx memory */
        rte_mempool_put(ctx->ctx_pool, (void *)ctx);
}

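/* Allocate a per-op context (job descriptor + SG entries) from the
 * session's ctx mempool, clear its SG entries and record the
 * virt-to-IOVA offset used by dpaa_mem_vtop_ctx().
 */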
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
{
        struct dpaa_sec_op_ctx *ctx;
        int retval;

        retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
        if (!ctx || retval) {
                PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, so it is called 4 times
         * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
         * each packet, and memset() is costlier than dcbz_64().
         */
        dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
        dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);

        ctx->ctx_pool = ses->ctx_pool;
        ctx->vtop_offset = (size_t) ctx
                                - rte_mempool_virt2iova(ctx);

        return ctx;
}

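/* Convert a virtual address to an IOVA by walking the memseg table.
 * Used for addresses outside the ctx mempool, e.g. keys, IVs and AAD
 * carried in the crypto op.
 */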
static inline rte_iova_t
dpaa_mem_vtop(void *vaddr)
{
        const struct rte_memseg *memseg = rte_eal_get_physmem_layout();
        uint64_t vaddr_64, paddr;
        int i;

        vaddr_64 = (size_t)vaddr;
        for (i = 0; i < RTE_MAX_MEMSEG && memseg[i].addr_64 != 0; i++) {
                if (vaddr_64 >= memseg[i].addr_64 &&
                    vaddr_64 < memseg[i].addr_64 + memseg[i].len) {
                        paddr = memseg[i].iova +
                                (vaddr_64 - memseg[i].addr_64);

                        return (rte_iova_t)paddr;
                }
        }
        return (size_t)NULL;
}

/* virtual address conversion when mempool support is available for ctx */
static inline phys_addr_t
dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
{
        return (size_t)vaddr - ctx->vtop_offset;
}

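/* IOVA to virtual address conversion, delegated to rte_mem_iova2virt()
 * instead of iterating over the memseg list.
 */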
static inline void *
dpaa_mem_ptov(rte_iova_t paddr)
{
        return rte_mem_iova2virt(paddr);
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
                   fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* initialize the queue with dest channel set to the CAAM channel so
 * that all the packets in this queue get dispatched to CAAM
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = qm_channel_caam;
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);

        return ret;
}

/* frames enqueued on in_fq are processed by CAAM, which puts the crypto result into out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output,
         * sg[1] is for input
         */
        job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                ctx->op->sym->m_src->pkt_len = len;
                ctx->op->sym->m_src->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* caam result is put into this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                PMD_INIT_LOG(ERR, "unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_cipher_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
}

static inline int is_auth_only(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_auth_cipher(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
                (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
                (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
}

static inline int is_proto_ipsec(dpaa_sec_session *ses)
{
        return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

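/* Map the session's auth algorithm to the CAAM algorithm selector,
 * using the IPsec protocol opcodes when the session is IPsec.
 */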
static inline void
caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
{
        switch (ses->auth_alg) {
        case RTE_CRYPTO_AUTH_NULL:
                ses->digest_length = 0;
                break;
        case RTE_CRYPTO_AUTH_MD5_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA1_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA224_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA256_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA384_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        case RTE_CRYPTO_AUTH_SHA512_HMAC:
                alginfo_a->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
                alginfo_a->algmode = OP_ALG_AAI_HMAC;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
        }
}

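/* Map the session's cipher algorithm to the CAAM algorithm selector. */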
static inline void
caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
{
        switch (ses->cipher_alg) {
        case RTE_CRYPTO_CIPHER_NULL:
                break;
        case RTE_CRYPTO_CIPHER_AES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_3DES_CBC:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
                alginfo_c->algmode = OP_ALG_AAI_CBC;
                break;
        case RTE_CRYPTO_CIPHER_AES_CTR:
                alginfo_c->algtype =
                        (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
                        OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
                alginfo_c->algmode = OP_ALG_AAI_CTR;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
        }
}

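/* Map the session's AEAD algorithm (only AES-GCM is supported) to the
 * CAAM algorithm selector.
 */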
static inline void
caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
{
        switch (ses->aead_alg) {
        case RTE_CRYPTO_AEAD_AES_GCM:
                alginfo->algtype = OP_ALG_ALGSEL_AES;
                alginfo->algmode = OP_ALG_AAI_GCM;
                break;
        default:
                PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
        }
}


/* prepare command block (shared descriptor) of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        uint32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        if (is_cipher_only(ses)) {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_blkcipher(
                                                cdb->sh_desc, true,
                                                swap, &alginfo_c,
                                                NULL,
                                                ses->iv.length,
                                                ses->dir);
        } else if (is_auth_only(ses)) {
                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
                                                   swap, &alginfo_a,
                                                   !ses->dir,
                                                   ses->digest_length);
        } else if (is_aead(ses)) {
                caam_aead_alg(ses, &alginfo);
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported aead alg\n");
                        return -ENOTSUP;
                }
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
        } else {
                caam_cipher_alg(ses, &alginfo_c);
                if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported cipher alg\n");
                        return -ENOTSUP;
                }

                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;

                caam_auth_alg(ses, &alginfo_a);
                if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        PMD_TX_LOG(ERR, "not supported auth alg\n");
                        return -ENOTSUP;
                }

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                if (is_proto_ipsec(ses)) {
                        if (ses->dir == DIR_ENC) {
                                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                                cdb->sh_desc,
                                                true, swap, &ses->encap_pdb,
                                                (uint8_t *)&ses->ip4_hdr,
                                                &alginfo_c, &alginfo_a);
                        } else if (ses->dir == DIR_DEC) {
                                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                                cdb->sh_desc,
                                                true, swap, &ses->decap_pdb,
                                                &alginfo_c, &alginfo_a);
                        }
                } else {
                        /* Auth_only_len is set to 0 here and it will be
                         * overwritten in the fd for each packet.
                         */
                        shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                        true, swap, &alginfo_c, &alginfo_a,
                                        ses->iv.length, 0,
                                        ses->digest_length, ses->dir);
                }
        }
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

/* qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int ret;
        struct qm_dqrr_entry *dq;

        fq = &qp->outq;
        ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
                                DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx,
                 * sg[0] is for output,
                 * sg[1] is for input
                 */
                job = dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        op->sym->m_src->pkt_len = len;
                        op->sym->m_src->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        printf("\nSEC return err: 0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

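/* Build a compound frame for an auth-only op on a multi-segment mbuf:
 * sg[0] receives the digest, sg[1] is an extension entry chaining the
 * input segments (plus the expected digest when decoding).
 */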
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
                PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
                                                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->auth.data.length;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        } else {
                /* Digest calculation case */
                sg->length -= ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];
        if (is_decode(ses)) {
                /* need to extend the input to a compound frame */
                sg->extension = 1;
                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
                sg->length = sym->auth.data.length + ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);

                sg = &cf->sg[2];
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                           ses->digest_length);
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                cpu_to_hw_sg(sg);

                /* let hardware verify the digest */
                start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }

        return cf;
}

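/* Build a compound frame for a cipher-only op on multi-segment mbufs:
 * sg[0] chains the output segments, sg[1] chains the IV followed by
 * the input segments.
 */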
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }

        if (req_segs > MAX_SG_ENTRIES) {
                PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
                                                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = sym->cipher.data.length;
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = sym->cipher.data.length + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->cipher.data.offset;
        sg->offset = sym->cipher.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

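/* Build a compound frame for a cipher-only op on a contiguous mbuf:
 * sg[0] is the output (in place or in m_dst), sg[1] chains the IV and
 * the input data.
 */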
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = sym->cipher.data.length + ses->iv.length;
        qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

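/* Build a compound frame for an AEAD (GCM) op on multi-segment mbufs:
 * the input chains IV, optional AAD and data (plus the digest when
 * decoding); the output chains the data segments (plus the digest when
 * encoding).
 */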
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (req_segs > MAX_SG_ENTRIES) {
                PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->auth_only_len
                                                + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length + ses->auth_only_len;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset +
                                        ses->auth_only_len;
        sg->offset = sym->aead.data.offset - ses->auth_only_len;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

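/* Contiguous-mbuf variant of the AEAD (GCM) frame builder above. */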
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
        sg->length = sym->aead.data.length + ses->auth_only_len;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

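/* Build a compound frame for a chained cipher+auth op on multi-segment
 * mbufs: the input chains IV and data (plus the digest when decoding);
 * the output chains the data segments (plus the digest when encoding).
 */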
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (req_segs > MAX_SG_ENTRIES) {
                PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->auth.data.length + ses->digest_length;
        else
                out_sg->length = sym->auth.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->auth.data.length;
        else
                in_sg->length = ses->iv.length + sym->auth.data.length
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        sg->length -= ses->digest_length;
        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->auth.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

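/* Contiguous-mbuf variant of the chained cipher+auth frame builder
 * above.
 */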
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;

                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

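/* Build a simple frame for IPsec protocol offload: the whole packet is
 * handed to SEC; the output length covers the full buffer so the
 * encapsulated packet fits.
 */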
static inline struct dpaa_sec_job *
build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        phys_addr_t src_start_addr, dst_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses);
        if (!ctx)
                return NULL;
        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_mtophys(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* input */
        sg = &cf->sg[1];
        qm_sg_entry_set64(sg, src_start_addr);
        sg->length = sym->m_src->pkt_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr);
        sg->length = sym->m_src->buf_len - sym->m_src->data_off;
        cpu_to_hw_sg(sg);

        return cf;
}

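/* Enqueue crypto ops: select the frame builder matching the session
 * type and mbuf layout, then push up to DPAA_SEC_BURST frame
 * descriptors at a time to the session's input frame queue.
 */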
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
                       uint16_t nb_ops)
{
        /* Transmit the frames to the given device and queue pair */
1408         uint32_t loop;
1409         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1410         uint16_t num_tx = 0;
1411         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1412         uint32_t frames_to_send;
1413         struct rte_crypto_op *op;
1414         struct dpaa_sec_job *cf;
1415         dpaa_sec_session *ses;
1416         struct dpaa_sec_op_ctx *ctx;
1417         uint32_t auth_only_len;
1418         struct qman_fq *inq[DPAA_SEC_BURST];
1419
1420         while (nb_ops) {
1421                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1422                                 DPAA_SEC_BURST : nb_ops;
1423                 for (loop = 0; loop < frames_to_send; loop++) {
1424                         op = *(ops++);
1425                         switch (op->sess_type) {
1426                         case RTE_CRYPTO_OP_WITH_SESSION:
1427                                 ses = (dpaa_sec_session *)
1428                                         get_session_private_data(
1429                                                         op->sym->session,
1430                                                         cryptodev_driver_id);
1431                                 break;
1432                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1433                                 ses = (dpaa_sec_session *)
1434                                         get_sec_session_private_data(
1435                                                         op->sym->sec_session);
1436                                 break;
1437                         default:
1438                                 PMD_TX_LOG(ERR,
1439                                         "sessionless crypto op not supported");
1440                                 frames_to_send = loop;
1441                                 nb_ops = loop;
1442                                 goto send_pkts;
1443                         }
1444                         if (unlikely(!ses->qp || ses->qp != qp)) {
1445                                 PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1446                                                 ses->qp, qp);
1447                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1448                                         frames_to_send = loop;
1449                                         nb_ops = loop;
1450                                         goto send_pkts;
1451                                 }
1452                         }
1453
1454                         auth_only_len = op->sym->auth.data.length -
1455                                                 op->sym->cipher.data.length;
1456                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1457                                 if (is_auth_only(ses)) {
1458                                         cf = build_auth_only(op, ses);
1459                                 } else if (is_cipher_only(ses)) {
1460                                         cf = build_cipher_only(op, ses);
1461                                 } else if (is_aead(ses)) {
1462                                         cf = build_cipher_auth_gcm(op, ses);
1463                                         auth_only_len = ses->auth_only_len;
1464                                 } else if (is_auth_cipher(ses)) {
1465                                         cf = build_cipher_auth(op, ses);
1466                                 } else if (is_proto_ipsec(ses)) {
1467                                         cf = build_proto(op, ses);
1468                                 } else {
1469                                         PMD_TX_LOG(ERR, "not supported sec op");
1470                                         frames_to_send = loop;
1471                                         nb_ops = loop;
1472                                         goto send_pkts;
1473                                 }
1474                         } else {
1475                                 if (is_auth_only(ses)) {
1476                                         cf = build_auth_only_sg(op, ses);
1477                                 } else if (is_cipher_only(ses)) {
1478                                         cf = build_cipher_only_sg(op, ses);
1479                                 } else if (is_aead(ses)) {
1480                                         cf = build_cipher_auth_gcm_sg(op, ses);
1481                                         auth_only_len = ses->auth_only_len;
1482                                 } else if (is_auth_cipher(ses)) {
1483                                         cf = build_cipher_auth_sg(op, ses);
1484                                 } else {
1485                                         PMD_TX_LOG(ERR, "unsupported sec op");
1486                                         frames_to_send = loop;
1487                                         nb_ops = loop;
1488                                         goto send_pkts;
1489                                 }
1490                         }
1491                         if (unlikely(!cf)) {
1492                                 frames_to_send = loop;
1493                                 nb_ops = loop;
1494                                 goto send_pkts;
1495                         }
1496
1497                         fd = &fds[loop];
1498                         inq[loop] = ses->inq;
1499                         fd->opaque_addr = 0;
1500                         fd->cmd = 0;
1501                         ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1502                         qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1503                         fd->_format1 = qm_fd_compound;
1504                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1505                         /* auth_only_len is set to 0 in the descriptor and
1506                          * is overwritten here in fd->cmd, which updates
1507                          * the DPOVRD register.
1508                          */
1509                         if (auth_only_len)
1510                                 fd->cmd = 0x80000000 | auth_only_len;
1511
1512                 }
1513 send_pkts:
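                  /* qman_enqueue_multi_fq() may accept fewer frames than
                   * requested, so retry from the first unsent frame until
                   * the whole batch has been enqueued.
                   */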
1514                 loop = 0;
1515                 while (loop < frames_to_send) {
1516                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1517                                         frames_to_send - loop);
1518                 }
1519                 nb_ops -= frames_to_send;
1520                 num_tx += frames_to_send;
1521         }
1522
1523         dpaa_qp->tx_pkts += num_tx;
1524         dpaa_qp->tx_errs += nb_ops - num_tx;
1525
1526         return num_tx;
1527 }
1528
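     /** Dequeue up to nb_ops processed crypto ops from a queue pair */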
1529 static uint16_t
1530 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1531                        uint16_t nb_ops)
1532 {
1533         uint16_t num_rx;
1534         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1535
1536         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1537
1538         dpaa_qp->rx_pkts += num_rx;
1539         dpaa_qp->rx_errs += nb_ops - num_rx;
1540
1541         PMD_RX_LOG(DEBUG, "SEC received %d packets", num_rx);
1542
1543         return num_rx;
1544 }
1545
1546 /** Release queue pair */
1547 static int
1548 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1549                             uint16_t qp_id)
1550 {
1551         struct dpaa_sec_dev_private *internals;
1552         struct dpaa_sec_qp *qp = NULL;
1553
1554         PMD_INIT_FUNC_TRACE();
1555
1556         PMD_INIT_LOG(DEBUG, "dev=%p, queue=%d", dev, qp_id);
1557
1558         internals = dev->data->dev_private;
1559         if (qp_id >= internals->max_nb_queue_pairs) {
1560                 PMD_INIT_LOG(ERR, "qp_id %u out of range, max supported %d",
1561                              qp_id, internals->max_nb_queue_pairs);
1562                 return -EINVAL;
1563         }
1564
1565         qp = &internals->qps[qp_id];
1566         qp->internals = NULL;
1567         dev->data->queue_pairs[qp_id] = NULL;
1568
1569         return 0;
1570 }
1571
1572 /** Setup a queue pair */
1573 static int
1574 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1575                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1576                 __rte_unused int socket_id,
1577                 __rte_unused struct rte_mempool *session_pool)
1578 {
1579         struct dpaa_sec_dev_private *internals;
1580         struct dpaa_sec_qp *qp = NULL;
1581
1582         PMD_INIT_LOG(DEBUG, "dev=%p, queue=%d, conf=%p",
1583                      dev, qp_id, qp_conf);
1584
1585         internals = dev->data->dev_private;
1586         if (qp_id >= internals->max_nb_queue_pairs) {
1587                 PMD_INIT_LOG(ERR, "qp_id %u out of range, max supported %d",
1588                              qp_id, internals->max_nb_queue_pairs);
1589                 return -EINVAL;
1590         }
1591
1592         qp = &internals->qps[qp_id];
1593         qp->internals = internals;
1594         dev->data->queue_pairs[qp_id] = qp;
1595
1596         return 0;
1597 }
1598
1599 /** Start queue pair */
1600 static int
1601 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1602                           __rte_unused uint16_t queue_pair_id)
1603 {
1604         PMD_INIT_FUNC_TRACE();
1605
1606         return 0;
1607 }
1608
1609 /** Stop queue pair */
1610 static int
1611 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1612                          __rte_unused uint16_t queue_pair_id)
1613 {
1614         PMD_INIT_FUNC_TRACE();
1615
1616         return 0;
1617 }
1618
1619 /** Return the number of allocated queue pairs */
1620 static uint32_t
1621 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1622 {
1623         PMD_INIT_FUNC_TRACE();
1624
1625         return dev->data->nb_queue_pairs;
1626 }
1627
1628 /** Returns the size of session structure */
1629 static unsigned int
1630 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1631 {
1632         PMD_INIT_FUNC_TRACE();
1633
1634         return sizeof(dpaa_sec_session);
1635 }
1636
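     /** Fill session cipher parameters (algo, key, IV) from the xform */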
1637 static int
1638 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1639                      struct rte_crypto_sym_xform *xform,
1640                      dpaa_sec_session *session)
1641 {
1642         session->cipher_alg = xform->cipher.algo;
1643         session->iv.length = xform->cipher.iv.length;
1644         session->iv.offset = xform->cipher.iv.offset;
1645         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1646                                                RTE_CACHE_LINE_SIZE);
1647         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1648                 PMD_INIT_LOG(ERR, "No memory for cipher key");
1649                 return -ENOMEM;
1650         }
1651         session->cipher_key.length = xform->cipher.key.length;
1652
1653         memcpy(session->cipher_key.data, xform->cipher.key.data,
1654                xform->cipher.key.length);
1655         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1656                         DIR_ENC : DIR_DEC;
1657
1658         return 0;
1659 }
1660
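     /** Fill session auth parameters (algo, key, digest length) from the xform */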
1661 static int
1662 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1663                    struct rte_crypto_sym_xform *xform,
1664                    dpaa_sec_session *session)
1665 {
1666         session->auth_alg = xform->auth.algo;
1667         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1668                                              RTE_CACHE_LINE_SIZE);
1669         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1670                 PMD_INIT_LOG(ERR, "No memory for auth key");
1671                 return -ENOMEM;
1672         }
1673         session->auth_key.length = xform->auth.key.length;
1674         session->digest_length = xform->auth.digest_length;
1675
1676         memcpy(session->auth_key.data, xform->auth.key.data,
1677                xform->auth.key.length);
1678         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1679                         DIR_ENC : DIR_DEC;
1680
1681         return 0;
1682 }
1683
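     /** Fill session AEAD parameters (algo, key, IV, AAD length) from the xform */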
1684 static int
1685 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1686                    struct rte_crypto_sym_xform *xform,
1687                    dpaa_sec_session *session)
1688 {
1689         session->aead_alg = xform->aead.algo;
1690         session->iv.length = xform->aead.iv.length;
1691         session->iv.offset = xform->aead.iv.offset;
1692         session->auth_only_len = xform->aead.aad_length;
1693         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1694                                              RTE_CACHE_LINE_SIZE);
1695         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1696                 PMD_INIT_LOG(ERR, "No memory for aead key");
1697                 return -ENOMEM;
1698         }
1699         session->aead_key.length = xform->aead.key.length;
1700         session->digest_length = xform->aead.digest_length;
1701
1702         memcpy(session->aead_key.data, xform->aead.key.data,
1703                xform->aead.key.length);
1704         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1705                         DIR_ENC : DIR_DEC;
1706
1707         return 0;
1708 }
1709
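     /** Reserve a free per-session RX queue from the device-wide pool */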
1710 static struct qman_fq *
1711 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1712 {
1713         unsigned int i;
1714
1715         for (i = 0; i < qi->max_nb_sessions; i++) {
1716                 if (qi->inq_attach[i] == 0) {
1717                         qi->inq_attach[i] = 1;
1718                         return &qi->inq[i];
1719                 }
1720         }
1721         PMD_DRV_LOG(ERR, "All %u session RX queues in use", qi->max_nb_sessions);
1722
1723         return NULL;
1724 }
1725
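     /** Retire a session RX queue and return it to the device-wide pool */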
1726 static int
1727 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1728 {
1729         unsigned int i;
1730
1731         for (i = 0; i < qi->max_nb_sessions; i++) {
1732                 if (&qi->inq[i] == fq) {
1733                         qman_retire_fq(fq, NULL);
1734                         qman_oos_fq(fq);
1735                         qi->inq_attach[i] = 0;
1736                         return 0;
1737                 }
1738         }
1739         return -1;
1740 }
1741
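     /** Bind a session to a queue pair: build the CDB and init its RX queue */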
1742 static int
1743 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1744 {
1745         int ret;
1746
1747         sess->qp = qp;
1748         ret = dpaa_sec_prep_cdb(sess);
1749         if (ret) {
1750                 PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1751                 return -1;
1752         }
1753
1754         ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1755                                qman_fq_fqid(&qp->outq));
1756         if (ret)
1757                 PMD_DRV_LOG(ERR, "Unable to init sec queue");
1758
1759         return ret;
1760 }
1761
1762 static int
1763 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1764                         uint16_t qp_id __rte_unused,
1765                         void *ses __rte_unused)
1766 {
1767         PMD_INIT_FUNC_TRACE();
1768         return 0;
1769 }
1770
1771 static int
1772 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1773                         uint16_t qp_id  __rte_unused,
1774                         void *ses)
1775 {
1776         dpaa_sec_session *sess = ses;
1777         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1778
1779         PMD_INIT_FUNC_TRACE();
1780
1781         if (sess->inq)
1782                 dpaa_sec_detach_rxq(qi, sess->inq);
1783         sess->inq = NULL;
1784
1785         sess->qp = NULL;
1786
1787         return 0;
1788 }
1789
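     /** Walk the xform chain and populate the session accordingly */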
1790 static int
1791 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1792                             struct rte_crypto_sym_xform *xform, void *sess)
1793 {
1794         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1795         dpaa_sec_session *session = sess;
1796
1797         PMD_INIT_FUNC_TRACE();
1798
1799         if (unlikely(sess == NULL)) {
1800                 RTE_LOG(ERR, PMD, "invalid session struct\n");
1801                 return -EINVAL;
1802         }
1803
1804         /* Default IV length = 0 */
1805         session->iv.length = 0;
1806
1807         /* Cipher Only */
1808         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1809                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1810                 dpaa_sec_cipher_init(dev, xform, session);
1811
1812         /* Authentication Only */
1813         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1814                    xform->next == NULL) {
1815                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1816                 dpaa_sec_auth_init(dev, xform, session);
1817
1818         /* Cipher then Authenticate */
1819         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1820                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1821                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1822                         dpaa_sec_cipher_init(dev, xform, session);
1823                         dpaa_sec_auth_init(dev, xform->next, session);
1824                 } else {
1825                         PMD_DRV_LOG(ERR, "Not supported: Cipher then Auth with decrypt op");
1826                         return -EINVAL;
1827                 }
1828
1829         /* Authenticate then Cipher */
1830         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1831                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1832                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1833                         dpaa_sec_auth_init(dev, xform, session);
1834                         dpaa_sec_cipher_init(dev, xform->next, session);
1835                 } else {
1836                         PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1837                         return -EINVAL;
1838                 }
1839
1840         /* AEAD operation for AES-GCM kind of Algorithms */
1841         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1842                    xform->next == NULL) {
1843                 dpaa_sec_aead_init(dev, xform, session);
1844
1845         } else {
1846                 PMD_DRV_LOG(ERR, "Invalid crypto type");
1847                 return -EINVAL;
1848         }
1849         session->ctx_pool = internals->ctx_pool;
1850         session->inq = dpaa_sec_attach_rxq(internals);
1851         if (session->inq == NULL) {
1852                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
1853                 goto err1;
1854         }
1855
1856         return 0;
1857
1858 err1:
1859         rte_free(session->cipher_key.data);
1860         rte_free(session->auth_key.data);
1861         memset(session, 0, sizeof(dpaa_sec_session));
1862
1863         return -EINVAL;
1864 }
1865
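     /** Allocate session private data from the mempool and configure it */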
1866 static int
1867 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1868                 struct rte_crypto_sym_xform *xform,
1869                 struct rte_cryptodev_sym_session *sess,
1870                 struct rte_mempool *mempool)
1871 {
1872         void *sess_private_data;
1873         int ret;
1874
1875         PMD_INIT_FUNC_TRACE();
1876
1877         if (rte_mempool_get(mempool, &sess_private_data)) {
1878                 CDEV_LOG_ERR(
1879                         "Couldn't get object from session mempool");
1880                 return -ENOMEM;
1881         }
1882
1883         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1884         if (ret != 0) {
1885                 PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1886                                 "session parameters");
1887
1888                 /* Return session to mempool */
1889                 rte_mempool_put(mempool, sess_private_data);
1890                 return ret;
1891         }
1892
1893         set_session_private_data(sess, dev->driver_id,
1894                         sess_private_data);
1895
1896
1897         return 0;
1898 }
1899
1900 /** Clear the memory of session so it doesn't leave key material behind */
1901 static void
1902 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1903                 struct rte_cryptodev_sym_session *sess)
1904 {
1905         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1906         uint8_t index = dev->driver_id;
1907         void *sess_priv = get_session_private_data(sess, index);
1908         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1909
1910         PMD_INIT_FUNC_TRACE();
1912
1913         if (sess_priv) {
1914                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1915
1916                 if (s->inq)
1917                         dpaa_sec_detach_rxq(qi, s->inq);
1918                 rte_free(s->cipher_key.data);
1919                 rte_free(s->auth_key.data);
1920                 memset(s, 0, sizeof(dpaa_sec_session));
1921                 set_session_private_data(sess, index, NULL);
1922                 rte_mempool_put(sess_mp, sess_priv);
1923         }
1924 }
1925
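     /** Configure a lookaside IPsec session for the rte_security API */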
1926 static int
1927 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1928                            struct rte_security_session_conf *conf,
1929                            void *sess)
1930 {
1931         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1932         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1933         struct rte_crypto_auth_xform *auth_xform;
1934         struct rte_crypto_cipher_xform *cipher_xform;
1935         dpaa_sec_session *session = (dpaa_sec_session *)sess;
1936
1937         PMD_INIT_FUNC_TRACE();
1938
1939         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1940                 cipher_xform = &conf->crypto_xform->cipher;
1941                 auth_xform = &conf->crypto_xform->next->auth;
1942         } else {
1943                 auth_xform = &conf->crypto_xform->auth;
1944                 cipher_xform = &conf->crypto_xform->next->cipher;
1945         }
1946         session->proto_alg = conf->protocol;
1947         session->cipher_key.data = rte_zmalloc(NULL,
1948                                                cipher_xform->key.length,
1949                                                RTE_CACHE_LINE_SIZE);
1950         if (session->cipher_key.data == NULL &&
1951                         cipher_xform->key.length > 0) {
1952                 RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1953                 return -ENOMEM;
1954         }
1955
1956         session->cipher_key.length = cipher_xform->key.length;
1957         session->auth_key.data = rte_zmalloc(NULL,
1958                                         auth_xform->key.length,
1959                                         RTE_CACHE_LINE_SIZE);
1960         if (session->auth_key.data == NULL &&
1961                         auth_xform->key.length > 0) {
1962                 RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1963                 rte_free(session->cipher_key.data);
1964                 return -ENOMEM;
1965         }
1966         session->auth_key.length = auth_xform->key.length;
1967         memcpy(session->cipher_key.data, cipher_xform->key.data,
1968                         cipher_xform->key.length);
1969         memcpy(session->auth_key.data, auth_xform->key.data,
1970                         auth_xform->key.length);
1971
1972         switch (auth_xform->algo) {
1973         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1974                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1975                 break;
1976         case RTE_CRYPTO_AUTH_MD5_HMAC:
1977                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1978                 break;
1979         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1980                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1981                 break;
1982         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1983                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1984                 break;
1985         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1986                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1987                 break;
1988         case RTE_CRYPTO_AUTH_AES_CMAC:
1989                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1990                 break;
1991         case RTE_CRYPTO_AUTH_NULL:
1992                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1993                 break;
1994         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1995         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1996         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
1997         case RTE_CRYPTO_AUTH_SHA1:
1998         case RTE_CRYPTO_AUTH_SHA256:
1999         case RTE_CRYPTO_AUTH_SHA512:
2000         case RTE_CRYPTO_AUTH_SHA224:
2001         case RTE_CRYPTO_AUTH_SHA384:
2002         case RTE_CRYPTO_AUTH_MD5:
2003         case RTE_CRYPTO_AUTH_AES_GMAC:
2004         case RTE_CRYPTO_AUTH_KASUMI_F9:
2005         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2006         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2007                 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2008                         auth_xform->algo);
2009                 goto out;
2010         default:
2011                 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2012                         auth_xform->algo);
2013                 goto out;
2014         }
2015
2016         switch (cipher_xform->algo) {
2017         case RTE_CRYPTO_CIPHER_AES_CBC:
2018                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2019                 break;
2020         case RTE_CRYPTO_CIPHER_3DES_CBC:
2021                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2022                 break;
2023         case RTE_CRYPTO_CIPHER_AES_CTR:
2024                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2025                 break;
2026         case RTE_CRYPTO_CIPHER_NULL:
2027         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2028         case RTE_CRYPTO_CIPHER_3DES_ECB:
2029         case RTE_CRYPTO_CIPHER_AES_ECB:
2030         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2031                 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2032                         cipher_xform->algo);
2033                 goto out;
2034         default:
2035                 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2036                         cipher_xform->algo);
2037                 goto out;
2038         }
2039
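         /* Egress: build the outer IPv4 template header that SEC prepends
          * (PDBOPTS_ESP_OIHI_PDB_INL), with the IV generated by the engine
          * (PDBOPTS_ESP_IVSRC) and outer TTL handling per
          * PDBHMO_ESP_ENCAP_DTTL.
          */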
2040         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2041                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2042                                 sizeof(session->ip4_hdr));
2043                 session->ip4_hdr.ip_v = IPVERSION;
2044                 session->ip4_hdr.ip_hl = 5;
2045                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2046                                                 sizeof(session->ip4_hdr));
2047                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2048                 session->ip4_hdr.ip_id = 0;
2049                 session->ip4_hdr.ip_off = 0;
2050                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2051                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2052                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2053                                 : IPPROTO_AH;
2054                 session->ip4_hdr.ip_sum = 0;
2055                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2056                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2057                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2058                                                 (void *)&session->ip4_hdr,
2059                                                 sizeof(struct ip));
2060
2061                 session->encap_pdb.options =
2062                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2063                         PDBOPTS_ESP_OIHI_PDB_INL |
2064                         PDBOPTS_ESP_IVSRC |
2065                         PDBHMO_ESP_ENCAP_DTTL;
2066                 session->encap_pdb.spi = ipsec_xform->spi;
2067                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2068
2069                 session->dir = DIR_ENC;
2070         } else if (ipsec_xform->direction ==
2071                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2072                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
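                  /* Outer (tunnel) IP header length is carried in the upper
                   * half-word of the decap PDB options.
                   */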
2073                 session->decap_pdb.options = sizeof(struct ip) << 16;
2074                 session->dir = DIR_DEC;
2075         } else
2076                 goto out;
2077         session->ctx_pool = internals->ctx_pool;
2078         session->inq = dpaa_sec_attach_rxq(internals);
2079         if (session->inq == NULL) {
2080                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
2081                 goto out;
2082         }
2083
2084
2085         return 0;
2086 out:
2087         rte_free(session->auth_key.data);
2088         rte_free(session->cipher_key.data);
2089         memset(session, 0, sizeof(dpaa_sec_session));
2090         return -1;
2091 }
2092
2093 static int
2094 dpaa_sec_security_session_create(void *dev,
2095                                  struct rte_security_session_conf *conf,
2096                                  struct rte_security_session *sess,
2097                                  struct rte_mempool *mempool)
2098 {
2099         void *sess_private_data;
2100         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2101         int ret;
2102
2103         if (rte_mempool_get(mempool, &sess_private_data)) {
2104                 CDEV_LOG_ERR(
2105                         "Couldn't get object from session mempool");
2106                 return -ENOMEM;
2107         }
2108
2109         switch (conf->protocol) {
2110         case RTE_SECURITY_PROTOCOL_IPSEC:
2111                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2112                                 sess_private_data);
2113                 break;
2114         case RTE_SECURITY_PROTOCOL_MACSEC:
2115                 ret = -ENOTSUP;
2116                 break;
2117         default:
2118                 ret = -EINVAL;
2118         }
2119         if (ret != 0) {
2120                 PMD_DRV_LOG(ERR,
2121                         "DPAA PMD: failed to configure session parameters");
2122
2123                 /* Return session to mempool */
2124                 rte_mempool_put(mempool, sess_private_data);
2125                 return ret;
2126         }
2127
2128         set_sec_session_private_data(sess, sess_private_data);
2129
2130         return ret;
2131 }
2132
2133 /** Clear the memory of session so it doesn't leave key material behind */
2134 static int
2135 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2136                 struct rte_security_session *sess)
2137 {
2138         void *sess_priv = get_sec_session_private_data(sess);
2139         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2140
2141         PMD_INIT_FUNC_TRACE();
2142
2143         if (sess_priv) {
2144                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2145
2146                 rte_free(s->cipher_key.data);
2147                 rte_free(s->auth_key.data);
2148                 memset(s, 0, sizeof(dpaa_sec_session));
2149                 set_sec_session_private_data(sess, NULL);
2150                 rte_mempool_put(sess_mp, sess_priv);
2151         }
2152         return 0;
2153 }
2154
2155
2156 static int
2157 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
2158                        struct rte_cryptodev_config *config __rte_unused)
2159 {
2160         PMD_INIT_FUNC_TRACE();
2161
2162         return 0;
2163 }
2164
2165 static int
2166 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2167 {
2168         PMD_INIT_FUNC_TRACE();
2169         return 0;
2170 }
2171
2172 static void
2173 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2174 {
2175         PMD_INIT_FUNC_TRACE();
2176 }
2177
2178 static int
2179 dpaa_sec_dev_close(struct rte_cryptodev *dev __rte_unused)
2180 {
2181         PMD_INIT_FUNC_TRACE();
2182         return 0;
2183 }
2184
2185 static void
2186 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2187                        struct rte_cryptodev_info *info)
2188 {
2189         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2190
2191         PMD_INIT_FUNC_TRACE();
2192         if (info != NULL) {
2193                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2194                 info->feature_flags = dev->feature_flags;
2195                 info->capabilities = dpaa_sec_capabilities;
2196                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2197                 info->sym.max_nb_sessions_per_qp =
2198                         RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2199                         RTE_DPAA_MAX_NB_SEC_QPS;
2200                 info->driver_id = cryptodev_driver_id;
2201         }
2202 }
2203
2204 static struct rte_cryptodev_ops crypto_ops = {
2205         .dev_configure        = dpaa_sec_dev_configure,
2206         .dev_start            = dpaa_sec_dev_start,
2207         .dev_stop             = dpaa_sec_dev_stop,
2208         .dev_close            = dpaa_sec_dev_close,
2209         .dev_infos_get        = dpaa_sec_dev_infos_get,
2210         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2211         .queue_pair_release   = dpaa_sec_queue_pair_release,
2212         .queue_pair_start     = dpaa_sec_queue_pair_start,
2213         .queue_pair_stop      = dpaa_sec_queue_pair_stop,
2214         .queue_pair_count     = dpaa_sec_queue_pair_count,
2215         .session_get_size     = dpaa_sec_session_get_size,
2216         .session_configure    = dpaa_sec_session_configure,
2217         .session_clear        = dpaa_sec_session_clear,
2218         .qp_attach_session    = dpaa_sec_qp_attach_sess,
2219         .qp_detach_session    = dpaa_sec_qp_detach_sess,
2220 };
2221
2222 static const struct rte_security_capability *
2223 dpaa_sec_capabilities_get(void *device __rte_unused)
2224 {
2225         return dpaa_sec_security_cap;
2226 }
2227
2228 struct rte_security_ops dpaa_sec_security_ops = {
2229         .session_create = dpaa_sec_security_session_create,
2230         .session_update = NULL,
2231         .session_stats_get = NULL,
2232         .session_destroy = dpaa_sec_security_session_destroy,
2233         .set_pkt_metadata = NULL,
2234         .capabilities_get = dpaa_sec_capabilities_get
2235 };
2236
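     /** Undo dpaa_sec_dev_init(): free security ctx, ctx pool and priv data */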
2237 static int
2238 dpaa_sec_uninit(struct rte_cryptodev *dev)
2239 {
2240         struct dpaa_sec_dev_private *internals;
2241
2242         if (dev == NULL)
2243                 return -ENODEV;
2244         internals = dev->data->dev_private;
2245         rte_free(dev->security_ctx);
2246
2247         rte_mempool_free(internals->ctx_pool);
2248         rte_free(internals);
2249
2250         PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u",
2251                      dev->data->name, rte_socket_id());
2252
2253         return 0;
2254 }
2255
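     /** Initialize the device: ops, feature flags, TX/RX queues, ctx pool */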
2256 static int
2257 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2258 {
2259         struct dpaa_sec_dev_private *internals;
2260         struct rte_security_ctx *security_instance;
2261         struct dpaa_sec_qp *qp;
2262         uint32_t i, flags;
2263         int ret;
2264         char str[20];
2265
2266         PMD_INIT_FUNC_TRACE();
2267
2268         cryptodev->driver_id = cryptodev_driver_id;
2269         cryptodev->dev_ops = &crypto_ops;
2270
2271         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2272         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2273         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2274                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2275                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2276                         RTE_CRYPTODEV_FF_SECURITY |
2277                         RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2278
2279         internals = cryptodev->data->dev_private;
2280         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2281         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2282
2283         /*
2284          * For secondary processes, we don't initialise any further as primary
2285          * has already done this work. Only check we don't need a different
2286          * RX function
2287          */
2288         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2289                 PMD_INIT_LOG(DEBUG, "Device already init by primary process");
2290                 return 0;
2291         }
2292
2293         /* Initialize security_ctx only for primary process*/
2294         security_instance = rte_malloc("rte_security_instances_ops",
2295                                 sizeof(struct rte_security_ctx), 0);
2296         if (security_instance == NULL)
2297                 return -ENOMEM;
2298         security_instance->device = (void *)cryptodev;
2299         security_instance->ops = &dpaa_sec_security_ops;
2300         security_instance->sess_cnt = 0;
2301         cryptodev->security_ctx = security_instance;
2302
2303         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2304                 /* init qman fq for queue pair */
2305                 qp = &internals->qps[i];
2306                 ret = dpaa_sec_init_tx(&qp->outq);
2307                 if (ret) {
2308                         PMD_INIT_LOG(ERR, "failed to config tx of queue pair %d", i);
2309                         goto init_error;
2310                 }
2311         }
2312
2313         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2314                 QMAN_FQ_FLAG_TO_DCPORTAL;
2315         for (i = 0; i < internals->max_nb_sessions; i++) {
2316                 /* create rx qman fq for sessions*/
2317                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2318                 if (unlikely(ret != 0)) {
2319                         PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
2320                         goto init_error;
2321                 }
2322         }
2323
2324         snprintf(str, sizeof(str), "ctx_pool_%d", cryptodev->data->dev_id);
2325         internals->ctx_pool = rte_mempool_create((const char *)str,
2326                         CTX_POOL_NUM_BUFS,
2327                         CTX_POOL_BUF_SIZE,
2328                         CTX_POOL_CACHE_SIZE, 0,
2329                         NULL, NULL, NULL, NULL,
2330                         SOCKET_ID_ANY, 0);
2331         if (!internals->ctx_pool) {
2332                 RTE_LOG(ERR, PMD, "%s create failed\n", str);
2333                 goto init_error;
2334         }
2335
2336         PMD_INIT_LOG(DEBUG, "driver %s: created", cryptodev->data->name);
2337         return 0;
2338
2339 init_error:
2340         PMD_INIT_LOG(ERR, "driver %s: create failed", cryptodev->data->name);
2341
2342         dpaa_sec_uninit(cryptodev);
2343         return -EFAULT;
2344 }
2345
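     /** DPAA bus probe: allocate a cryptodev, set the SEC era, init device */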
2346 static int
2347 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2348                                 struct rte_dpaa_device *dpaa_dev)
2349 {
2350         struct rte_cryptodev *cryptodev;
2351         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2352
2353         int retval;
2354
2355         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d", dpaa_dev->id.dev_id);
2356
2357         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2358         if (cryptodev == NULL)
2359                 return -ENOMEM;
2360
2361         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2362                 cryptodev->data->dev_private = rte_zmalloc_socket(
2363                                         "cryptodev private structure",
2364                                         sizeof(struct dpaa_sec_dev_private),
2365                                         RTE_CACHE_LINE_SIZE,
2366                                         rte_socket_id());
2367
2368                 if (cryptodev->data->dev_private == NULL)
2369                         rte_panic("Cannot allocate memory for private "
2370                                         "device data");
2371         }
2372
2373         dpaa_dev->crypto_dev = cryptodev;
2374         cryptodev->device = &dpaa_dev->device;
2375         cryptodev->device->driver = &dpaa_drv->driver;
2376
2377         /* init user callbacks */
2378         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2379
2380         /* if sec device version is not configured */
2381         if (!rta_get_sec_era()) {
2382                 const struct device_node *caam_node;
2383
2384                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2385                         const uint32_t *prop = of_get_property(caam_node,
2386                                         "fsl,sec-era",
2387                                         NULL);
2388                         if (prop) {
2389                                 rta_set_sec_era(
2390                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2391                                 break;
2392                         }
2393                 }
2394         }
2395
2396         /* Invoke PMD device initialization function */
2397         retval = dpaa_sec_dev_init(cryptodev);
2398         if (retval == 0)
2399                 return 0;
2400
2401         /* In case of error, cleanup is done */
2402         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2403                 rte_free(cryptodev->data->dev_private);
2404
2405         rte_cryptodev_pmd_release_device(cryptodev);
2406
2407         return -ENXIO;
2408 }
2409
2410 static int
2411 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2412 {
2413         struct rte_cryptodev *cryptodev;
2414         int ret;
2415
2416         cryptodev = dpaa_dev->crypto_dev;
2417         if (cryptodev == NULL)
2418                 return -ENODEV;
2419
2420         ret = dpaa_sec_uninit(cryptodev);
2421         if (ret)
2422                 return ret;
2423
2424         return rte_cryptodev_pmd_destroy(cryptodev);
2425 }
2426
2427 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2428         .drv_type = FSL_DPAA_CRYPTO,
2429         .driver = {
2430                 .name = "DPAA SEC PMD"
2431         },
2432         .probe = cryptodev_dpaa_sec_probe,
2433         .remove = cryptodev_dpaa_sec_remove,
2434 };
2435
2436 static struct cryptodev_driver dpaa_sec_crypto_drv;
2437
2438 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2439 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver,
2440                 cryptodev_driver_id);