crypto/dpaa_sec: improve the error checking
drivers/crypto/dpaa_sec/dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26
27 #include <fsl_usd.h>
28 #include <fsl_qman.h>
29 #include <of.h>
30
31 /* RTA header files */
32 #include <hw/desc/common.h>
33 #include <hw/desc/algo.h>
34 #include <hw/desc/ipsec.h>
35
36 #include <rte_dpaa_bus.h>
37 #include <dpaa_sec.h>
38 #include <dpaa_sec_log.h>
39
40 enum rta_sec_era rta_sec_era;
41
42 static uint8_t cryptodev_driver_id;
43
44 static __thread struct rte_crypto_op **dpaa_sec_ops;
45 static __thread int dpaa_sec_op_nb;
46
47 static int
48 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
49
50 static inline void
51 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
52 {
53         if (!ctx->fd_status) {
54                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
55         } else {
56                 PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
57                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
58         }
59
60         /* report op status to sym->op and then free the ctx memory */
61         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
62 }
63
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
66 {
67         struct dpaa_sec_op_ctx *ctx;
68         int retval;
69
70         retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
71         if (retval || !ctx) {
72                 PMD_TX_LOG(ERR, "Alloc sec descriptor failed!");
73                 return NULL;
74         }
75         /*
76          * Clear SG memory. There are 16 SG entries of 16 bytes each.
77          * One call to dcbz_64() clears 64 bytes, hence it is called 4 times
78          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
79          * each packet; memset() would be costlier than dcbz_64().
80          */
81         dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
82         dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
83         dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
84         dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
85
86         ctx->ctx_pool = ses->ctx_pool;
87         ctx->vtop_offset = (size_t) ctx
88                                 - rte_mempool_virt2iova(ctx);
89
90         return ctx;
91 }
92
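/*
 * A sketch of the compound-frame layout assumed throughout this file (derived
 * from the builders below; sg[] lives inside the per-op ctx allocated above):
 *
 *            +--------------------+
 *   sg[0]    | output             |  may be an extension pointing at sg[2..]
 *   sg[1]    | input, final       |  may be an extension as well
 *   sg[2..]  | extension entries  |  IV / data segments / digest
 *            +--------------------+
 *
 * The frame descriptor enqueued to CAAM carries the address of sg[0] with
 * format qm_fd_compound (see dpaa_sec_enqueue_burst()).
 */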
93 static inline rte_iova_t
94 dpaa_mem_vtop(void *vaddr)
95 {
96         const struct rte_memseg *ms;
97
98         ms = rte_mem_virt2memseg(vaddr, NULL);
99         if (ms)
100                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
101         return (rte_iova_t)0;
102 }
103
104 /* virtual address conversion when mempool support is available for ctx */
105 static inline phys_addr_t
106 dpaa_mem_vtop_ctx(struct dpaa_sec_op_ctx *ctx, void *vaddr)
107 {
108         return (size_t)vaddr - ctx->vtop_offset;
109 }
110
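/*
 * A minimal worked example of the offset trick above: if the ctx was
 * allocated at virtual address 0x7f0000001000 and its IOVA is 0x20001000,
 * then vtop_offset = 0x7f0000001000 - 0x20001000. Any address inside the
 * same ctx (e.g. &cf->sg[2] or ctx->digest) can then be translated with a
 * single subtraction instead of a memseg lookup per conversion.
 */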
111 static inline void *
112 dpaa_mem_ptov(rte_iova_t paddr)
113 {
114         return rte_mem_iova2virt(paddr);
115 }
116
117 static void
118 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
119                    struct qman_fq *fq,
120                    const struct qm_mr_entry *msg)
121 {
122         RTE_LOG_DP(ERR, PMD, "sec fq %d error, RC = %x, seqnum = %x\n",
123                    fq->fqid, msg->ern.rc, msg->ern.seqnum);
124 }
125
126 /* initialize the queue with dest chan as caam chan so that
127  * all the packets in this queue can be dispatched to the caam
128  */
129 static int
130 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
131                  uint32_t fqid_out)
132 {
133         struct qm_mcc_initfq fq_opts;
134         uint32_t flags;
135         int ret = -1;
136
137         /* Clear FQ options */
138         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
139
140         flags = QMAN_INITFQ_FLAG_SCHED;
141         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
142                           QM_INITFQ_WE_CONTEXTB;
143
144         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
145         fq_opts.fqd.context_b = fqid_out;
146         fq_opts.fqd.dest.channel = qm_channel_caam;
147         fq_opts.fqd.dest.wq = 0;
148
149         fq_in->cb.ern  = ern_sec_fq_handler;
150
151         PMD_INIT_LOG(DEBUG, "in-%x out-%x", fq_in->fqid, fqid_out);
152
153         ret = qman_init_fq(fq_in, flags, &fq_opts);
154         if (unlikely(ret != 0))
155                 PMD_INIT_LOG(ERR, "qman_init_fq failed %d", ret);
156
157         return ret;
158 }
159
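/*
 * Note on the init above: context A carries the 64-bit address of the
 * session's shared descriptor (hwdesc), which CAAM executes for every frame
 * arriving on this queue; context B holds the FQID on which CAAM enqueues
 * the processed result.
 */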
160 /* frames enqueued on in_fq are processed by caam, which puts the crypto result on out_fq */
161 static enum qman_cb_dqrr_result
162 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
163                   struct qman_fq *fq __always_unused,
164                   const struct qm_dqrr_entry *dqrr)
165 {
166         const struct qm_fd *fd;
167         struct dpaa_sec_job *job;
168         struct dpaa_sec_op_ctx *ctx;
169
170         if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
171                 return qman_cb_dqrr_defer;
172
173         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
174                 return qman_cb_dqrr_consume;
175
176         fd = &dqrr->fd;
177         /* sg is embedded in an op ctx,
178          * sg[0] is for output
179          * sg[1] for input
180          */
181         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
182
183         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
184         ctx->fd_status = fd->status;
185         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
186                 struct qm_sg_entry *sg_out;
187                 uint32_t len;
188
189                 sg_out = &job->sg[0];
190                 hw_sg_to_cpu(sg_out);
191                 len = sg_out->length;
192                 ctx->op->sym->m_src->pkt_len = len;
193                 ctx->op->sym->m_src->data_len = len;
194         }
195         dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
196         dpaa_sec_op_ending(ctx);
197
198         return qman_cb_dqrr_consume;
199 }
200
201 /* caam result is put into this queue */
202 static int
203 dpaa_sec_init_tx(struct qman_fq *fq)
204 {
205         int ret;
206         struct qm_mcc_initfq opts;
207         uint32_t flags;
208
209         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
210                 QMAN_FQ_FLAG_DYNAMIC_FQID;
211
212         ret = qman_create_fq(0, flags, fq);
213         if (unlikely(ret)) {
214                 PMD_INIT_LOG(ERR, "qman_create_fq failed");
215                 return ret;
216         }
217
218         memset(&opts, 0, sizeof(opts));
219         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
220                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
221
222         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
223
224         fq->cb.dqrr = dqrr_out_fq_cb_rx;
225         fq->cb.ern  = ern_sec_fq_handler;
226
227         ret = qman_init_fq(fq, 0, &opts);
228         if (unlikely(ret)) {
229                 PMD_INIT_LOG(ERR, "unable to init caam source fq!");
230                 return ret;
231         }
232
233         return ret;
234 }
235
236 static inline int is_cipher_only(dpaa_sec_session *ses)
237 {
238         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
239                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
240 }
241
242 static inline int is_auth_only(dpaa_sec_session *ses)
243 {
244         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
245                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
246 }
247
248 static inline int is_aead(dpaa_sec_session *ses)
249 {
250         return ((ses->cipher_alg == 0) &&
251                 (ses->auth_alg == 0) &&
252                 (ses->aead_alg != 0));
253 }
254
255 static inline int is_auth_cipher(dpaa_sec_session *ses)
256 {
257         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
258                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
259                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
260 }
261
262 static inline int is_proto_ipsec(dpaa_sec_session *ses)
263 {
264         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
265 }
266
267 static inline int is_encode(dpaa_sec_session *ses)
268 {
269         return ses->dir == DIR_ENC;
270 }
271
272 static inline int is_decode(dpaa_sec_session *ses)
273 {
274         return ses->dir == DIR_DEC;
275 }
276
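/*
 * Note on the predicates above: is_aead() tests the raw enum values for
 * zero, i.e. "no cipher/auth transform attached at all", whereas the other
 * helpers compare against RTE_CRYPTO_CIPHER_NULL/RTE_CRYPTO_AUTH_NULL, which
 * are valid pass-through algorithms with non-zero enum values.
 */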
277 static inline void
278 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
279 {
280         switch (ses->auth_alg) {
281         case RTE_CRYPTO_AUTH_NULL:
282                 ses->digest_length = 0;
283                 break;
284         case RTE_CRYPTO_AUTH_MD5_HMAC:
285                 alginfo_a->algtype =
286                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
287                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
288                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
289                 break;
290         case RTE_CRYPTO_AUTH_SHA1_HMAC:
291                 alginfo_a->algtype =
292                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
293                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
294                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
295                 break;
296         case RTE_CRYPTO_AUTH_SHA224_HMAC:
297                 alginfo_a->algtype =
298                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
299                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
300                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
301                 break;
302         case RTE_CRYPTO_AUTH_SHA256_HMAC:
303                 alginfo_a->algtype =
304                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
305                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
306                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
307                 break;
308         case RTE_CRYPTO_AUTH_SHA384_HMAC:
309                 alginfo_a->algtype =
310                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
311                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
312                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
313                 break;
314         case RTE_CRYPTO_AUTH_SHA512_HMAC:
315                 alginfo_a->algtype =
316                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
317                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
318                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
319                 break;
320         default:
                /* flag the alg so dpaa_sec_prep_cdb() can reject the session */
                alginfo_a->algtype = (unsigned int)DPAA_SEC_ALG_UNSUPPORT;
321                 PMD_INIT_LOG(ERR, "unsupported auth alg %u", ses->auth_alg);
322         }
323 }
324
325 static inline void
326 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
327 {
328         switch (ses->cipher_alg) {
329         case RTE_CRYPTO_CIPHER_NULL:
330                 break;
331         case RTE_CRYPTO_CIPHER_AES_CBC:
332                 alginfo_c->algtype =
333                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
334                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
335                 alginfo_c->algmode = OP_ALG_AAI_CBC;
336                 break;
337         case RTE_CRYPTO_CIPHER_3DES_CBC:
338                 alginfo_c->algtype =
339                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
340                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
341                 alginfo_c->algmode = OP_ALG_AAI_CBC;
342                 break;
343         case RTE_CRYPTO_CIPHER_AES_CTR:
344                 alginfo_c->algtype =
345                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
346                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
347                 alginfo_c->algmode = OP_ALG_AAI_CTR;
348                 break;
349         default:
                alginfo_c->algtype = (unsigned int)DPAA_SEC_ALG_UNSUPPORT;
350                 PMD_INIT_LOG(ERR, "unsupported cipher alg %d", ses->cipher_alg);
351         }
352 }
353
354 static inline void
355 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
356 {
357         switch (ses->aead_alg) {
358         case RTE_CRYPTO_AEAD_AES_GCM:
359                 alginfo->algtype = OP_ALG_ALGSEL_AES;
360                 alginfo->algmode = OP_ALG_AAI_GCM;
361                 break;
362         default:
                alginfo->algtype = (unsigned int)DPAA_SEC_ALG_UNSUPPORT;
363                 PMD_INIT_LOG(ERR, "unsupported AEAD alg %d", ses->aead_alg);
364         }
365 }
366
367
368 /* prepare command block of the session */
369 static int
370 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
371 {
372         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
373         int32_t shared_desc_len = 0;
374         struct sec_cdb *cdb = &ses->cdb;
375         int err;
376 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
377         int swap = false;
378 #else
379         int swap = true;
380 #endif
381
382         memset(cdb, 0, sizeof(struct sec_cdb));
383
384         if (is_cipher_only(ses)) {
385                 caam_cipher_alg(ses, &alginfo_c);
386                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
387                         PMD_TX_LOG(ERR, "not supported cipher alg\n");
388                         return -ENOTSUP;
389                 }
390
391                 alginfo_c.key = (size_t)ses->cipher_key.data;
392                 alginfo_c.keylen = ses->cipher_key.length;
393                 alginfo_c.key_enc_flags = 0;
394                 alginfo_c.key_type = RTA_DATA_IMM;
395
396                 shared_desc_len = cnstr_shdsc_blkcipher(
397                                                 cdb->sh_desc, true,
398                                                 swap, &alginfo_c,
399                                                 NULL,
400                                                 ses->iv.length,
401                                                 ses->dir);
402         } else if (is_auth_only(ses)) {
403                 caam_auth_alg(ses, &alginfo_a);
404                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
405                         PMD_TX_LOG(ERR, "not supported auth alg\n");
406                         return -ENOTSUP;
407                 }
408
409                 alginfo_a.key = (size_t)ses->auth_key.data;
410                 alginfo_a.keylen = ses->auth_key.length;
411                 alginfo_a.key_enc_flags = 0;
412                 alginfo_a.key_type = RTA_DATA_IMM;
413
414                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
415                                                    swap, &alginfo_a,
416                                                    !ses->dir,
417                                                    ses->digest_length);
418         } else if (is_aead(ses)) {
419                 caam_aead_alg(ses, &alginfo);
420                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
421                         PMD_TX_LOG(ERR, "not supported aead alg\n");
422                         return -ENOTSUP;
423                 }
424                 alginfo.key = (size_t)ses->aead_key.data;
425                 alginfo.keylen = ses->aead_key.length;
426                 alginfo.key_enc_flags = 0;
427                 alginfo.key_type = RTA_DATA_IMM;
428
429                 if (ses->dir == DIR_ENC)
430                         shared_desc_len = cnstr_shdsc_gcm_encap(
431                                         cdb->sh_desc, true, swap,
432                                         &alginfo,
433                                         ses->iv.length,
434                                         ses->digest_length);
435                 else
436                         shared_desc_len = cnstr_shdsc_gcm_decap(
437                                         cdb->sh_desc, true, swap,
438                                         &alginfo,
439                                         ses->iv.length,
440                                         ses->digest_length);
441         } else {
442                 caam_cipher_alg(ses, &alginfo_c);
443                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
444                         PMD_TX_LOG(ERR, "not supported cipher alg\n");
445                         return -ENOTSUP;
446                 }
447
448                 alginfo_c.key = (size_t)ses->cipher_key.data;
449                 alginfo_c.keylen = ses->cipher_key.length;
450                 alginfo_c.key_enc_flags = 0;
451                 alginfo_c.key_type = RTA_DATA_IMM;
452
453                 caam_auth_alg(ses, &alginfo_a);
454                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
455                         PMD_TX_LOG(ERR, "not supported auth alg\n");
456                         return -ENOTSUP;
457                 }
458
459                 alginfo_a.key = (size_t)ses->auth_key.data;
460                 alginfo_a.keylen = ses->auth_key.length;
461                 alginfo_a.key_enc_flags = 0;
462                 alginfo_a.key_type = RTA_DATA_IMM;
463
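                /*
                 * rta_inline_query() decides, per key, whether the key fits
                 * inline in the shared descriptor or must be referenced by
                 * pointer. sh_desc[0]/sh_desc[1] are used as scratch space
                 * for the two key lengths and sh_desc[2] receives a bitmask:
                 * bit 0 set means the cipher key can be inlined, bit 1 the
                 * auth key. The scratch words are zeroed again before the
                 * descriptor is built.
                 */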
464                 cdb->sh_desc[0] = alginfo_c.keylen;
465                 cdb->sh_desc[1] = alginfo_a.keylen;
466                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
467                                        MIN_JOB_DESC_SIZE,
468                                        (unsigned int *)cdb->sh_desc,
469                                        &cdb->sh_desc[2], 2);
470
471                 if (err < 0) {
472                         PMD_TX_LOG(ERR, "Crypto: Incorrect key lengths");
473                         return err;
474                 }
475                 if (cdb->sh_desc[2] & 1)
476                         alginfo_c.key_type = RTA_DATA_IMM;
477                 else {
478                         alginfo_c.key = (size_t)dpaa_mem_vtop(
479                                                 (void *)(size_t)alginfo_c.key);
480                         alginfo_c.key_type = RTA_DATA_PTR;
481                 }
482                 if (cdb->sh_desc[2] & (1<<1))
483                         alginfo_a.key_type = RTA_DATA_IMM;
484                 else {
485                         alginfo_a.key = (size_t)dpaa_mem_vtop(
486                                                 (void *)(size_t)alginfo_a.key);
487                         alginfo_a.key_type = RTA_DATA_PTR;
488                 }
489                 cdb->sh_desc[0] = 0;
490                 cdb->sh_desc[1] = 0;
491                 cdb->sh_desc[2] = 0;
492                 if (is_proto_ipsec(ses)) {
493                         if (ses->dir == DIR_ENC) {
494                                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
495                                                 cdb->sh_desc,
496                                                 true, swap, &ses->encap_pdb,
497                                                 (uint8_t *)&ses->ip4_hdr,
498                                                 &alginfo_c, &alginfo_a);
499                         } else if (ses->dir == DIR_DEC) {
500                                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
501                                                 cdb->sh_desc,
502                                                 true, swap, &ses->decap_pdb,
503                                                 &alginfo_c, &alginfo_a);
504                         }
505                 } else {
506                         /* Auth_only_len is set as 0 here and it will be
507                          * overwritten in fd for each packet.
508                          */
509                         shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
510                                         true, swap, &alginfo_c, &alginfo_a,
511                                         ses->iv.length, 0,
512                                         ses->digest_length, ses->dir);
513                 }
514         }
515
516         if (shared_desc_len < 0) {
517                 PMD_TX_LOG(ERR, "error in preparing command block\n");
518                 return shared_desc_len;
519         }
520
521         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
522         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
523         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
524
525         return 0;
526 }
527
528 /* qp is lockless, should be accessed by only one thread */
529 static int
530 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
531 {
532         struct qman_fq *fq;
533         unsigned int pkts = 0;
534         int ret;
535         struct qm_dqrr_entry *dq;
536
537         fq = &qp->outq;
538         ret = qman_set_vdq(fq, (nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES) ?
539                                 DPAA_MAX_DEQUEUE_NUM_FRAMES : nb_ops);
540         if (ret)
541                 return 0;
542
543         do {
544                 const struct qm_fd *fd;
545                 struct dpaa_sec_job *job;
546                 struct dpaa_sec_op_ctx *ctx;
547                 struct rte_crypto_op *op;
548
549                 dq = qman_dequeue(fq);
550                 if (!dq)
551                         continue;
552
553                 fd = &dq->fd;
554                 /* sg is embedded in an op ctx,
555                  * sg[0] is for output
556                  * sg[1] for input
557                  */
558                 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
559
560                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
561                 ctx->fd_status = fd->status;
562                 op = ctx->op;
563                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
564                         struct qm_sg_entry *sg_out;
565                         uint32_t len;
566
567                         sg_out = &job->sg[0];
568                         hw_sg_to_cpu(sg_out);
569                         len = sg_out->length;
570                         op->sym->m_src->pkt_len = len;
571                         op->sym->m_src->data_len = len;
572                 }
573                 if (!ctx->fd_status) {
574                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
575                 } else {
576                         PMD_RX_LOG(ERR, "SEC return err: 0x%x", ctx->fd_status);
577                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
578                 }
579                 ops[pkts++] = op;
580
581                 /* report op status to sym->op and then free the ctx memory */
582                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
583
584                 qman_dqrr_consume(fq, dq);
585         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
586
587         return pkts;
588 }
589
590 static inline struct dpaa_sec_job *
591 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
592 {
593         struct rte_crypto_sym_op *sym = op->sym;
594         struct rte_mbuf *mbuf = sym->m_src;
595         struct dpaa_sec_job *cf;
596         struct dpaa_sec_op_ctx *ctx;
597         struct qm_sg_entry *sg, *out_sg, *in_sg;
598         phys_addr_t start_addr;
599         uint8_t *old_digest, extra_segs;
600
601         if (is_decode(ses))
602                 extra_segs = 3;
603         else
604                 extra_segs = 2;
605
606         if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
607                 PMD_TX_LOG(ERR, "Auth: Max sec segs supported is %d\n",
608                                                                 MAX_SG_ENTRIES);
609                 return NULL;
610         }
611         ctx = dpaa_sec_alloc_ctx(ses);
612         if (!ctx)
613                 return NULL;
614
615         cf = &ctx->job;
616         ctx->op = op;
617         old_digest = ctx->digest;
618
619         /* output */
620         out_sg = &cf->sg[0];
621         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
622         out_sg->length = ses->digest_length;
623         cpu_to_hw_sg(out_sg);
624
625         /* input */
626         in_sg = &cf->sg[1];
627         /* need to extend the input to a compound frame */
628         in_sg->extension = 1;
629         in_sg->final = 1;
630         in_sg->length = sym->auth.data.length;
631         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
632
633         /* 1st seg */
634         sg = in_sg + 1;
635         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
636         sg->length = mbuf->data_len - sym->auth.data.offset;
637         sg->offset = sym->auth.data.offset;
638
639         /* Successive segs */
640         mbuf = mbuf->next;
641         while (mbuf) {
642                 cpu_to_hw_sg(sg);
643                 sg++;
644                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
645                 sg->length = mbuf->data_len;
646                 mbuf = mbuf->next;
647         }
648
649         if (is_decode(ses)) {
650                 /* Digest verification case */
651                 cpu_to_hw_sg(sg);
652                 sg++;
653                 rte_memcpy(old_digest, sym->auth.digest.data,
654                                 ses->digest_length);
655                 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
656                 qm_sg_entry_set64(sg, start_addr);
657                 sg->length = ses->digest_length;
658                 in_sg->length += ses->digest_length;
659         } else {
660                 /* Digest calculation case */
661                 sg->length -= ses->digest_length;
662         }
663         sg->final = 1;
664         cpu_to_hw_sg(sg);
665         cpu_to_hw_sg(in_sg);
666
667         return cf;
668 }
669
670 /**
671  * packet looks like:
672  *              |<----data_len------->|
673  *    |ip_header|ah_header|icv|payload|
674  *              ^
675  *              |
676  *         mbuf data pointer (rte_pktmbuf_mtod(mbuf, ...))
677  */
678 static inline struct dpaa_sec_job *
679 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
680 {
681         struct rte_crypto_sym_op *sym = op->sym;
682         struct rte_mbuf *mbuf = sym->m_src;
683         struct dpaa_sec_job *cf;
684         struct dpaa_sec_op_ctx *ctx;
685         struct qm_sg_entry *sg;
686         rte_iova_t start_addr;
687         uint8_t *old_digest;
688
689         ctx = dpaa_sec_alloc_ctx(ses);
690         if (!ctx)
691                 return NULL;
692
693         cf = &ctx->job;
694         ctx->op = op;
695         old_digest = ctx->digest;
696
697         start_addr = rte_pktmbuf_iova(mbuf);
698         /* output */
699         sg = &cf->sg[0];
700         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
701         sg->length = ses->digest_length;
702         cpu_to_hw_sg(sg);
703
704         /* input */
705         sg = &cf->sg[1];
706         if (is_decode(ses)) {
707                 /* need to extend the input to a compound frame */
708                 sg->extension = 1;
709                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
710                 sg->length = sym->auth.data.length + ses->digest_length;
711                 sg->final = 1;
712                 cpu_to_hw_sg(sg);
713
714                 sg = &cf->sg[2];
715                 /* hash result or digest, save digest first */
716                 rte_memcpy(old_digest, sym->auth.digest.data,
717                            ses->digest_length);
718                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
719                 sg->length = sym->auth.data.length;
720                 cpu_to_hw_sg(sg);
721
722                 /* let's check digest by hw */
723                 start_addr = dpaa_mem_vtop_ctx(ctx, old_digest);
724                 sg++;
725                 qm_sg_entry_set64(sg, start_addr);
726                 sg->length = ses->digest_length;
727                 sg->final = 1;
728                 cpu_to_hw_sg(sg);
729         } else {
730                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
731                 sg->length = sym->auth.data.length;
732                 sg->final = 1;
733                 cpu_to_hw_sg(sg);
734         }
735
736         return cf;
737 }
738
739 static inline struct dpaa_sec_job *
740 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
741 {
742         struct rte_crypto_sym_op *sym = op->sym;
743         struct dpaa_sec_job *cf;
744         struct dpaa_sec_op_ctx *ctx;
745         struct qm_sg_entry *sg, *out_sg, *in_sg;
746         struct rte_mbuf *mbuf;
747         uint8_t req_segs;
748         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
749                         ses->iv.offset);
750
751         if (sym->m_dst) {
752                 mbuf = sym->m_dst;
753                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
754         } else {
755                 mbuf = sym->m_src;
756                 req_segs = mbuf->nb_segs * 2 + 3;
757         }
758
759         if (req_segs > MAX_SG_ENTRIES) {
760                 PMD_TX_LOG(ERR, "Cipher: Max sec segs supported is %d\n",
761                                                                 MAX_SG_ENTRIES);
762                 return NULL;
763         }
764
765         ctx = dpaa_sec_alloc_ctx(ses);
766         if (!ctx)
767                 return NULL;
768
769         cf = &ctx->job;
770         ctx->op = op;
771
772         /* output */
773         out_sg = &cf->sg[0];
774         out_sg->extension = 1;
775         out_sg->length = sym->cipher.data.length;
776         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
777         cpu_to_hw_sg(out_sg);
778
779         /* 1st seg */
780         sg = &cf->sg[2];
781         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
782         sg->length = mbuf->data_len - sym->cipher.data.offset;
783         sg->offset = sym->cipher.data.offset;
784
785         /* Successive segs */
786         mbuf = mbuf->next;
787         while (mbuf) {
788                 cpu_to_hw_sg(sg);
789                 sg++;
790                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
791                 sg->length = mbuf->data_len;
792                 mbuf = mbuf->next;
793         }
794         sg->final = 1;
795         cpu_to_hw_sg(sg);
796
797         /* input */
798         mbuf = sym->m_src;
799         in_sg = &cf->sg[1];
800         in_sg->extension = 1;
801         in_sg->final = 1;
802         in_sg->length = sym->cipher.data.length + ses->iv.length;
803
804         sg++;
805         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
806         cpu_to_hw_sg(in_sg);
807
808         /* IV */
809         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
810         sg->length = ses->iv.length;
811         cpu_to_hw_sg(sg);
812
813         /* 1st seg */
814         sg++;
815         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
816         sg->length = mbuf->data_len - sym->cipher.data.offset;
817         sg->offset = sym->cipher.data.offset;
818
819         /* Successive segs */
820         mbuf = mbuf->next;
821         while (mbuf) {
822                 cpu_to_hw_sg(sg);
823                 sg++;
824                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
825                 sg->length = mbuf->data_len;
826                 mbuf = mbuf->next;
827         }
828         sg->final = 1;
829         cpu_to_hw_sg(sg);
830
831         return cf;
832 }
833
834 static inline struct dpaa_sec_job *
835 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
836 {
837         struct rte_crypto_sym_op *sym = op->sym;
838         struct dpaa_sec_job *cf;
839         struct dpaa_sec_op_ctx *ctx;
840         struct qm_sg_entry *sg;
841         rte_iova_t src_start_addr, dst_start_addr;
842         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
843                         ses->iv.offset);
844
845         ctx = dpaa_sec_alloc_ctx(ses);
846         if (!ctx)
847                 return NULL;
848
849         cf = &ctx->job;
850         ctx->op = op;
851
852         src_start_addr = rte_pktmbuf_iova(sym->m_src);
853
854         if (sym->m_dst)
855                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
856         else
857                 dst_start_addr = src_start_addr;
858
859         /* output */
860         sg = &cf->sg[0];
861         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
862         sg->length = sym->cipher.data.length + ses->iv.length;
863         cpu_to_hw_sg(sg);
864
865         /* input */
866         sg = &cf->sg[1];
867
868         /* need to extend the input to a compound frame */
869         sg->extension = 1;
870         sg->final = 1;
871         sg->length = sym->cipher.data.length + ses->iv.length;
872         qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, &cf->sg[2]));
873         cpu_to_hw_sg(sg);
874
875         sg = &cf->sg[2];
876         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
877         sg->length = ses->iv.length;
878         cpu_to_hw_sg(sg);
879
880         sg++;
881         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
882         sg->length = sym->cipher.data.length;
883         sg->final = 1;
884         cpu_to_hw_sg(sg);
885
886         return cf;
887 }
888
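/*
 * Usage note for the two cipher builders above (a sketch under stated
 * assumptions, not driver API): when sym->m_dst is NULL the operation is
 * in-place and the ciphertext overwrites the source mbuf at
 * cipher.data.offset; with a distinct m_dst it runs out-of-place. E.g.:
 *
 *   op->sym->m_src = mbuf;               // source packet
 *   op->sym->m_dst = NULL;               // NULL => in-place (see above)
 *   op->sym->cipher.data.offset = 0;
 *   op->sym->cipher.data.length = plen;  // plen: hypothetical payload length
 */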
889 static inline struct dpaa_sec_job *
890 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
891 {
892         struct rte_crypto_sym_op *sym = op->sym;
893         struct dpaa_sec_job *cf;
894         struct dpaa_sec_op_ctx *ctx;
895         struct qm_sg_entry *sg, *out_sg, *in_sg;
896         struct rte_mbuf *mbuf;
897         uint8_t req_segs;
898         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
899                         ses->iv.offset);
900
901         if (sym->m_dst) {
902                 mbuf = sym->m_dst;
903                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
904         } else {
905                 mbuf = sym->m_src;
906                 req_segs = mbuf->nb_segs * 2 + 4;
907         }
908
909         if (ses->auth_only_len)
910                 req_segs++;
911
912         if (req_segs > MAX_SG_ENTRIES) {
913                 PMD_TX_LOG(ERR, "AEAD: Max sec segs supported is %d\n",
914                                 MAX_SG_ENTRIES);
915                 return NULL;
916         }
917
918         ctx = dpaa_sec_alloc_ctx(ses);
919         if (!ctx)
920                 return NULL;
921
922         cf = &ctx->job;
923         ctx->op = op;
924
925         rte_prefetch0(cf->sg);
926
927         /* output */
928         out_sg = &cf->sg[0];
929         out_sg->extension = 1;
930         if (is_encode(ses))
931                 out_sg->length = sym->aead.data.length + ses->auth_only_len
932                                                 + ses->digest_length;
933         else
934                 out_sg->length = sym->aead.data.length + ses->auth_only_len;
935
936         /* output sg entries */
937         sg = &cf->sg[2];
938         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
939         cpu_to_hw_sg(out_sg);
940
941         /* 1st seg */
942         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
943         sg->length = mbuf->data_len - sym->aead.data.offset +
944                                         ses->auth_only_len;
945         sg->offset = sym->aead.data.offset - ses->auth_only_len;
946
947         /* Successive segs */
948         mbuf = mbuf->next;
949         while (mbuf) {
950                 cpu_to_hw_sg(sg);
951                 sg++;
952                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
953                 sg->length = mbuf->data_len;
954                 mbuf = mbuf->next;
955         }
956         sg->length -= ses->digest_length;
957
958         if (is_encode(ses)) {
959                 cpu_to_hw_sg(sg);
960                 /* set auth output */
961                 sg++;
962                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
963                 sg->length = ses->digest_length;
964         }
965         sg->final = 1;
966         cpu_to_hw_sg(sg);
967
968         /* input */
969         mbuf = sym->m_src;
970         in_sg = &cf->sg[1];
971         in_sg->extension = 1;
972         in_sg->final = 1;
973         if (is_encode(ses))
974                 in_sg->length = ses->iv.length + sym->aead.data.length
975                                                         + ses->auth_only_len;
976         else
977                 in_sg->length = ses->iv.length + sym->aead.data.length
978                                 + ses->auth_only_len + ses->digest_length;
979
980         /* input sg entries */
981         sg++;
982         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
983         cpu_to_hw_sg(in_sg);
984
985         /* 1st seg IV */
986         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
987         sg->length = ses->iv.length;
988         cpu_to_hw_sg(sg);
989
990         /* 2nd seg auth only */
991         if (ses->auth_only_len) {
992                 sg++;
993                 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
994                 sg->length = ses->auth_only_len;
995                 cpu_to_hw_sg(sg);
996         }
997
998         /* 3rd seg */
999         sg++;
1000         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1001         sg->length = mbuf->data_len - sym->aead.data.offset;
1002         sg->offset = sym->aead.data.offset;
1003
1004         /* Successive segs */
1005         mbuf = mbuf->next;
1006         while (mbuf) {
1007                 cpu_to_hw_sg(sg);
1008                 sg++;
1009                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1010                 sg->length = mbuf->data_len;
1011                 mbuf = mbuf->next;
1012         }
1013
1014         if (is_decode(ses)) {
1015                 cpu_to_hw_sg(sg);
1016                 sg++;
1017                 memcpy(ctx->digest, sym->aead.digest.data,
1018                         ses->digest_length);
1019                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1020                 sg->length = ses->digest_length;
1021         }
1022         sg->final = 1;
1023         cpu_to_hw_sg(sg);
1024
1025         return cf;
1026 }
1027
1028 static inline struct dpaa_sec_job *
1029 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1030 {
1031         struct rte_crypto_sym_op *sym = op->sym;
1032         struct dpaa_sec_job *cf;
1033         struct dpaa_sec_op_ctx *ctx;
1034         struct qm_sg_entry *sg;
1035         uint32_t length = 0;
1036         rte_iova_t src_start_addr, dst_start_addr;
1037         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1038                         ses->iv.offset);
1039
1040         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1041
1042         if (sym->m_dst)
1043                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1044         else
1045                 dst_start_addr = src_start_addr;
1046
1047         ctx = dpaa_sec_alloc_ctx(ses);
1048         if (!ctx)
1049                 return NULL;
1050
1051         cf = &ctx->job;
1052         ctx->op = op;
1053
1054         /* input */
1055         rte_prefetch0(cf->sg);
1056         sg = &cf->sg[2];
1057         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1058         if (is_encode(ses)) {
1059                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1060                 sg->length = ses->iv.length;
1061                 length += sg->length;
1062                 cpu_to_hw_sg(sg);
1063
1064                 sg++;
1065                 if (ses->auth_only_len) {
1066                         qm_sg_entry_set64(sg,
1067                                           dpaa_mem_vtop(sym->aead.aad.data));
1068                         sg->length = ses->auth_only_len;
1069                         length += sg->length;
1070                         cpu_to_hw_sg(sg);
1071                         sg++;
1072                 }
1073                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1074                 sg->length = sym->aead.data.length;
1075                 length += sg->length;
1076                 sg->final = 1;
1077                 cpu_to_hw_sg(sg);
1078         } else {
1079                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1080                 sg->length = ses->iv.length;
1081                 length += sg->length;
1082                 cpu_to_hw_sg(sg);
1083
1084                 sg++;
1085                 if (ses->auth_only_len) {
1086                         qm_sg_entry_set64(sg,
1087                                           dpaa_mem_vtop(sym->aead.aad.data));
1088                         sg->length = ses->auth_only_len;
1089                         length += sg->length;
1090                         cpu_to_hw_sg(sg);
1091                         sg++;
1092                 }
1093                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1094                 sg->length = sym->aead.data.length;
1095                 length += sg->length;
1096                 cpu_to_hw_sg(sg);
1097
1098                 memcpy(ctx->digest, sym->aead.digest.data,
1099                        ses->digest_length);
1100                 sg++;
1101
1102                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1103                 sg->length = ses->digest_length;
1104                 length += sg->length;
1105                 sg->final = 1;
1106                 cpu_to_hw_sg(sg);
1107         }
1108         /* input compound frame */
1109         cf->sg[1].length = length;
1110         cf->sg[1].extension = 1;
1111         cf->sg[1].final = 1;
1112         cpu_to_hw_sg(&cf->sg[1]);
1113
1114         /* output */
1115         sg++;
1116         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1117         qm_sg_entry_set64(sg,
1118                 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1119         sg->length = sym->aead.data.length + ses->auth_only_len;
1120         length = sg->length;
1121         if (is_encode(ses)) {
1122                 cpu_to_hw_sg(sg);
1123                 /* set auth output */
1124                 sg++;
1125                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1126                 sg->length = ses->digest_length;
1127                 length += sg->length;
1128         }
1129         sg->final = 1;
1130         cpu_to_hw_sg(sg);
1131
1132         /* output compound frame */
1133         cf->sg[0].length = length;
1134         cf->sg[0].extension = 1;
1135         cpu_to_hw_sg(&cf->sg[0]);
1136
1137         return cf;
1138 }
1139
1140 static inline struct dpaa_sec_job *
1141 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1142 {
1143         struct rte_crypto_sym_op *sym = op->sym;
1144         struct dpaa_sec_job *cf;
1145         struct dpaa_sec_op_ctx *ctx;
1146         struct qm_sg_entry *sg, *out_sg, *in_sg;
1147         struct rte_mbuf *mbuf;
1148         uint8_t req_segs;
1149         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1150                         ses->iv.offset);
1151
1152         if (sym->m_dst) {
1153                 mbuf = sym->m_dst;
1154                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1155         } else {
1156                 mbuf = sym->m_src;
1157                 req_segs = mbuf->nb_segs * 2 + 4;
1158         }
1159
1160         if (req_segs > MAX_SG_ENTRIES) {
1161                 PMD_TX_LOG(ERR, "Cipher-Auth: Max sec segs supported is %d\n",
1162                                 MAX_SG_ENTRIES);
1163                 return NULL;
1164         }
1165
1166         ctx = dpaa_sec_alloc_ctx(ses);
1167         if (!ctx)
1168                 return NULL;
1169
1170         cf = &ctx->job;
1171         ctx->op = op;
1172
1173         rte_prefetch0(cf->sg);
1174
1175         /* output */
1176         out_sg = &cf->sg[0];
1177         out_sg->extension = 1;
1178         if (is_encode(ses))
1179                 out_sg->length = sym->auth.data.length + ses->digest_length;
1180         else
1181                 out_sg->length = sym->auth.data.length;
1182
1183         /* output sg entries */
1184         sg = &cf->sg[2];
1185         qm_sg_entry_set64(out_sg, dpaa_mem_vtop_ctx(ctx, sg));
1186         cpu_to_hw_sg(out_sg);
1187
1188         /* 1st seg */
1189         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1190         sg->length = mbuf->data_len - sym->auth.data.offset;
1191         sg->offset = sym->auth.data.offset;
1192
1193         /* Successive segs */
1194         mbuf = mbuf->next;
1195         while (mbuf) {
1196                 cpu_to_hw_sg(sg);
1197                 sg++;
1198                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1199                 sg->length = mbuf->data_len;
1200                 mbuf = mbuf->next;
1201         }
1202         sg->length -= ses->digest_length;
1203
1204         if (is_encode(ses)) {
1205                 cpu_to_hw_sg(sg);
1206                 /* set auth output */
1207                 sg++;
1208                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1209                 sg->length = ses->digest_length;
1210         }
1211         sg->final = 1;
1212         cpu_to_hw_sg(sg);
1213
1214         /* input */
1215         mbuf = sym->m_src;
1216         in_sg = &cf->sg[1];
1217         in_sg->extension = 1;
1218         in_sg->final = 1;
1219         if (is_encode(ses))
1220                 in_sg->length = ses->iv.length + sym->auth.data.length;
1221         else
1222                 in_sg->length = ses->iv.length + sym->auth.data.length
1223                                                 + ses->digest_length;
1224
1225         /* input sg entries */
1226         sg++;
1227         qm_sg_entry_set64(in_sg, dpaa_mem_vtop_ctx(ctx, sg));
1228         cpu_to_hw_sg(in_sg);
1229
1230         /* 1st seg IV */
1231         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1232         sg->length = ses->iv.length;
1233         cpu_to_hw_sg(sg);
1234
1235         /* 2nd seg */
1236         sg++;
1237         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1238         sg->length = mbuf->data_len - sym->auth.data.offset;
1239         sg->offset = sym->auth.data.offset;
1240
1241         /* Successive segs */
1242         mbuf = mbuf->next;
1243         while (mbuf) {
1244                 cpu_to_hw_sg(sg);
1245                 sg++;
1246                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1247                 sg->length = mbuf->data_len;
1248                 mbuf = mbuf->next;
1249         }
1250
1251         sg->length -= ses->digest_length;
1252         if (is_decode(ses)) {
1253                 cpu_to_hw_sg(sg);
1254                 sg++;
1255                 memcpy(ctx->digest, sym->auth.digest.data,
1256                         ses->digest_length);
1257                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1258                 sg->length = ses->digest_length;
1259         }
1260         sg->final = 1;
1261         cpu_to_hw_sg(sg);
1262
1263         return cf;
1264 }
1265
1266 static inline struct dpaa_sec_job *
1267 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1268 {
1269         struct rte_crypto_sym_op *sym = op->sym;
1270         struct dpaa_sec_job *cf;
1271         struct dpaa_sec_op_ctx *ctx;
1272         struct qm_sg_entry *sg;
1273         rte_iova_t src_start_addr, dst_start_addr;
1274         uint32_t length = 0;
1275         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1276                         ses->iv.offset);
1277
1278         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1279         if (sym->m_dst)
1280                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1281         else
1282                 dst_start_addr = src_start_addr;
1283
1284         ctx = dpaa_sec_alloc_ctx(ses);
1285         if (!ctx)
1286                 return NULL;
1287
1288         cf = &ctx->job;
1289         ctx->op = op;
1290
1291         /* input */
1292         rte_prefetch0(cf->sg);
1293         sg = &cf->sg[2];
1294         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop_ctx(ctx, sg));
1295         if (is_encode(ses)) {
1296                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1297                 sg->length = ses->iv.length;
1298                 length += sg->length;
1299                 cpu_to_hw_sg(sg);
1300
1301                 sg++;
1302                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1303                 sg->length = sym->auth.data.length;
1304                 length += sg->length;
1305                 sg->final = 1;
1306                 cpu_to_hw_sg(sg);
1307         } else {
1308                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1309                 sg->length = ses->iv.length;
1310                 length += sg->length;
1311                 cpu_to_hw_sg(sg);
1312
1313                 sg++;
1314
1315                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1316                 sg->length = sym->auth.data.length;
1317                 length += sg->length;
1318                 cpu_to_hw_sg(sg);
1319
1320                 memcpy(ctx->digest, sym->auth.digest.data,
1321                        ses->digest_length);
1322                 sg++;
1323
1324                 qm_sg_entry_set64(sg, dpaa_mem_vtop_ctx(ctx, ctx->digest));
1325                 sg->length = ses->digest_length;
1326                 length += sg->length;
1327                 sg->final = 1;
1328                 cpu_to_hw_sg(sg);
1329         }
1330         /* input compound frame */
1331         cf->sg[1].length = length;
1332         cf->sg[1].extension = 1;
1333         cf->sg[1].final = 1;
1334         cpu_to_hw_sg(&cf->sg[1]);
1335
1336         /* output */
1337         sg++;
1338         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop_ctx(ctx, sg));
1339         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1340         sg->length = sym->cipher.data.length;
1341         length = sg->length;
1342         if (is_encode(ses)) {
1343                 cpu_to_hw_sg(sg);
1344                 /* set auth output */
1345                 sg++;
1346                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1347                 sg->length = ses->digest_length;
1348                 length += sg->length;
1349         }
1350         sg->final = 1;
1351         cpu_to_hw_sg(sg);
1352
1353         /* output compound frame */
1354         cf->sg[0].length = length;
1355         cf->sg[0].extension = 1;
1356         cpu_to_hw_sg(&cf->sg[0]);
1357
1358         return cf;
1359 }
1360
1361 static inline struct dpaa_sec_job *
1362 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1363 {
1364         struct rte_crypto_sym_op *sym = op->sym;
1365         struct dpaa_sec_job *cf;
1366         struct dpaa_sec_op_ctx *ctx;
1367         struct qm_sg_entry *sg;
1368         phys_addr_t src_start_addr, dst_start_addr;
1369
1370         ctx = dpaa_sec_alloc_ctx(ses);
1371         if (!ctx)
1372                 return NULL;
1373         cf = &ctx->job;
1374         ctx->op = op;
1375
1376         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1377
1378         if (sym->m_dst)
1379                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1380         else
1381                 dst_start_addr = src_start_addr;
1382
1383         /* input */
1384         sg = &cf->sg[1];
1385         qm_sg_entry_set64(sg, src_start_addr);
1386         sg->length = sym->m_src->pkt_len;
1387         sg->final = 1;
1388         cpu_to_hw_sg(sg);
1389
1390         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1391         /* output */
1392         sg = &cf->sg[0];
1393         qm_sg_entry_set64(sg, dst_start_addr);
1394         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1395         cpu_to_hw_sg(sg);
1396
1397         return cf;
1398 }
1399
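/*
 * Note on build_proto(): for IPsec protocol offload the whole packet is
 * handed to SEC, so the input sg covers pkt_len while the output sg is sized
 * to the remaining buffer (buf_len - data_off), because the encapsulated or
 * decapsulated packet length is only known after CAAM has processed the
 * frame; the true length is read back from the output sg on dequeue (see
 * dpaa_sec_deq() and dqrr_out_fq_cb_rx()).
 */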
1400 static uint16_t
1401 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1402                        uint16_t nb_ops)
1403 {
1404         /* Function to transmit the frames to the given device and queue pair */
1405         uint32_t loop;
1406         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1407         uint16_t num_tx = 0;
1408         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1409         uint32_t frames_to_send;
1410         struct rte_crypto_op *op;
1411         struct dpaa_sec_job *cf;
1412         dpaa_sec_session *ses;
1413         struct dpaa_sec_op_ctx *ctx;
1414         uint32_t auth_only_len;
1415         struct qman_fq *inq[DPAA_SEC_BURST];
1416
1417         while (nb_ops) {
1418                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1419                                 DPAA_SEC_BURST : nb_ops;
1420                 for (loop = 0; loop < frames_to_send; loop++) {
1421                         op = *(ops++);
1422                         switch (op->sess_type) {
1423                         case RTE_CRYPTO_OP_WITH_SESSION:
1424                                 ses = (dpaa_sec_session *)
1425                                         get_session_private_data(
1426                                                         op->sym->session,
1427                                                         cryptodev_driver_id);
1428                                 break;
1429                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1430                                 ses = (dpaa_sec_session *)
1431                                         get_sec_session_private_data(
1432                                                         op->sym->sec_session);
1433                                 break;
1434                         default:
1435                                 PMD_TX_LOG(ERR,
1436                                         "sessionless crypto op not supported");
1437                                 frames_to_send = loop;
1438                                 nb_ops = loop;
1439                                 goto send_pkts;
1440                         }
1441                         if (unlikely(!ses->qp || ses->qp != qp)) {
1442                                 PMD_INIT_LOG(DEBUG, "sess->qp - %p qp %p",
1443                                                 ses->qp, qp);
1444                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1445                                         frames_to_send = loop;
1446                                         nb_ops = loop;
1447                                         goto send_pkts;
1448                                 }
1449                         }
1450
1451                         auth_only_len = op->sym->auth.data.length -
1452                                                 op->sym->cipher.data.length;
1453                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1454                                 if (is_auth_only(ses)) {
1455                                         cf = build_auth_only(op, ses);
1456                                 } else if (is_cipher_only(ses)) {
1457                                         cf = build_cipher_only(op, ses);
1458                                 } else if (is_aead(ses)) {
1459                                         cf = build_cipher_auth_gcm(op, ses);
1460                                         auth_only_len = ses->auth_only_len;
1461                                 } else if (is_auth_cipher(ses)) {
1462                                         cf = build_cipher_auth(op, ses);
1463                                 } else if (is_proto_ipsec(ses)) {
1464                                         cf = build_proto(op, ses);
1465                                 } else {
1466                                         PMD_TX_LOG(ERR, "not supported sec op");
1467                                         frames_to_send = loop;
1468                                         nb_ops = loop;
1469                                         goto send_pkts;
1470                                 }
1471                         } else {
1472                                 if (is_auth_only(ses)) {
1473                                         cf = build_auth_only_sg(op, ses);
1474                                 } else if (is_cipher_only(ses)) {
1475                                         cf = build_cipher_only_sg(op, ses);
1476                                 } else if (is_aead(ses)) {
1477                                         cf = build_cipher_auth_gcm_sg(op, ses);
1478                                         auth_only_len = ses->auth_only_len;
1479                                 } else if (is_auth_cipher(ses)) {
1480                                         cf = build_cipher_auth_sg(op, ses);
1481                                 } else {
					PMD_TX_LOG(ERR, "unsupported crypto op");
1483                                         frames_to_send = loop;
1484                                         nb_ops = loop;
1485                                         goto send_pkts;
1486                                 }
1487                         }
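			/*
			 * Dispatch note: contiguous mbufs take the flat job
			 * builders above, while multi-segment mbufs take the
			 * *_sg variants. No SG builder exists for the IPsec
			 * protocol-offload path, so is_proto_ipsec() sessions
			 * are only served from the contiguous branch.
			 */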
1488                         if (unlikely(!cf)) {
1489                                 frames_to_send = loop;
1490                                 nb_ops = loop;
1491                                 goto send_pkts;
1492                         }
1493
1494                         fd = &fds[loop];
1495                         inq[loop] = ses->inq;
1496                         fd->opaque_addr = 0;
1497                         fd->cmd = 0;
1498                         ctx = container_of(cf, struct dpaa_sec_op_ctx, job);
1499                         qm_fd_addr_set64(fd, dpaa_mem_vtop_ctx(ctx, cf->sg));
1500                         fd->_format1 = qm_fd_compound;
1501                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
			/* auth_only_len is set to 0 in the descriptor; when
			 * non-zero it is overwritten here in fd->cmd, which
			 * updates the DPOVRD register.
			 */
1506                         if (auth_only_len)
1507                                 fd->cmd = 0x80000000 | auth_only_len;
1508
1509                 }
1510 send_pkts:
1511                 loop = 0;
1512                 while (loop < frames_to_send) {
1513                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1514                                         frames_to_send - loop);
1515                 }
1516                 nb_ops -= frames_to_send;
1517                 num_tx += frames_to_send;
1518         }
1519
1520         dpaa_qp->tx_pkts += num_tx;
1521         dpaa_qp->tx_errs += nb_ops - num_tx;
1522
1523         return num_tx;
1524 }
1525
1526 static uint16_t
1527 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1528                        uint16_t nb_ops)
1529 {
1530         uint16_t num_rx;
1531         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1532
1533         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1534
1535         dpaa_qp->rx_pkts += num_rx;
1536         dpaa_qp->rx_errs += nb_ops - num_rx;
1537
	PMD_RX_LOG(DEBUG, "SEC Received %d Packets", num_rx);
1539
1540         return num_rx;
1541 }
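
/*
 * Usage sketch (illustrative only, not part of this driver): callers reach
 * the two burst functions above through the cryptodev API. 'dev_id',
 * 'qp_id', 'ops' and 'nb' are hypothetical application-side names.
 *
 *	uint16_t sent = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb);
 *	uint16_t done = 0;
 *
 *	while (done < sent)
 *		done += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *						    &ops[done], sent - done);
 */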
1542
1543 /** Release queue pair */
1544 static int
1545 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1546                             uint16_t qp_id)
1547 {
1548         struct dpaa_sec_dev_private *internals;
1549         struct dpaa_sec_qp *qp = NULL;
1550
1551         PMD_INIT_FUNC_TRACE();
1552
	PMD_INIT_LOG(DEBUG, "dev=%p, queue=%d", dev, qp_id);
1554
1555         internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Invalid qp_id %u, max supported qpid %u",
			     qp_id, internals->max_nb_queue_pairs);
		return -EINVAL;
	}
1561
1562         qp = &internals->qps[qp_id];
1563         qp->internals = NULL;
1564         dev->data->queue_pairs[qp_id] = NULL;
1565
1566         return 0;
1567 }
1568
1569 /** Setup a queue pair */
1570 static int
1571 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1572                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1573                 __rte_unused int socket_id,
1574                 __rte_unused struct rte_mempool *session_pool)
1575 {
1576         struct dpaa_sec_dev_private *internals;
1577         struct dpaa_sec_qp *qp = NULL;
1578
	PMD_INIT_LOG(DEBUG, "dev=%p, queue=%d, conf=%p",
		     dev, qp_id, qp_conf);
1581
1582         internals = dev->data->dev_private;
	if (qp_id >= internals->max_nb_queue_pairs) {
		PMD_INIT_LOG(ERR, "Invalid qp_id %u, max supported qpid %u",
			     qp_id, internals->max_nb_queue_pairs);
		return -EINVAL;
	}
1588
1589         qp = &internals->qps[qp_id];
1590         qp->internals = internals;
1591         dev->data->queue_pairs[qp_id] = qp;
1592
1593         return 0;
1594 }
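
/*
 * Note: queue pairs live in the fixed internals->qps[] array, so setup and
 * release above only wire or unwire pointers. A caller-side sketch, with
 * 'dev_id' and 'sess_mp' as hypothetical names:
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *				       rte_socket_id(), sess_mp);
 */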
1595
1596 /** Start queue pair */
1597 static int
1598 dpaa_sec_queue_pair_start(__rte_unused struct rte_cryptodev *dev,
1599                           __rte_unused uint16_t queue_pair_id)
1600 {
1601         PMD_INIT_FUNC_TRACE();
1602
1603         return 0;
1604 }
1605
1606 /** Stop queue pair */
1607 static int
1608 dpaa_sec_queue_pair_stop(__rte_unused struct rte_cryptodev *dev,
1609                          __rte_unused uint16_t queue_pair_id)
1610 {
1611         PMD_INIT_FUNC_TRACE();
1612
1613         return 0;
1614 }
1615
1616 /** Return the number of allocated queue pairs */
1617 static uint32_t
1618 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1619 {
1620         PMD_INIT_FUNC_TRACE();
1621
1622         return dev->data->nb_queue_pairs;
1623 }
1624
1625 /** Returns the size of session structure */
1626 static unsigned int
1627 dpaa_sec_session_get_size(struct rte_cryptodev *dev __rte_unused)
1628 {
1629         PMD_INIT_FUNC_TRACE();
1630
1631         return sizeof(dpaa_sec_session);
1632 }
1633
1634 static int
1635 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1636                      struct rte_crypto_sym_xform *xform,
1637                      dpaa_sec_session *session)
1638 {
1639         session->cipher_alg = xform->cipher.algo;
1640         session->iv.length = xform->cipher.iv.length;
1641         session->iv.offset = xform->cipher.iv.offset;
1642         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1643                                                RTE_CACHE_LINE_SIZE);
1644         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1645                 PMD_INIT_LOG(ERR, "No Memory for cipher key\n");
1646                 return -ENOMEM;
1647         }
1648         session->cipher_key.length = xform->cipher.key.length;
1649
1650         memcpy(session->cipher_key.data, xform->cipher.key.data,
1651                xform->cipher.key.length);
1652         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1653                         DIR_ENC : DIR_DEC;
1654
1655         return 0;
1656 }
1657
1658 static int
1659 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1660                    struct rte_crypto_sym_xform *xform,
1661                    dpaa_sec_session *session)
1662 {
1663         session->auth_alg = xform->auth.algo;
1664         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1665                                              RTE_CACHE_LINE_SIZE);
1666         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1667                 PMD_INIT_LOG(ERR, "No Memory for auth key\n");
1668                 return -ENOMEM;
1669         }
1670         session->auth_key.length = xform->auth.key.length;
1671         session->digest_length = xform->auth.digest_length;
1672
1673         memcpy(session->auth_key.data, xform->auth.key.data,
1674                xform->auth.key.length);
1675         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1676                         DIR_ENC : DIR_DEC;
1677
1678         return 0;
1679 }
1680
1681 static int
1682 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1683                    struct rte_crypto_sym_xform *xform,
1684                    dpaa_sec_session *session)
1685 {
1686         session->aead_alg = xform->aead.algo;
1687         session->iv.length = xform->aead.iv.length;
1688         session->iv.offset = xform->aead.iv.offset;
1689         session->auth_only_len = xform->aead.aad_length;
1690         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1691                                              RTE_CACHE_LINE_SIZE);
1692         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1693                 PMD_INIT_LOG(ERR, "No Memory for aead key\n");
1694                 return -ENOMEM;
1695         }
1696         session->aead_key.length = xform->aead.key.length;
1697         session->digest_length = xform->aead.digest_length;
1698
1699         memcpy(session->aead_key.data, xform->aead.key.data,
1700                xform->aead.key.length);
1701         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1702                         DIR_ENC : DIR_DEC;
1703
1704         return 0;
1705 }
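
/*
 * For reference, dpaa_sec_aead_init() consumes an AEAD transform shaped as
 * in this caller-side sketch ('key', 'IV_OFFSET' and the lengths are
 * example values, not driver requirements):
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 16,
 *		},
 *	};
 */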
1706
1707 static struct qman_fq *
1708 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1709 {
1710         unsigned int i;
1711
1712         for (i = 0; i < qi->max_nb_sessions; i++) {
1713                 if (qi->inq_attach[i] == 0) {
1714                         qi->inq_attach[i] = 1;
1715                         return &qi->inq[i];
1716                 }
1717         }
	PMD_DRV_LOG(ERR, "All %u sessions in use", qi->max_nb_sessions);
1719
1720         return NULL;
1721 }
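
/*
 * The inq_attach[] array is a simple occupancy map over the Rx frame
 * queues pre-created at init time: attach hands out the first free slot,
 * and detach (below) retires the FQ, takes it out of service and clears
 * the slot for reuse.
 */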
1722
1723 static int
1724 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1725 {
1726         unsigned int i;
1727
1728         for (i = 0; i < qi->max_nb_sessions; i++) {
1729                 if (&qi->inq[i] == fq) {
1730                         qman_retire_fq(fq, NULL);
1731                         qman_oos_fq(fq);
1732                         qi->inq_attach[i] = 0;
1733                         return 0;
1734                 }
1735         }
1736         return -1;
1737 }
1738
1739 static int
1740 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1741 {
1742         int ret;
1743
1744         sess->qp = qp;
1745         ret = dpaa_sec_prep_cdb(sess);
1746         if (ret) {
1747                 PMD_DRV_LOG(ERR, "Unable to prepare sec cdb");
1748                 return -1;
1749         }
1750         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1751                 ret = rte_dpaa_portal_init((void *)0);
1752                 if (ret) {
1753                         PMD_DRV_LOG(ERR, "Failure in affining portal");
1754                         return ret;
1755                 }
1756         }
1757         ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1758                                qman_fq_fqid(&qp->outq));
1759         if (ret)
1760                 PMD_DRV_LOG(ERR, "Unable to init sec queue");
1761
1762         return ret;
1763 }
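
/*
 * Note: sessions are bound to a queue pair lazily. The enqueue path calls
 * dpaa_sec_attach_sess_q() on the first op whose session is not yet bound
 * to the enqueuing qp; this prepares the session CDB and initializes the
 * session Rx queue to deliver results to the qp's outq frame queue.
 */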
1764
1765 static int
1766 dpaa_sec_qp_attach_sess(struct rte_cryptodev *dev __rte_unused,
1767                         uint16_t qp_id __rte_unused,
1768                         void *ses __rte_unused)
1769 {
1770         PMD_INIT_FUNC_TRACE();
1771         return 0;
1772 }
1773
1774 static int
1775 dpaa_sec_qp_detach_sess(struct rte_cryptodev *dev,
1776                         uint16_t qp_id  __rte_unused,
1777                         void *ses)
1778 {
1779         dpaa_sec_session *sess = ses;
1780         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1781
1782         PMD_INIT_FUNC_TRACE();
1783
1784         if (sess->inq)
1785                 dpaa_sec_detach_rxq(qi, sess->inq);
1786         sess->inq = NULL;
1787
1788         sess->qp = NULL;
1789
1790         return 0;
1791 }
1792
1793 static int
1794 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1795                             struct rte_crypto_sym_xform *xform, void *sess)
1796 {
1797         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1798         dpaa_sec_session *session = sess;
1799
1800         PMD_INIT_FUNC_TRACE();
1801
1802         if (unlikely(sess == NULL)) {
1803                 RTE_LOG(ERR, PMD, "invalid session struct\n");
1804                 return -EINVAL;
1805         }
1806
1807         /* Default IV length = 0 */
1808         session->iv.length = 0;
1809
1810         /* Cipher Only */
1811         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		if (dpaa_sec_cipher_init(dev, xform, session) != 0)
			goto err1;
1814
1815         /* Authentication Only */
1816         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1817                    xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		if (dpaa_sec_auth_init(dev, xform, session) != 0)
			goto err1;
1820
1821         /* Cipher then Authenticate */
1822         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1823                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1824                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			if (dpaa_sec_cipher_init(dev, xform, session) != 0 ||
			    dpaa_sec_auth_init(dev, xform->next, session) != 0)
				goto err1;
1827                 } else {
1828                         PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1829                         return -EINVAL;
1830                 }
1831
1832         /* Authenticate then Cipher */
1833         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1834                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1835                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			if (dpaa_sec_auth_init(dev, xform, session) != 0 ||
			    dpaa_sec_cipher_init(dev, xform->next, session) != 0)
				goto err1;
1838                 } else {
1839                         PMD_DRV_LOG(ERR, "Not supported: Auth then Cipher");
1840                         return -EINVAL;
1841                 }
1842
1843         /* AEAD operation for AES-GCM kind of Algorithms */
1844         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1845                    xform->next == NULL) {
		if (dpaa_sec_aead_init(dev, xform, session) != 0)
			goto err1;
1847
1848         } else {
1849                 PMD_DRV_LOG(ERR, "Invalid crypto type");
1850                 return -EINVAL;
1851         }
1852         session->ctx_pool = internals->ctx_pool;
1853         session->inq = dpaa_sec_attach_rxq(internals);
1854         if (session->inq == NULL) {
1855                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
1856                 goto err1;
1857         }
1858
1859         return 0;
1860
1861 err1:
	rte_free(session->cipher_key.data);
	rte_free(session->auth_key.data);
	rte_free(session->aead_key.data);
1864         memset(session, 0, sizeof(dpaa_sec_session));
1865
1866         return -EINVAL;
1867 }
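
/*
 * Caller-side sketch of a chained cipher-then-auth session reaching the
 * function above via rte_cryptodev_sym_session_init(); key names and
 * sizes are illustrative only:
 *
 *	struct rte_crypto_sym_xform auth = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AUTH,
 *		.auth = { .op = RTE_CRYPTO_AUTH_OP_GENERATE,
 *			  .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
 *			  .key = { .data = auth_key, .length = 20 },
 *			  .digest_length = 20 },
 *	};
 *	struct rte_crypto_sym_xform cipher = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = &auth,
 *		.cipher = { .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			    .algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			    .key = { .data = cipher_key, .length = 16 },
 *			    .iv = { .offset = IV_OFFSET, .length = 16 } },
 *	};
 *	struct rte_cryptodev_sym_session *s =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	rte_cryptodev_sym_session_init(dev_id, s, &cipher, sess_mp);
 */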
1868
1869 static int
1870 dpaa_sec_session_configure(struct rte_cryptodev *dev,
1871                 struct rte_crypto_sym_xform *xform,
1872                 struct rte_cryptodev_sym_session *sess,
1873                 struct rte_mempool *mempool)
1874 {
1875         void *sess_private_data;
1876         int ret;
1877
1878         PMD_INIT_FUNC_TRACE();
1879
1880         if (rte_mempool_get(mempool, &sess_private_data)) {
1881                 CDEV_LOG_ERR(
1882                         "Couldn't get object from session mempool");
1883                 return -ENOMEM;
1884         }
1885
1886         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1887         if (ret != 0) {
1888                 PMD_DRV_LOG(ERR, "DPAA PMD: failed to configure "
1889                                 "session parameters");
1890
1891                 /* Return session to mempool */
1892                 rte_mempool_put(mempool, sess_private_data);
1893                 return ret;
1894         }
1895
1896         set_session_private_data(sess, dev->driver_id,
1897                         sess_private_data);
1898
1900         return 0;
1901 }
1902
1903 /** Clear the memory of session so it doesn't leave key material behind */
1904 static void
1905 dpaa_sec_session_clear(struct rte_cryptodev *dev,
1906                 struct rte_cryptodev_sym_session *sess)
1907 {
	struct dpaa_sec_dev_private *qi = dev->data->dev_private;
	uint8_t index = dev->driver_id;
	void *sess_priv = get_session_private_data(sess, index);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
1915
1916         if (sess_priv) {
1917                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1918
1919                 if (s->inq)
1920                         dpaa_sec_detach_rxq(qi, s->inq);
		rte_free(s->cipher_key.data);
		rte_free(s->auth_key.data);
		rte_free(s->aead_key.data);
1923                 memset(s, 0, sizeof(dpaa_sec_session));
1924                 set_session_private_data(sess, index, NULL);
1925                 rte_mempool_put(sess_mp, sess_priv);
1926         }
1927 }
1928
1929 static int
dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1931                            struct rte_security_session_conf *conf,
1932                            void *sess)
1933 {
1934         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1935         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1936         struct rte_crypto_auth_xform *auth_xform;
1937         struct rte_crypto_cipher_xform *cipher_xform;
1938         dpaa_sec_session *session = (dpaa_sec_session *)sess;
1939
1940         PMD_INIT_FUNC_TRACE();
1941
	if (conf->crypto_xform == NULL || conf->crypto_xform->next == NULL) {
		RTE_LOG(ERR, PMD, "Invalid crypto transform chain\n");
		return -EINVAL;
	}

	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1943                 cipher_xform = &conf->crypto_xform->cipher;
1944                 auth_xform = &conf->crypto_xform->next->auth;
1945         } else {
1946                 auth_xform = &conf->crypto_xform->auth;
1947                 cipher_xform = &conf->crypto_xform->next->cipher;
1948         }
1949         session->proto_alg = conf->protocol;
1950         session->cipher_key.data = rte_zmalloc(NULL,
1951                                                cipher_xform->key.length,
1952                                                RTE_CACHE_LINE_SIZE);
1953         if (session->cipher_key.data == NULL &&
1954                         cipher_xform->key.length > 0) {
1955                 RTE_LOG(ERR, PMD, "No Memory for cipher key\n");
1956                 return -ENOMEM;
1957         }
1958
1959         session->cipher_key.length = cipher_xform->key.length;
1960         session->auth_key.data = rte_zmalloc(NULL,
1961                                         auth_xform->key.length,
1962                                         RTE_CACHE_LINE_SIZE);
1963         if (session->auth_key.data == NULL &&
1964                         auth_xform->key.length > 0) {
1965                 RTE_LOG(ERR, PMD, "No Memory for auth key\n");
1966                 rte_free(session->cipher_key.data);
1967                 return -ENOMEM;
1968         }
1969         session->auth_key.length = auth_xform->key.length;
1970         memcpy(session->cipher_key.data, cipher_xform->key.data,
1971                         cipher_xform->key.length);
1972         memcpy(session->auth_key.data, auth_xform->key.data,
1973                         auth_xform->key.length);
1974
1975         switch (auth_xform->algo) {
1976         case RTE_CRYPTO_AUTH_SHA1_HMAC:
1977                 session->auth_alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
1978                 break;
1979         case RTE_CRYPTO_AUTH_MD5_HMAC:
1980                 session->auth_alg = RTE_CRYPTO_AUTH_MD5_HMAC;
1981                 break;
1982         case RTE_CRYPTO_AUTH_SHA256_HMAC:
1983                 session->auth_alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
1984                 break;
1985         case RTE_CRYPTO_AUTH_SHA384_HMAC:
1986                 session->auth_alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
1987                 break;
1988         case RTE_CRYPTO_AUTH_SHA512_HMAC:
1989                 session->auth_alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
1990                 break;
1991         case RTE_CRYPTO_AUTH_AES_CMAC:
1992                 session->auth_alg = RTE_CRYPTO_AUTH_AES_CMAC;
1993                 break;
1994         case RTE_CRYPTO_AUTH_NULL:
1995                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1996                 break;
1997         case RTE_CRYPTO_AUTH_SHA224_HMAC:
1998         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
1999         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2000         case RTE_CRYPTO_AUTH_SHA1:
2001         case RTE_CRYPTO_AUTH_SHA256:
2002         case RTE_CRYPTO_AUTH_SHA512:
2003         case RTE_CRYPTO_AUTH_SHA224:
2004         case RTE_CRYPTO_AUTH_SHA384:
2005         case RTE_CRYPTO_AUTH_MD5:
2006         case RTE_CRYPTO_AUTH_AES_GMAC:
2007         case RTE_CRYPTO_AUTH_KASUMI_F9:
2008         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2009         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2010                 RTE_LOG(ERR, PMD, "Crypto: Unsupported auth alg %u\n",
2011                         auth_xform->algo);
2012                 goto out;
2013         default:
2014                 RTE_LOG(ERR, PMD, "Crypto: Undefined Auth specified %u\n",
2015                         auth_xform->algo);
2016                 goto out;
2017         }
2018
2019         switch (cipher_xform->algo) {
2020         case RTE_CRYPTO_CIPHER_AES_CBC:
2021                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CBC;
2022                 break;
2023         case RTE_CRYPTO_CIPHER_3DES_CBC:
2024                 session->cipher_alg = RTE_CRYPTO_CIPHER_3DES_CBC;
2025                 break;
2026         case RTE_CRYPTO_CIPHER_AES_CTR:
2027                 session->cipher_alg = RTE_CRYPTO_CIPHER_AES_CTR;
2028                 break;
2029         case RTE_CRYPTO_CIPHER_NULL:
2030         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2031         case RTE_CRYPTO_CIPHER_3DES_ECB:
2032         case RTE_CRYPTO_CIPHER_AES_ECB:
2033         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2034                 RTE_LOG(ERR, PMD, "Crypto: Unsupported Cipher alg %u\n",
2035                         cipher_xform->algo);
2036                 goto out;
2037         default:
2038                 RTE_LOG(ERR, PMD, "Crypto: Undefined Cipher specified %u\n",
2039                         cipher_xform->algo);
2040                 goto out;
2041         }
2042
2043         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2044                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2045                                 sizeof(session->ip4_hdr));
2046                 session->ip4_hdr.ip_v = IPVERSION;
2047                 session->ip4_hdr.ip_hl = 5;
2048                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2049                                                 sizeof(session->ip4_hdr));
2050                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2051                 session->ip4_hdr.ip_id = 0;
2052                 session->ip4_hdr.ip_off = 0;
2053                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2054                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2055                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2056                                 : IPPROTO_AH;
2057                 session->ip4_hdr.ip_sum = 0;
2058                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2059                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2060                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2061                                                 (void *)&session->ip4_hdr,
2062                                                 sizeof(struct ip));
2063
2064                 session->encap_pdb.options =
2065                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2066                         PDBOPTS_ESP_OIHI_PDB_INL |
2067                         PDBOPTS_ESP_IVSRC |
2068                         PDBHMO_ESP_ENCAP_DTTL;
2069                 session->encap_pdb.spi = ipsec_xform->spi;
2070                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2071
2072                 session->dir = DIR_ENC;
2073         } else if (ipsec_xform->direction ==
2074                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2075                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2076                 session->decap_pdb.options = sizeof(struct ip) << 16;
2077                 session->dir = DIR_DEC;
	} else {
		goto out;
	}
2080         session->ctx_pool = internals->ctx_pool;
2081         session->inq = dpaa_sec_attach_rxq(internals);
2082         if (session->inq == NULL) {
2083                 PMD_DRV_LOG(ERR, "unable to attach sec queue");
2084                 goto out;
2085         }
2086
2088         return 0;
2089 out:
2090         rte_free(session->auth_key.data);
2091         rte_free(session->cipher_key.data);
2092         memset(session, 0, sizeof(dpaa_sec_session));
	return -EINVAL;
2094 }
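
/*
 * Caller-side sketch of an IPsec (ESP tunnel, egress) configuration
 * consumed above; the SPI and the mempool/xform names are example values.
 * Per the direction check at the top of this function, crypto_xform must
 * be cipher-then-auth for egress and auth-then-cipher for ingress:
 *
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = {
 *			.spi = 1,
 *			.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
 *			.mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
 *			.direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
 *			.tunnel = { .type = RTE_SECURITY_IPSEC_TUNNEL_IPV4 },
 *		},
 *		.crypto_xform = &cipher_xform,
 *	};
 *	struct rte_security_ctx *ctx = (struct rte_security_ctx *)
 *		rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session *sec_sess =
 *		rte_security_session_create(ctx, &conf, sess_mp);
 */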
2095
2096 static int
2097 dpaa_sec_security_session_create(void *dev,
2098                                  struct rte_security_session_conf *conf,
2099                                  struct rte_security_session *sess,
2100                                  struct rte_mempool *mempool)
2101 {
2102         void *sess_private_data;
2103         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2104         int ret;
2105
2106         if (rte_mempool_get(mempool, &sess_private_data)) {
2107                 CDEV_LOG_ERR(
2108                         "Couldn't get object from session mempool");
2109                 return -ENOMEM;
2110         }
2111
2112         switch (conf->protocol) {
2113         case RTE_SECURITY_PROTOCOL_IPSEC:
2114                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2115                                 sess_private_data);
2116                 break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
	}
2122         if (ret != 0) {
		PMD_DRV_LOG(ERR,
			"DPAA PMD: failed to configure session parameters");
2125
2126                 /* Return session to mempool */
2127                 rte_mempool_put(mempool, sess_private_data);
2128                 return ret;
2129         }
2130
2131         set_sec_session_private_data(sess, sess_private_data);
2132
2133         return ret;
2134 }
2135
2136 /** Clear the memory of session so it doesn't leave key material behind */
2137 static int
2138 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2139                 struct rte_security_session *sess)
2140 {
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();
2145
2146         if (sess_priv) {
2147                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2148
2149                 rte_free(s->cipher_key.data);
2150                 rte_free(s->auth_key.data);
		memset(s, 0, sizeof(dpaa_sec_session));
2152                 set_sec_session_private_data(sess, NULL);
2153                 rte_mempool_put(sess_mp, sess_priv);
2154         }
2155         return 0;
2156 }
2157
2159 static int
2160 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2161                        struct rte_cryptodev_config *config __rte_unused)
2162 {
2164         char str[20];
2165         struct dpaa_sec_dev_private *internals;
2166
2167         PMD_INIT_FUNC_TRACE();
2168
2169         internals = dev->data->dev_private;
	snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2171         if (!internals->ctx_pool) {
2172                 internals->ctx_pool = rte_mempool_create((const char *)str,
2173                                                         CTX_POOL_NUM_BUFS,
2174                                                         CTX_POOL_BUF_SIZE,
2175                                                         CTX_POOL_CACHE_SIZE, 0,
2176                                                         NULL, NULL, NULL, NULL,
2177                                                         SOCKET_ID_ANY, 0);
2178                 if (!internals->ctx_pool) {
2179                         RTE_LOG(ERR, PMD, "%s create failed\n", str);
2180                         return -ENOMEM;
2181                 }
	} else {
		RTE_LOG(INFO, PMD, "mempool already created for dev_id %d\n",
			dev->data->dev_id);
	}
2185
2186         return 0;
2187 }
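
/*
 * Note: the ctx_pool created above backs dpaa_sec_alloc_ctx() on the data
 * path. It survives repeated dev_configure calls (only an INFO message is
 * logged when it already exists) and is released in dev_close/uninit.
 */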
2188
2189 static int
2190 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2191 {
2192         PMD_INIT_FUNC_TRACE();
2193         return 0;
2194 }
2195
2196 static void
2197 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2198 {
2199         PMD_INIT_FUNC_TRACE();
2200 }
2201
2202 static int
2203 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2204 {
2205         struct dpaa_sec_dev_private *internals;
2206
2207         PMD_INIT_FUNC_TRACE();
2208
2209         if (dev == NULL)
		return -ENODEV;
2211
2212         internals = dev->data->dev_private;
2213         rte_mempool_free(internals->ctx_pool);
2214         internals->ctx_pool = NULL;
2215
2216         return 0;
2217 }
2218
2219 static void
2220 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2221                        struct rte_cryptodev_info *info)
2222 {
2223         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2224
2225         PMD_INIT_FUNC_TRACE();
2226         if (info != NULL) {
2227                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2228                 info->feature_flags = dev->feature_flags;
2229                 info->capabilities = dpaa_sec_capabilities;
2230                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2231                 info->sym.max_nb_sessions_per_qp =
2232                         RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS /
2233                         RTE_DPAA_MAX_NB_SEC_QPS;
2234                 info->driver_id = cryptodev_driver_id;
2235         }
2236 }
2237
2238 static struct rte_cryptodev_ops crypto_ops = {
2239         .dev_configure        = dpaa_sec_dev_configure,
2240         .dev_start            = dpaa_sec_dev_start,
2241         .dev_stop             = dpaa_sec_dev_stop,
2242         .dev_close            = dpaa_sec_dev_close,
2243         .dev_infos_get        = dpaa_sec_dev_infos_get,
2244         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2245         .queue_pair_release   = dpaa_sec_queue_pair_release,
2246         .queue_pair_start     = dpaa_sec_queue_pair_start,
2247         .queue_pair_stop      = dpaa_sec_queue_pair_stop,
2248         .queue_pair_count     = dpaa_sec_queue_pair_count,
2249         .session_get_size     = dpaa_sec_session_get_size,
2250         .session_configure    = dpaa_sec_session_configure,
2251         .session_clear        = dpaa_sec_session_clear,
2252         .qp_attach_session    = dpaa_sec_qp_attach_sess,
2253         .qp_detach_session    = dpaa_sec_qp_detach_sess,
2254 };
2255
2256 static const struct rte_security_capability *
2257 dpaa_sec_capabilities_get(void *device __rte_unused)
2258 {
2259         return dpaa_sec_security_cap;
2260 }
2261
2262 struct rte_security_ops dpaa_sec_security_ops = {
2263         .session_create = dpaa_sec_security_session_create,
2264         .session_update = NULL,
2265         .session_stats_get = NULL,
2266         .session_destroy = dpaa_sec_security_session_destroy,
2267         .set_pkt_metadata = NULL,
2268         .capabilities_get = dpaa_sec_capabilities_get
2269 };
2270
2271 static int
2272 dpaa_sec_uninit(struct rte_cryptodev *dev)
2273 {
2274         struct dpaa_sec_dev_private *internals;
2275
2276         if (dev == NULL)
2277                 return -ENODEV;
2278
2279         internals = dev->data->dev_private;
2280         rte_free(dev->security_ctx);
2281
2282         /* In case close has been called, internals->ctx_pool would be NULL */
2283         rte_mempool_free(internals->ctx_pool);
2284         rte_free(internals);
2285
	PMD_INIT_LOG(INFO, "Closing DPAA_SEC device %s on numa socket %u",
		     dev->data->name, rte_socket_id());
2288
2289         return 0;
2290 }
2291
2292 static int
2293 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2294 {
2295         struct dpaa_sec_dev_private *internals;
2296         struct rte_security_ctx *security_instance;
2297         struct dpaa_sec_qp *qp;
2298         uint32_t i, flags;
2299         int ret;
2300
2301         PMD_INIT_FUNC_TRACE();
2302
2303         cryptodev->driver_id = cryptodev_driver_id;
2304         cryptodev->dev_ops = &crypto_ops;
2305
2306         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2307         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2308         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2309                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2310                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2311                         RTE_CRYPTODEV_FF_SECURITY |
2312                         RTE_CRYPTODEV_FF_MBUF_SCATTER_GATHER;
2313
2314         internals = cryptodev->data->dev_private;
2315         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2316         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2317
2318         /*
2319          * For secondary processes, we don't initialise any further as primary
2320          * has already done this work. Only check we don't need a different
2321          * RX function
2322          */
2323         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2324                 PMD_INIT_LOG(DEBUG, "Device already init by primary process");
2325                 return 0;
2326         }
2327
2328         /* Initialize security_ctx only for primary process*/
2329         security_instance = rte_malloc("rte_security_instances_ops",
2330                                 sizeof(struct rte_security_ctx), 0);
2331         if (security_instance == NULL)
2332                 return -ENOMEM;
2333         security_instance->device = (void *)cryptodev;
2334         security_instance->ops = &dpaa_sec_security_ops;
2335         security_instance->sess_cnt = 0;
2336         cryptodev->security_ctx = security_instance;
2337
2338         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2339                 /* init qman fq for queue pair */
2340                 qp = &internals->qps[i];
2341                 ret = dpaa_sec_init_tx(&qp->outq);
2342                 if (ret) {
			PMD_INIT_LOG(ERR, "failed to config tx of queue pair %d", i);
2344                         goto init_error;
2345                 }
2346         }
2347
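	/*
	 * Create the per-session Rx FQs up front. Flag semantics, roughly:
	 * QMAN_FQ_FLAG_DYNAMIC_FQID asks QMan to allocate an unused FQID,
	 * QMAN_FQ_FLAG_TO_DCPORTAL points the FQ at a direct-connect portal
	 * (the SEC engine), and QMAN_FQ_FLAG_LOCKED keeps the FQ affine to
	 * the creating portal.
	 */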
2348         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2349                 QMAN_FQ_FLAG_TO_DCPORTAL;
2350         for (i = 0; i < internals->max_nb_sessions; i++) {
2351                 /* create rx qman fq for sessions*/
2352                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2353                 if (unlikely(ret != 0)) {
2354                         PMD_INIT_LOG(ERR, "sec qman_create_fq failed");
2355                         goto init_error;
2356                 }
2357         }
2358
	PMD_INIT_LOG(DEBUG, "driver %s: created", cryptodev->data->name);
2360         return 0;
2361
2362 init_error:
	PMD_INIT_LOG(ERR, "driver %s: create failed", cryptodev->data->name);
2364
2365         dpaa_sec_uninit(cryptodev);
2366         return -EFAULT;
2367 }
2368
2369 static int
2370 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv,
2371                                 struct rte_dpaa_device *dpaa_dev)
2372 {
2373         struct rte_cryptodev *cryptodev;
2374         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2375
2376         int retval;
2377
	snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
		 dpaa_dev->id.dev_id);
2379
2380         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2381         if (cryptodev == NULL)
2382                 return -ENOMEM;
2383
2384         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2385                 cryptodev->data->dev_private = rte_zmalloc_socket(
2386                                         "cryptodev private structure",
2387                                         sizeof(struct dpaa_sec_dev_private),
2388                                         RTE_CACHE_LINE_SIZE,
2389                                         rte_socket_id());
2390
2391                 if (cryptodev->data->dev_private == NULL)
2392                         rte_panic("Cannot allocate memzone for private "
2393                                         "device data");
2394         }
2395
2396         dpaa_dev->crypto_dev = cryptodev;
2397         cryptodev->device = &dpaa_dev->device;
2398         cryptodev->device->driver = &dpaa_drv->driver;
2399
2400         /* init user callbacks */
2401         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2402
2403         /* if sec device version is not configured */
2404         if (!rta_get_sec_era()) {
2405                 const struct device_node *caam_node;
2406
2407                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2408                         const uint32_t *prop = of_get_property(caam_node,
2409                                         "fsl,sec-era",
2410                                         NULL);
2411                         if (prop) {
2412                                 rta_set_sec_era(
2413                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2414                                 break;
2415                         }
2416                 }
2417         }
2418
2419         /* Invoke PMD device initialization function */
2420         retval = dpaa_sec_dev_init(cryptodev);
2421         if (retval == 0)
2422                 return 0;
2423
2424         /* In case of error, cleanup is done */
2425         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2426                 rte_free(cryptodev->data->dev_private);
2427
2428         rte_cryptodev_pmd_release_device(cryptodev);
2429
2430         return -ENXIO;
2431 }
2432
2433 static int
2434 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2435 {
2436         struct rte_cryptodev *cryptodev;
2437         int ret;
2438
2439         cryptodev = dpaa_dev->crypto_dev;
2440         if (cryptodev == NULL)
2441                 return -ENODEV;
2442
2443         ret = dpaa_sec_uninit(cryptodev);
2444         if (ret)
2445                 return ret;
2446
2447         return rte_cryptodev_pmd_destroy(cryptodev);
2448 }
2449
2450 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2451         .drv_type = FSL_DPAA_CRYPTO,
2452         .driver = {
2453                 .name = "DPAA SEC PMD"
2454         },
2455         .probe = cryptodev_dpaa_sec_probe,
2456         .remove = cryptodev_dpaa_sec_remove,
2457 };
2458
2459 static struct cryptodev_driver dpaa_sec_crypto_drv;
2460
2461 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2462 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2463                 cryptodev_driver_id);
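
/*
 * The registration macros above wire the PMD into the framework: the DPAA
 * bus matches FSL_DPAA_CRYPTO devices to rte_dpaa_sec_driver (invoking
 * cryptodev_dpaa_sec_probe at scan time), while the crypto driver
 * registration assigns cryptodev_driver_id, the index used by
 * get/set_session_private_data() throughout this file.
 */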