crypto/dpaa_sec: support null algos for protocol offload
drivers/crypto/dpaa_sec/dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2018 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #include <rte_security_driver.h>
19 #include <rte_cycles.h>
20 #include <rte_dev.h>
21 #include <rte_kvargs.h>
22 #include <rte_malloc.h>
23 #include <rte_mbuf.h>
24 #include <rte_memcpy.h>
25 #include <rte_string_fns.h>
26 #include <rte_spinlock.h>
27
28 #include <fsl_usd.h>
29 #include <fsl_qman.h>
30 #include <of.h>
31
32 /* RTA header files */
33 #include <hw/desc/common.h>
34 #include <hw/desc/algo.h>
35 #include <hw/desc/ipsec.h>
36
37 #include <rte_dpaa_bus.h>
38 #include <dpaa_sec.h>
39 #include <dpaa_sec_log.h>
40
41 enum rta_sec_era rta_sec_era;
42
43 int dpaa_logtype_sec;
44
45 static uint8_t cryptodev_driver_id;
46
47 static __thread struct rte_crypto_op **dpaa_sec_ops;
48 static __thread int dpaa_sec_op_nb;
49
50 static int
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
52
53 static inline void
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
55 {
56         if (!ctx->fd_status) {
57                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58         } else {
59                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
61         }
62
63         /* report op status to the crypto op and then free the ctx memory */
64         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
65 }
66
67 static inline struct dpaa_sec_op_ctx *
68 dpaa_sec_alloc_ctx(dpaa_sec_session *ses)
69 {
70         struct dpaa_sec_op_ctx *ctx;
71         int retval;
72
73         retval = rte_mempool_get(ses->ctx_pool, (void **)(&ctx));
74         if (!ctx || retval) {
75                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
76                 return NULL;
77         }
78         /*
79          * Clear SG memory. There are 16 SG entries of 16 bytes each.
80          * One call to dcbz_64() clears 64 bytes, so it is called 4 times
81          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
82          * each packet, and memset() is costlier than dcbz_64().
83          */
84         dcbz_64(&ctx->job.sg[SG_CACHELINE_0]);
85         dcbz_64(&ctx->job.sg[SG_CACHELINE_1]);
86         dcbz_64(&ctx->job.sg[SG_CACHELINE_2]);
87         dcbz_64(&ctx->job.sg[SG_CACHELINE_3]);
88
89         ctx->ctx_pool = ses->ctx_pool;
90         ctx->vtop_offset = (size_t) ctx
91                                 - rte_mempool_virt2iova(ctx);
92
93         return ctx;
94 }
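/*
 * Editorial sketch of the cache-line math above: 16 SG entries * 16 bytes
 * = 256 bytes, and each dcbz_64() call zeroes one 64-byte cache line, so
 * 256 / 64 = 4 calls cover the whole sg[] array. An equivalent (but
 * slower, due to loop overhead) form would be:
 *
 *   for (i = 0; i < 4; i++)
 *           dcbz_64((uint8_t *)ctx->job.sg + i * 64);
 */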
95
96 static inline rte_iova_t
97 dpaa_mem_vtop(void *vaddr)
98 {
99         const struct rte_memseg *ms;
100
101         ms = rte_mem_virt2memseg(vaddr, NULL);
102         if (ms)
103                 return ms->iova + RTE_PTR_DIFF(vaddr, ms->addr);
104         return (size_t)NULL;
105 }
106
107 static inline void *
108 dpaa_mem_ptov(rte_iova_t paddr)
109 {
110         return rte_mem_iova2virt(paddr);
111 }
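/*
 * Minimal usage sketch for the two translation helpers above, assuming the
 * buffer lives in DPDK-managed memory (e.g. rte_malloc()/mempool memory;
 * an arbitrary stack address is not backed by a memseg and would translate
 * to 0):
 *
 *   void *va = rte_malloc(NULL, 64, 64);
 *   rte_iova_t pa = dpaa_mem_vtop(va);
 *   // round trip within a memseg:
 *   RTE_ASSERT(dpaa_mem_ptov(pa) == va);
 */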
112
113 static void
114 ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
115                    struct qman_fq *fq,
116                    const struct qm_mr_entry *msg)
117 {
118         DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
119                         fq->fqid, msg->ern.rc, msg->ern.seqnum);
120 }
121
122 /* Initialize the queue with the CAAM channel as destination so that
123  * all packets enqueued on this queue are dispatched to CAAM.
124  */
125 static int
126 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
127                  uint32_t fqid_out)
128 {
129         struct qm_mcc_initfq fq_opts;
130         uint32_t flags;
131         int ret = -1;
132
133         /* Clear FQ options */
134         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
135
136         flags = QMAN_INITFQ_FLAG_SCHED;
137         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
138                           QM_INITFQ_WE_CONTEXTB;
139
140         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
141         fq_opts.fqd.context_b = fqid_out;
142         fq_opts.fqd.dest.channel = qm_channel_caam;
143         fq_opts.fqd.dest.wq = 0;
144
145         fq_in->cb.ern  = ern_sec_fq_handler;
146
147         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
148
149         ret = qman_init_fq(fq_in, flags, &fq_opts);
150         if (unlikely(ret != 0))
151                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
152
153         return ret;
154 }
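/*
 * Hedged wiring sketch for the function above (the exact field names on
 * the session are illustrative): context_a carries the IOVA of the shared
 * descriptor (the CDB built by dpaa_sec_prep_cdb()) and context_b names
 * the FQ that receives SEC's output, e.g.:
 *
 *   ret = dpaa_sec_init_rx(sess_rx_fq, dpaa_mem_vtop(&ses->cdb),
 *                          qp->outq.fqid);
 *
 * Frames enqueued on fq_in are then dispatched to CAAM, processed against
 * the shared descriptor, and the results land on fqid_out.
 */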
155
156 /* frames are enqueued on in_fq and CAAM puts the crypto result on out_fq */
157 static enum qman_cb_dqrr_result
158 dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
159                   struct qman_fq *fq __always_unused,
160                   const struct qm_dqrr_entry *dqrr)
161 {
162         const struct qm_fd *fd;
163         struct dpaa_sec_job *job;
164         struct dpaa_sec_op_ctx *ctx;
165
166         if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
167                 return qman_cb_dqrr_defer;
168
169         if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
170                 return qman_cb_dqrr_consume;
171
172         fd = &dqrr->fd;
173         /* sg is embedded in an op ctx,
174          * sg[0] is for output,
175          * sg[1] is for input
176          */
177         job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
178
179         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
180         ctx->fd_status = fd->status;
181         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
182                 struct qm_sg_entry *sg_out;
183                 uint32_t len;
184
185                 sg_out = &job->sg[0];
186                 hw_sg_to_cpu(sg_out);
187                 len = sg_out->length;
188                 ctx->op->sym->m_src->pkt_len = len;
189                 ctx->op->sym->m_src->data_len = len;
190         }
191         dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
192         dpaa_sec_op_ending(ctx);
193
194         return qman_cb_dqrr_consume;
195 }
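/*
 * Compound-frame layout assumed by the callback above (sketch; the array
 * bound follows the "16 SG entries" comment in dpaa_sec_alloc_ctx()):
 *
 *   struct dpaa_sec_job {
 *           struct qm_sg_entry sg[16];  // sg[0] output, sg[1] input,
 *                                       // sg[2..] extension entries
 *   };
 *
 * Since the job is embedded in a dpaa_sec_op_ctx, container_of() on the
 * FD address recovers the whole per-op context, including ctx->op.
 */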
196
197 /* the CAAM result is put into this queue */
198 static int
199 dpaa_sec_init_tx(struct qman_fq *fq)
200 {
201         int ret;
202         struct qm_mcc_initfq opts;
203         uint32_t flags;
204
205         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
206                 QMAN_FQ_FLAG_DYNAMIC_FQID;
207
208         ret = qman_create_fq(0, flags, fq);
209         if (unlikely(ret)) {
210                 DPAA_SEC_ERR("qman_create_fq failed");
211                 return ret;
212         }
213
214         memset(&opts, 0, sizeof(opts));
215         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
216                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
217
218         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
219
220         fq->cb.dqrr = dqrr_out_fq_cb_rx;
221         fq->cb.ern  = ern_sec_fq_handler;
222
223         ret = qman_init_fq(fq, 0, &opts);
224         if (unlikely(ret)) {
225                 DPAA_SEC_ERR("unable to init caam source fq!");
226                 return ret;
227         }
228
229         return ret;
230 }
231
232 static inline int is_cipher_only(dpaa_sec_session *ses)
233 {
234         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
235                 (ses->auth_alg == RTE_CRYPTO_AUTH_NULL));
236 }
237
238 static inline int is_auth_only(dpaa_sec_session *ses)
239 {
240         return ((ses->cipher_alg == RTE_CRYPTO_CIPHER_NULL) &&
241                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL));
242 }
243
244 static inline int is_aead(dpaa_sec_session *ses)
245 {
246         return ((ses->cipher_alg == 0) &&
247                 (ses->auth_alg == 0) &&
248                 (ses->aead_alg != 0));
249 }
250
251 static inline int is_auth_cipher(dpaa_sec_session *ses)
252 {
253         return ((ses->cipher_alg != RTE_CRYPTO_CIPHER_NULL) &&
254                 (ses->auth_alg != RTE_CRYPTO_AUTH_NULL) &&
255                 (ses->proto_alg != RTE_SECURITY_PROTOCOL_IPSEC));
256 }
257
258 static inline int is_proto_ipsec(dpaa_sec_session *ses)
259 {
260         return (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC);
261 }
262
263 static inline int is_encode(dpaa_sec_session *ses)
264 {
265         return ses->dir == DIR_ENC;
266 }
267
268 static inline int is_decode(dpaa_sec_session *ses)
269 {
270         return ses->dir == DIR_DEC;
271 }
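/*
 * Classification sketch (editorial), showing how a NULL-algorithm IPsec
 * session still takes the protocol-offload path: is_proto_ipsec() is
 * checked before the cipher/auth-only predicates in dpaa_sec_prep_cdb().
 *
 *   ses->proto_alg  = RTE_SECURITY_PROTOCOL_IPSEC;
 *   ses->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
 *   ses->auth_alg   = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *   // is_proto_ipsec(ses) == 1; the NULL cipher then maps to
 *   // OP_PCL_IPSEC_NULL in caam_cipher_alg().
 */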
272
273 static inline void
274 caam_auth_alg(dpaa_sec_session *ses, struct alginfo *alginfo_a)
275 {
276         switch (ses->auth_alg) {
277         case RTE_CRYPTO_AUTH_NULL:
278                 alginfo_a->algtype =
279                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
280                         OP_PCL_IPSEC_HMAC_NULL : 0;
281                 ses->digest_length = 0;
282                 break;
283         case RTE_CRYPTO_AUTH_MD5_HMAC:
284                 alginfo_a->algtype =
285                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
286                         OP_PCL_IPSEC_HMAC_MD5_96 : OP_ALG_ALGSEL_MD5;
287                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
288                 break;
289         case RTE_CRYPTO_AUTH_SHA1_HMAC:
290                 alginfo_a->algtype =
291                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
292                         OP_PCL_IPSEC_HMAC_SHA1_96 : OP_ALG_ALGSEL_SHA1;
293                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
294                 break;
295         case RTE_CRYPTO_AUTH_SHA224_HMAC:
296                 alginfo_a->algtype =
297                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
298                         OP_PCL_IPSEC_HMAC_SHA1_160 : OP_ALG_ALGSEL_SHA224;
299                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
300                 break;
301         case RTE_CRYPTO_AUTH_SHA256_HMAC:
302                 alginfo_a->algtype =
303                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
304                         OP_PCL_IPSEC_HMAC_SHA2_256_128 : OP_ALG_ALGSEL_SHA256;
305                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
306                 break;
307         case RTE_CRYPTO_AUTH_SHA384_HMAC:
308                 alginfo_a->algtype =
309                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
310                         OP_PCL_IPSEC_HMAC_SHA2_384_192 : OP_ALG_ALGSEL_SHA384;
311                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
312                 break;
313         case RTE_CRYPTO_AUTH_SHA512_HMAC:
314                 alginfo_a->algtype =
315                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
316                         OP_PCL_IPSEC_HMAC_SHA2_512_256 : OP_ALG_ALGSEL_SHA512;
317                 alginfo_a->algmode = OP_ALG_AAI_HMAC;
318                 break;
319         default:
320                 DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
321         }
322 }
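/*
 * Note on the NULL case above: for plain crypto sessions a NULL auth
 * algorithm leaves algtype as 0, while for IPsec it selects
 * OP_PCL_IPSEC_HMAC_NULL so the protocol descriptor still encodes a valid
 * null authentication; digest_length is forced to 0 in either case.
 */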
323
324 static inline void
325 caam_cipher_alg(dpaa_sec_session *ses, struct alginfo *alginfo_c)
326 {
327         switch (ses->cipher_alg) {
328         case RTE_CRYPTO_CIPHER_NULL:
329                 alginfo_c->algtype =
330                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
331                         OP_PCL_IPSEC_NULL : 0;
332                 break;
333         case RTE_CRYPTO_CIPHER_AES_CBC:
334                 alginfo_c->algtype =
335                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
336                         OP_PCL_IPSEC_AES_CBC : OP_ALG_ALGSEL_AES;
337                 alginfo_c->algmode = OP_ALG_AAI_CBC;
338                 break;
339         case RTE_CRYPTO_CIPHER_3DES_CBC:
340                 alginfo_c->algtype =
341                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
342                         OP_PCL_IPSEC_3DES : OP_ALG_ALGSEL_3DES;
343                 alginfo_c->algmode = OP_ALG_AAI_CBC;
344                 break;
345         case RTE_CRYPTO_CIPHER_AES_CTR:
346                 alginfo_c->algtype =
347                         (ses->proto_alg == RTE_SECURITY_PROTOCOL_IPSEC) ?
348                         OP_PCL_IPSEC_AES_CTR : OP_ALG_ALGSEL_AES;
349                 alginfo_c->algmode = OP_ALG_AAI_CTR;
350                 break;
351         default:
352                 DPAA_SEC_ERR("unsupported cipher alg %d", ses->cipher_alg);
353         }
354 }
355
356 static inline void
357 caam_aead_alg(dpaa_sec_session *ses, struct alginfo *alginfo)
358 {
359         switch (ses->aead_alg) {
360         case RTE_CRYPTO_AEAD_AES_GCM:
361                 alginfo->algtype = OP_ALG_ALGSEL_AES;
362                 alginfo->algmode = OP_ALG_AAI_GCM;
363                 break;
364         default:
365                 DPAA_SEC_ERR("unsupported AEAD alg %d", ses->aead_alg);
366         }
367 }
368
369 /* prepare ipsec proto command block of the session */
370 static int
371 dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
372 {
373         struct alginfo cipherdata = {0}, authdata = {0};
374         struct sec_cdb *cdb = &ses->cdb;
375         int32_t shared_desc_len = 0;
376         int err;
377 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
378         int swap = false;
379 #else
380         int swap = true;
381 #endif
382
383         caam_cipher_alg(ses, &cipherdata);
384         if (cipherdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
385                 DPAA_SEC_ERR("not supported cipher alg");
386                 return -ENOTSUP;
387         }
388
389         cipherdata.key = (size_t)ses->cipher_key.data;
390         cipherdata.keylen = ses->cipher_key.length;
391         cipherdata.key_enc_flags = 0;
392         cipherdata.key_type = RTA_DATA_IMM;
393
394         caam_auth_alg(ses, &authdata);
395         if (authdata.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
396                 DPAA_SEC_ERR("not supported auth alg");
397                 return -ENOTSUP;
398         }
399
400         authdata.key = (size_t)ses->auth_key.data;
401         authdata.keylen = ses->auth_key.length;
402         authdata.key_enc_flags = 0;
403         authdata.key_type = RTA_DATA_IMM;
404
405         cdb->sh_desc[0] = cipherdata.keylen;
406         cdb->sh_desc[1] = authdata.keylen;
407         err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
408                                MIN_JOB_DESC_SIZE,
409                                (unsigned int *)cdb->sh_desc,
410                                &cdb->sh_desc[2], 2);
411
412         if (err < 0) {
413                 DPAA_SEC_ERR("Crypto: Incorrect key lengths");
414                 return err;
415         }
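        /*
         * Editorial note (assumed RTA semantics): rta_inline_query() leaves
         * a bitmask in sh_desc[2] indicating which keys fit inline in the
         * descriptor; bit 0 covers the first (cipher) key and bit 1 the
         * second (auth) key. Keys that do not fit are passed by reference,
         * hence the vtop conversion below.
         */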
416         if (cdb->sh_desc[2] & 1)
417                 cipherdata.key_type = RTA_DATA_IMM;
418         else {
419                 cipherdata.key = (size_t)dpaa_mem_vtop(
420                                         (void *)(size_t)cipherdata.key);
421                 cipherdata.key_type = RTA_DATA_PTR;
422         }
423         if (cdb->sh_desc[2] & (1<<1))
424                 authdata.key_type = RTA_DATA_IMM;
425         else {
426                 authdata.key = (size_t)dpaa_mem_vtop(
427                                         (void *)(size_t)authdata.key);
428                 authdata.key_type = RTA_DATA_PTR;
429         }
430
431         cdb->sh_desc[0] = 0;
432         cdb->sh_desc[1] = 0;
433         cdb->sh_desc[2] = 0;
434         if (ses->dir == DIR_ENC) {
435                 shared_desc_len = cnstr_shdsc_ipsec_new_encap(
436                                 cdb->sh_desc,
437                                 true, swap, SHR_SERIAL,
438                                 &ses->encap_pdb,
439                                 (uint8_t *)&ses->ip4_hdr,
440                                 &cipherdata, &authdata);
441         } else if (ses->dir == DIR_DEC) {
442                 shared_desc_len = cnstr_shdsc_ipsec_new_decap(
443                                 cdb->sh_desc,
444                                 true, swap, SHR_SERIAL,
445                                 &ses->decap_pdb,
446                                 &cipherdata, &authdata);
447         }
448         return shared_desc_len;
449 }
450
451 /* prepare command block of the session */
452 static int
453 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
454 {
455         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
456         int32_t shared_desc_len = 0;
457         struct sec_cdb *cdb = &ses->cdb;
458         int err;
459 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
460         int swap = false;
461 #else
462         int swap = true;
463 #endif
464
465         memset(cdb, 0, sizeof(struct sec_cdb));
466
467         if (is_proto_ipsec(ses)) {
468                 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
469         } else if (is_cipher_only(ses)) {
470                 caam_cipher_alg(ses, &alginfo_c);
471                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
472                         DPAA_SEC_ERR("not supported cipher alg");
473                         return -ENOTSUP;
474                 }
475
476                 alginfo_c.key = (size_t)ses->cipher_key.data;
477                 alginfo_c.keylen = ses->cipher_key.length;
478                 alginfo_c.key_enc_flags = 0;
479                 alginfo_c.key_type = RTA_DATA_IMM;
480
481                 shared_desc_len = cnstr_shdsc_blkcipher(
482                                                 cdb->sh_desc, true,
483                                                 swap, &alginfo_c,
484                                                 NULL,
485                                                 ses->iv.length,
486                                                 ses->dir);
487         } else if (is_auth_only(ses)) {
488                 caam_auth_alg(ses, &alginfo_a);
489                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
490                         DPAA_SEC_ERR("not supported auth alg");
491                         return -ENOTSUP;
492                 }
493
494                 alginfo_a.key = (size_t)ses->auth_key.data;
495                 alginfo_a.keylen = ses->auth_key.length;
496                 alginfo_a.key_enc_flags = 0;
497                 alginfo_a.key_type = RTA_DATA_IMM;
498
499                 shared_desc_len = cnstr_shdsc_hmac(cdb->sh_desc, true,
500                                                    swap, &alginfo_a,
501                                                    !ses->dir,
502                                                    ses->digest_length);
503         } else if (is_aead(ses)) {
504                 caam_aead_alg(ses, &alginfo);
505                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
506                         DPAA_SEC_ERR("not supported aead alg");
507                         return -ENOTSUP;
508                 }
509                 alginfo.key = (size_t)ses->aead_key.data;
510                 alginfo.keylen = ses->aead_key.length;
511                 alginfo.key_enc_flags = 0;
512                 alginfo.key_type = RTA_DATA_IMM;
513
514                 if (ses->dir == DIR_ENC)
515                         shared_desc_len = cnstr_shdsc_gcm_encap(
516                                         cdb->sh_desc, true, swap,
517                                         &alginfo,
518                                         ses->iv.length,
519                                         ses->digest_length);
520                 else
521                         shared_desc_len = cnstr_shdsc_gcm_decap(
522                                         cdb->sh_desc, true, swap,
523                                         &alginfo,
524                                         ses->iv.length,
525                                         ses->digest_length);
526         } else {
527                 caam_cipher_alg(ses, &alginfo_c);
528                 if (alginfo_c.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
529                         DPAA_SEC_ERR("not supported cipher alg");
530                         return -ENOTSUP;
531                 }
532
533                 alginfo_c.key = (size_t)ses->cipher_key.data;
534                 alginfo_c.keylen = ses->cipher_key.length;
535                 alginfo_c.key_enc_flags = 0;
536                 alginfo_c.key_type = RTA_DATA_IMM;
537
538                 caam_auth_alg(ses, &alginfo_a);
539                 if (alginfo_a.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
540                         DPAA_SEC_ERR("not supported auth alg");
541                         return -ENOTSUP;
542                 }
543
544                 alginfo_a.key = (size_t)ses->auth_key.data;
545                 alginfo_a.keylen = ses->auth_key.length;
546                 alginfo_a.key_enc_flags = 0;
547                 alginfo_a.key_type = RTA_DATA_IMM;
548
549                 cdb->sh_desc[0] = alginfo_c.keylen;
550                 cdb->sh_desc[1] = alginfo_a.keylen;
551                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
552                                        MIN_JOB_DESC_SIZE,
553                                        (unsigned int *)cdb->sh_desc,
554                                        &cdb->sh_desc[2], 2);
555
556                 if (err < 0) {
557                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
558                         return err;
559                 }
560                 if (cdb->sh_desc[2] & 1)
561                         alginfo_c.key_type = RTA_DATA_IMM;
562                 else {
563                         alginfo_c.key = (size_t)dpaa_mem_vtop(
564                                                 (void *)(size_t)alginfo_c.key);
565                         alginfo_c.key_type = RTA_DATA_PTR;
566                 }
567                 if (cdb->sh_desc[2] & (1<<1))
568                         alginfo_a.key_type = RTA_DATA_IMM;
569                 else {
570                         alginfo_a.key = (size_t)dpaa_mem_vtop(
571                                                 (void *)(size_t)alginfo_a.key);
572                         alginfo_a.key_type = RTA_DATA_PTR;
573                 }
574                 cdb->sh_desc[0] = 0;
575                 cdb->sh_desc[1] = 0;
576                 cdb->sh_desc[2] = 0;
577                 /* Auth_only_len is set as 0 here and it will be
578                  * overwritten in fd for each packet.
579                  */
580                 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
581                                 true, swap, &alginfo_c, &alginfo_a,
582                                 ses->iv.length, 0,
583                                 ses->digest_length, ses->dir);
584         }
585
586         if (shared_desc_len < 0) {
587                 DPAA_SEC_ERR("error in preparing command block");
588                 return shared_desc_len;
589         }
590
591         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
592         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
593         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
594
595         return 0;
596 }
597
598 /* qp is lockless; it must be accessed by only one thread */
599 static int
600 dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
601 {
602         struct qman_fq *fq;
603         unsigned int pkts = 0;
604         int num_rx_bufs, ret;
605         struct qm_dqrr_entry *dq;
606         uint32_t vdqcr_flags = 0;
607
608         fq = &qp->outq;
609         /*
610          * For requests of fewer than four buffers, we provide the exact
611          * number of buffers and set the QM_VDQCR_EXACT flag. Otherwise we
612          * do not set QM_VDQCR_EXACT, and the portal can return up to two
613          * more buffers than requested, so we request two fewer in that case.
614          */
615         if (nb_ops < 4) {
616                 vdqcr_flags = QM_VDQCR_EXACT;
617                 num_rx_bufs = nb_ops;
618         } else {
619                 num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
620                         (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
621         }
622         ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
623         if (ret)
624                 return 0;
625
626         do {
627                 const struct qm_fd *fd;
628                 struct dpaa_sec_job *job;
629                 struct dpaa_sec_op_ctx *ctx;
630                 struct rte_crypto_op *op;
631
632                 dq = qman_dequeue(fq);
633                 if (!dq)
634                         continue;
635
636                 fd = &dq->fd;
637                 /* sg is embedded in an op ctx,
638                  * sg[0] is for output,
639                  * sg[1] is for input
640                  */
641                 job = dpaa_mem_ptov(qm_fd_addr_get64(fd));
642
643                 ctx = container_of(job, struct dpaa_sec_op_ctx, job);
644                 ctx->fd_status = fd->status;
645                 op = ctx->op;
646                 if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
647                         struct qm_sg_entry *sg_out;
648                         uint32_t len;
649
650                         sg_out = &job->sg[0];
651                         hw_sg_to_cpu(sg_out);
652                         len = sg_out->length;
653                         op->sym->m_src->pkt_len = len;
654                         op->sym->m_src->data_len = len;
655                 }
656                 if (!ctx->fd_status) {
657                         op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
658                 } else {
659                         DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
660                         op->status = RTE_CRYPTO_OP_STATUS_ERROR;
661                 }
662                 ops[pkts++] = op;
663
664                 /* report op status to the crypto op and then free the ctx memory */
665                 rte_mempool_put(ctx->ctx_pool, (void *)ctx);
666
667                 qman_dqrr_consume(fq, dq);
668         } while (fq->flags & QMAN_FQ_STATE_VDQCR);
669
670         return pkts;
671 }
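/*
 * Worked example of the VDQCR sizing above: with nb_ops == 32 the driver
 * requests 30 frames without QM_VDQCR_EXACT, and the portal may return up
 * to 32, which still fits the caller's ops[] array; with nb_ops == 3 it
 * requests exactly 3 with QM_VDQCR_EXACT set.
 */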
672
673 static inline struct dpaa_sec_job *
674 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
675 {
676         struct rte_crypto_sym_op *sym = op->sym;
677         struct rte_mbuf *mbuf = sym->m_src;
678         struct dpaa_sec_job *cf;
679         struct dpaa_sec_op_ctx *ctx;
680         struct qm_sg_entry *sg, *out_sg, *in_sg;
681         phys_addr_t start_addr;
682         uint8_t *old_digest, extra_segs;
683
684         if (is_decode(ses))
685                 extra_segs = 3;
686         else
687                 extra_segs = 2;
688
689         if ((mbuf->nb_segs + extra_segs) > MAX_SG_ENTRIES) {
690                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
691                                 MAX_SG_ENTRIES);
692                 return NULL;
693         }
694         ctx = dpaa_sec_alloc_ctx(ses);
695         if (!ctx)
696                 return NULL;
697
698         cf = &ctx->job;
699         ctx->op = op;
700         old_digest = ctx->digest;
701
702         /* output */
703         out_sg = &cf->sg[0];
704         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
705         out_sg->length = ses->digest_length;
706         cpu_to_hw_sg(out_sg);
707
708         /* input */
709         in_sg = &cf->sg[1];
710         /* need to extend the input to a compound frame */
711         in_sg->extension = 1;
712         in_sg->final = 1;
713         in_sg->length = sym->auth.data.length;
714         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(&cf->sg[2]));
715
716         /* 1st seg */
717         sg = in_sg + 1;
718         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
719         sg->length = mbuf->data_len - sym->auth.data.offset;
720         sg->offset = sym->auth.data.offset;
721
722         /* Successive segs */
723         mbuf = mbuf->next;
724         while (mbuf) {
725                 cpu_to_hw_sg(sg);
726                 sg++;
727                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
728                 sg->length = mbuf->data_len;
729                 mbuf = mbuf->next;
730         }
731
732         if (is_decode(ses)) {
733                 /* Digest verification case */
734                 cpu_to_hw_sg(sg);
735                 sg++;
736                 rte_memcpy(old_digest, sym->auth.digest.data,
737                                 ses->digest_length);
738                 start_addr = dpaa_mem_vtop(old_digest);
739                 qm_sg_entry_set64(sg, start_addr);
740                 sg->length = ses->digest_length;
741                 in_sg->length += ses->digest_length;
742         } else {
743                 /* Digest calculation case */
744                 sg->length -= ses->digest_length;
745         }
746         sg->final = 1;
747         cpu_to_hw_sg(sg);
748         cpu_to_hw_sg(in_sg);
749
750         return cf;
751 }
752
753 /**
754  * packet looks like:
755  *              |<----data_len------->|
756  *    |ip_header|ah_header|icv|payload|
757  *              ^
758  *              |
759  *         mbuf->pkt.data
760  */
761 static inline struct dpaa_sec_job *
762 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
763 {
764         struct rte_crypto_sym_op *sym = op->sym;
765         struct rte_mbuf *mbuf = sym->m_src;
766         struct dpaa_sec_job *cf;
767         struct dpaa_sec_op_ctx *ctx;
768         struct qm_sg_entry *sg;
769         rte_iova_t start_addr;
770         uint8_t *old_digest;
771
772         ctx = dpaa_sec_alloc_ctx(ses);
773         if (!ctx)
774                 return NULL;
775
776         cf = &ctx->job;
777         ctx->op = op;
778         old_digest = ctx->digest;
779
780         start_addr = rte_pktmbuf_iova(mbuf);
781         /* output */
782         sg = &cf->sg[0];
783         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
784         sg->length = ses->digest_length;
785         cpu_to_hw_sg(sg);
786
787         /* input */
788         sg = &cf->sg[1];
789         if (is_decode(ses)) {
790                 /* need to extend the input to a compound frame */
791                 sg->extension = 1;
792                 qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
793                 sg->length = sym->auth.data.length + ses->digest_length;
794                 sg->final = 1;
795                 cpu_to_hw_sg(sg);
796
797                 sg = &cf->sg[2];
798                 /* hash result or digest, save digest first */
799                 rte_memcpy(old_digest, sym->auth.digest.data,
800                            ses->digest_length);
801                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
802                 sg->length = sym->auth.data.length;
803                 cpu_to_hw_sg(sg);
804
805                 /* let the hardware verify the digest */
806                 start_addr = dpaa_mem_vtop(old_digest);
807                 sg++;
808                 qm_sg_entry_set64(sg, start_addr);
809                 sg->length = ses->digest_length;
810                 sg->final = 1;
811                 cpu_to_hw_sg(sg);
812         } else {
813                 qm_sg_entry_set64(sg, start_addr + sym->auth.data.offset);
814                 sg->length = sym->auth.data.length;
815                 sg->final = 1;
816                 cpu_to_hw_sg(sg);
817         }
818
819         return cf;
820 }
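/*
 * Digest-verification sketch for the decode path above: the input frame
 * handed to SEC is | auth data | saved digest |, so the hardware recomputes
 * the digest over the auth region and compares it with the trailing copy;
 * a mismatch is reported through fd->status and surfaces as
 * RTE_CRYPTO_OP_STATUS_ERROR on dequeue. The digest is first copied into
 * ctx->digest so that a DMA-able address inside the ctx mempool can be
 * given to the hardware.
 */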
821
822 static inline struct dpaa_sec_job *
823 build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
824 {
825         struct rte_crypto_sym_op *sym = op->sym;
826         struct dpaa_sec_job *cf;
827         struct dpaa_sec_op_ctx *ctx;
828         struct qm_sg_entry *sg, *out_sg, *in_sg;
829         struct rte_mbuf *mbuf;
830         uint8_t req_segs;
831         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
832                         ses->iv.offset);
833
834         if (sym->m_dst) {
835                 mbuf = sym->m_dst;
836                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
837         } else {
838                 mbuf = sym->m_src;
839                 req_segs = mbuf->nb_segs * 2 + 3;
840         }
841
842         if (req_segs > MAX_SG_ENTRIES) {
843                 DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
844                                 MAX_SG_ENTRIES);
845                 return NULL;
846         }
847
848         ctx = dpaa_sec_alloc_ctx(ses);
849         if (!ctx)
850                 return NULL;
851
852         cf = &ctx->job;
853         ctx->op = op;
854
855         /* output */
856         out_sg = &cf->sg[0];
857         out_sg->extension = 1;
858         out_sg->length = sym->cipher.data.length;
859         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(&cf->sg[2]));
860         cpu_to_hw_sg(out_sg);
861
862         /* 1st seg */
863         sg = &cf->sg[2];
864         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
865         sg->length = mbuf->data_len - sym->cipher.data.offset;
866         sg->offset = sym->cipher.data.offset;
867
868         /* Successive segs */
869         mbuf = mbuf->next;
870         while (mbuf) {
871                 cpu_to_hw_sg(sg);
872                 sg++;
873                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
874                 sg->length = mbuf->data_len;
875                 mbuf = mbuf->next;
876         }
877         sg->final = 1;
878         cpu_to_hw_sg(sg);
879
880         /* input */
881         mbuf = sym->m_src;
882         in_sg = &cf->sg[1];
883         in_sg->extension = 1;
884         in_sg->final = 1;
885         in_sg->length = sym->cipher.data.length + ses->iv.length;
886
887         sg++;
888         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
889         cpu_to_hw_sg(in_sg);
890
891         /* IV */
892         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
893         sg->length = ses->iv.length;
894         cpu_to_hw_sg(sg);
895
896         /* 1st seg */
897         sg++;
898         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
899         sg->length = mbuf->data_len - sym->cipher.data.offset;
900         sg->offset = sym->cipher.data.offset;
901
902         /* Successive segs */
903         mbuf = mbuf->next;
904         while (mbuf) {
905                 cpu_to_hw_sg(sg);
906                 sg++;
907                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
908                 sg->length = mbuf->data_len;
909                 mbuf = mbuf->next;
910         }
911         sg->final = 1;
912         cpu_to_hw_sg(sg);
913
914         return cf;
915 }
916
917 static inline struct dpaa_sec_job *
918 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
919 {
920         struct rte_crypto_sym_op *sym = op->sym;
921         struct dpaa_sec_job *cf;
922         struct dpaa_sec_op_ctx *ctx;
923         struct qm_sg_entry *sg;
924         rte_iova_t src_start_addr, dst_start_addr;
925         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
926                         ses->iv.offset);
927
928         ctx = dpaa_sec_alloc_ctx(ses);
929         if (!ctx)
930                 return NULL;
931
932         cf = &ctx->job;
933         ctx->op = op;
934
935         src_start_addr = rte_pktmbuf_iova(sym->m_src);
936
937         if (sym->m_dst)
938                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
939         else
940                 dst_start_addr = src_start_addr;
941
942         /* output */
943         sg = &cf->sg[0];
944         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
945         sg->length = sym->cipher.data.length + ses->iv.length;
946         cpu_to_hw_sg(sg);
947
948         /* input */
949         sg = &cf->sg[1];
950
951         /* need to extend the input to a compound frame */
952         sg->extension = 1;
953         sg->final = 1;
954         sg->length = sym->cipher.data.length + ses->iv.length;
955         qm_sg_entry_set64(sg, dpaa_mem_vtop(&cf->sg[2]));
956         cpu_to_hw_sg(sg);
957
958         sg = &cf->sg[2];
959         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
960         sg->length = ses->iv.length;
961         cpu_to_hw_sg(sg);
962
963         sg++;
964         qm_sg_entry_set64(sg, src_start_addr + sym->cipher.data.offset);
965         sg->length = sym->cipher.data.length;
966         sg->final = 1;
967         cpu_to_hw_sg(sg);
968
969         return cf;
970 }
971
972 static inline struct dpaa_sec_job *
973 build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
974 {
975         struct rte_crypto_sym_op *sym = op->sym;
976         struct dpaa_sec_job *cf;
977         struct dpaa_sec_op_ctx *ctx;
978         struct qm_sg_entry *sg, *out_sg, *in_sg;
979         struct rte_mbuf *mbuf;
980         uint8_t req_segs;
981         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
982                         ses->iv.offset);
983
984         if (sym->m_dst) {
985                 mbuf = sym->m_dst;
986                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
987         } else {
988                 mbuf = sym->m_src;
989                 req_segs = mbuf->nb_segs * 2 + 4;
990         }
991
992         if (ses->auth_only_len)
993                 req_segs++;
994
995         if (req_segs > MAX_SG_ENTRIES) {
996                 DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
997                                 MAX_SG_ENTRIES);
998                 return NULL;
999         }
1000
1001         ctx = dpaa_sec_alloc_ctx(ses);
1002         if (!ctx)
1003                 return NULL;
1004
1005         cf = &ctx->job;
1006         ctx->op = op;
1007
1008         rte_prefetch0(cf->sg);
1009
1010         /* output */
1011         out_sg = &cf->sg[0];
1012         out_sg->extension = 1;
1013         if (is_encode(ses))
1014                 out_sg->length = sym->aead.data.length + ses->auth_only_len
1015                                                 + ses->digest_length;
1016         else
1017                 out_sg->length = sym->aead.data.length + ses->auth_only_len;
1018
1019         /* output sg entries */
1020         sg = &cf->sg[2];
1021         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1022         cpu_to_hw_sg(out_sg);
1023
1024         /* 1st seg */
1025         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1026         sg->length = mbuf->data_len - sym->aead.data.offset +
1027                                         ses->auth_only_len;
1028         sg->offset = sym->aead.data.offset - ses->auth_only_len;
1029
1030         /* Successive segs */
1031         mbuf = mbuf->next;
1032         while (mbuf) {
1033                 cpu_to_hw_sg(sg);
1034                 sg++;
1035                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1036                 sg->length = mbuf->data_len;
1037                 mbuf = mbuf->next;
1038         }
1039         sg->length -= ses->digest_length;
1040
1041         if (is_encode(ses)) {
1042                 cpu_to_hw_sg(sg);
1043                 /* set auth output */
1044                 sg++;
1045                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1046                 sg->length = ses->digest_length;
1047         }
1048         sg->final = 1;
1049         cpu_to_hw_sg(sg);
1050
1051         /* input */
1052         mbuf = sym->m_src;
1053         in_sg = &cf->sg[1];
1054         in_sg->extension = 1;
1055         in_sg->final = 1;
1056         if (is_encode(ses))
1057                 in_sg->length = ses->iv.length + sym->aead.data.length
1058                                                         + ses->auth_only_len;
1059         else
1060                 in_sg->length = ses->iv.length + sym->aead.data.length
1061                                 + ses->auth_only_len + ses->digest_length;
1062
1063         /* input sg entries */
1064         sg++;
1065         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1066         cpu_to_hw_sg(in_sg);
1067
1068         /* 1st seg IV */
1069         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1070         sg->length = ses->iv.length;
1071         cpu_to_hw_sg(sg);
1072
1073         /* 2nd seg auth only */
1074         if (ses->auth_only_len) {
1075                 sg++;
1076                 qm_sg_entry_set64(sg, dpaa_mem_vtop(sym->aead.aad.data));
1077                 sg->length = ses->auth_only_len;
1078                 cpu_to_hw_sg(sg);
1079         }
1080
1081         /* 3rd seg */
1082         sg++;
1083         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1084         sg->length = mbuf->data_len - sym->aead.data.offset;
1085         sg->offset = sym->aead.data.offset;
1086
1087         /* Successive segs */
1088         mbuf = mbuf->next;
1089         while (mbuf) {
1090                 cpu_to_hw_sg(sg);
1091                 sg++;
1092                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1093                 sg->length = mbuf->data_len;
1094                 mbuf = mbuf->next;
1095         }
1096
1097         if (is_decode(ses)) {
1098                 cpu_to_hw_sg(sg);
1099                 sg++;
1100                 memcpy(ctx->digest, sym->aead.digest.data,
1101                         ses->digest_length);
1102                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1103                 sg->length = ses->digest_length;
1104         }
1105         sg->final = 1;
1106         cpu_to_hw_sg(sg);
1107
1108         return cf;
1109 }
1110
1111 static inline struct dpaa_sec_job *
1112 build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
1113 {
1114         struct rte_crypto_sym_op *sym = op->sym;
1115         struct dpaa_sec_job *cf;
1116         struct dpaa_sec_op_ctx *ctx;
1117         struct qm_sg_entry *sg;
1118         uint32_t length = 0;
1119         rte_iova_t src_start_addr, dst_start_addr;
1120         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1121                         ses->iv.offset);
1122
1123         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1124
1125         if (sym->m_dst)
1126                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1127         else
1128                 dst_start_addr = src_start_addr;
1129
1130         ctx = dpaa_sec_alloc_ctx(ses);
1131         if (!ctx)
1132                 return NULL;
1133
1134         cf = &ctx->job;
1135         ctx->op = op;
1136
1137         /* input */
1138         rte_prefetch0(cf->sg);
1139         sg = &cf->sg[2];
1140         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1141         if (is_encode(ses)) {
1142                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1143                 sg->length = ses->iv.length;
1144                 length += sg->length;
1145                 cpu_to_hw_sg(sg);
1146
1147                 sg++;
1148                 if (ses->auth_only_len) {
1149                         qm_sg_entry_set64(sg,
1150                                           dpaa_mem_vtop(sym->aead.aad.data));
1151                         sg->length = ses->auth_only_len;
1152                         length += sg->length;
1153                         cpu_to_hw_sg(sg);
1154                         sg++;
1155                 }
1156                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1157                 sg->length = sym->aead.data.length;
1158                 length += sg->length;
1159                 sg->final = 1;
1160                 cpu_to_hw_sg(sg);
1161         } else {
1162                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1163                 sg->length = ses->iv.length;
1164                 length += sg->length;
1165                 cpu_to_hw_sg(sg);
1166
1167                 sg++;
1168                 if (ses->auth_only_len) {
1169                         qm_sg_entry_set64(sg,
1170                                           dpaa_mem_vtop(sym->aead.aad.data));
1171                         sg->length = ses->auth_only_len;
1172                         length += sg->length;
1173                         cpu_to_hw_sg(sg);
1174                         sg++;
1175                 }
1176                 qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
1177                 sg->length = sym->aead.data.length;
1178                 length += sg->length;
1179                 cpu_to_hw_sg(sg);
1180
1181                 memcpy(ctx->digest, sym->aead.digest.data,
1182                        ses->digest_length);
1183                 sg++;
1184
1185                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1186                 sg->length = ses->digest_length;
1187                 length += sg->length;
1188                 sg->final = 1;
1189                 cpu_to_hw_sg(sg);
1190         }
1191         /* input compound frame */
1192         cf->sg[1].length = length;
1193         cf->sg[1].extension = 1;
1194         cf->sg[1].final = 1;
1195         cpu_to_hw_sg(&cf->sg[1]);
1196
1197         /* output */
1198         sg++;
1199         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1200         qm_sg_entry_set64(sg,
1201                 dst_start_addr + sym->aead.data.offset - ses->auth_only_len);
1202         sg->length = sym->aead.data.length + ses->auth_only_len;
1203         length = sg->length;
1204         if (is_encode(ses)) {
1205                 cpu_to_hw_sg(sg);
1206                 /* set auth output */
1207                 sg++;
1208                 qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
1209                 sg->length = ses->digest_length;
1210                 length += sg->length;
1211         }
1212         sg->final = 1;
1213         cpu_to_hw_sg(sg);
1214
1215         /* output compound frame */
1216         cf->sg[0].length = length;
1217         cf->sg[0].extension = 1;
1218         cpu_to_hw_sg(&cf->sg[0]);
1219
1220         return cf;
1221 }
1222
1223 static inline struct dpaa_sec_job *
1224 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1225 {
1226         struct rte_crypto_sym_op *sym = op->sym;
1227         struct dpaa_sec_job *cf;
1228         struct dpaa_sec_op_ctx *ctx;
1229         struct qm_sg_entry *sg, *out_sg, *in_sg;
1230         struct rte_mbuf *mbuf;
1231         uint8_t req_segs;
1232         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1233                         ses->iv.offset);
1234
1235         if (sym->m_dst) {
1236                 mbuf = sym->m_dst;
1237                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1238         } else {
1239                 mbuf = sym->m_src;
1240                 req_segs = mbuf->nb_segs * 2 + 4;
1241         }
1242
1243         if (req_segs > MAX_SG_ENTRIES) {
1244                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1245                                 MAX_SG_ENTRIES);
1246                 return NULL;
1247         }
1248
1249         ctx = dpaa_sec_alloc_ctx(ses);
1250         if (!ctx)
1251                 return NULL;
1252
1253         cf = &ctx->job;
1254         ctx->op = op;
1255
1256         rte_prefetch0(cf->sg);
1257
1258         /* output */
1259         out_sg = &cf->sg[0];
1260         out_sg->extension = 1;
1261         if (is_encode(ses))
1262                 out_sg->length = sym->auth.data.length + ses->digest_length;
1263         else
1264                 out_sg->length = sym->auth.data.length;
1265
1266         /* output sg entries */
1267         sg = &cf->sg[2];
1268         qm_sg_entry_set64(out_sg, dpaa_mem_vtop(sg));
1269         cpu_to_hw_sg(out_sg);
1270
1271         /* 1st seg */
1272         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1273         sg->length = mbuf->data_len - sym->auth.data.offset;
1274         sg->offset = sym->auth.data.offset;
1275
1276         /* Successive segs */
1277         mbuf = mbuf->next;
1278         while (mbuf) {
1279                 cpu_to_hw_sg(sg);
1280                 sg++;
1281                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1282                 sg->length = mbuf->data_len;
1283                 mbuf = mbuf->next;
1284         }
1285         sg->length -= ses->digest_length;
1286
1287         if (is_encode(ses)) {
1288                 cpu_to_hw_sg(sg);
1289                 /* set auth output */
1290                 sg++;
1291                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1292                 sg->length = ses->digest_length;
1293         }
1294         sg->final = 1;
1295         cpu_to_hw_sg(sg);
1296
1297         /* input */
1298         mbuf = sym->m_src;
1299         in_sg = &cf->sg[1];
1300         in_sg->extension = 1;
1301         in_sg->final = 1;
1302         if (is_encode(ses))
1303                 in_sg->length = ses->iv.length + sym->auth.data.length;
1304         else
1305                 in_sg->length = ses->iv.length + sym->auth.data.length
1306                                                 + ses->digest_length;
1307
1308         /* input sg entries */
1309         sg++;
1310         qm_sg_entry_set64(in_sg, dpaa_mem_vtop(sg));
1311         cpu_to_hw_sg(in_sg);
1312
1313         /* 1st seg IV */
1314         qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1315         sg->length = ses->iv.length;
1316         cpu_to_hw_sg(sg);
1317
1318         /* 2nd seg */
1319         sg++;
1320         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1321         sg->length = mbuf->data_len - sym->auth.data.offset;
1322         sg->offset = sym->auth.data.offset;
1323
1324         /* Successive segs */
1325         mbuf = mbuf->next;
1326         while (mbuf) {
1327                 cpu_to_hw_sg(sg);
1328                 sg++;
1329                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1330                 sg->length = mbuf->data_len;
1331                 mbuf = mbuf->next;
1332         }
1333
1334         sg->length -= ses->digest_length;
1335         if (is_decode(ses)) {
1336                 cpu_to_hw_sg(sg);
1337                 sg++;
1338                 memcpy(ctx->digest, sym->auth.digest.data,
1339                         ses->digest_length);
1340                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1341                 sg->length = ses->digest_length;
1342         }
1343         sg->final = 1;
1344         cpu_to_hw_sg(sg);
1345
1346         return cf;
1347 }
1348
1349 static inline struct dpaa_sec_job *
1350 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1351 {
1352         struct rte_crypto_sym_op *sym = op->sym;
1353         struct dpaa_sec_job *cf;
1354         struct dpaa_sec_op_ctx *ctx;
1355         struct qm_sg_entry *sg;
1356         rte_iova_t src_start_addr, dst_start_addr;
1357         uint32_t length = 0;
1358         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1359                         ses->iv.offset);
1360
1361         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1362         if (sym->m_dst)
1363                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1364         else
1365                 dst_start_addr = src_start_addr;
1366
1367         ctx = dpaa_sec_alloc_ctx(ses);
1368         if (!ctx)
1369                 return NULL;
1370
1371         cf = &ctx->job;
1372         ctx->op = op;
1373
1374         /* input */
1375         rte_prefetch0(cf->sg);
1376         sg = &cf->sg[2];
1377         qm_sg_entry_set64(&cf->sg[1], dpaa_mem_vtop(sg));
1378         if (is_encode(ses)) {
1379                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1380                 sg->length = ses->iv.length;
1381                 length += sg->length;
1382                 cpu_to_hw_sg(sg);
1383
1384                 sg++;
1385                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1386                 sg->length = sym->auth.data.length;
1387                 length += sg->length;
1388                 sg->final = 1;
1389                 cpu_to_hw_sg(sg);
1390         } else {
1391                 qm_sg_entry_set64(sg, dpaa_mem_vtop(IV_ptr));
1392                 sg->length = ses->iv.length;
1393                 length += sg->length;
1394                 cpu_to_hw_sg(sg);
1395
1396                 sg++;
1397
1398                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1399                 sg->length = sym->auth.data.length;
1400                 length += sg->length;
1401                 cpu_to_hw_sg(sg);
1402
1403                 memcpy(ctx->digest, sym->auth.digest.data,
1404                        ses->digest_length);
1405                 sg++;
1406
1407                 qm_sg_entry_set64(sg, dpaa_mem_vtop(ctx->digest));
1408                 sg->length = ses->digest_length;
1409                 length += sg->length;
1410                 sg->final = 1;
1411                 cpu_to_hw_sg(sg);
1412         }
1413         /* input compound frame */
1414         cf->sg[1].length = length;
1415         cf->sg[1].extension = 1;
1416         cf->sg[1].final = 1;
1417         cpu_to_hw_sg(&cf->sg[1]);
1418
1419         /* output */
1420         sg++;
1421         qm_sg_entry_set64(&cf->sg[0], dpaa_mem_vtop(sg));
1422         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1423         sg->length = sym->cipher.data.length;
1424         length = sg->length;
1425         if (is_encode(ses)) {
1426                 cpu_to_hw_sg(sg);
1427                 /* set auth output */
1428                 sg++;
1429                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1430                 sg->length = ses->digest_length;
1431                 length += sg->length;
1432         }
1433         sg->final = 1;
1434         cpu_to_hw_sg(sg);
1435
1436         /* output compound frame */
1437         cf->sg[0].length = length;
1438         cf->sg[0].extension = 1;
1439         cpu_to_hw_sg(&cf->sg[0]);
1440
1441         return cf;
1442 }
1443
1444 static inline struct dpaa_sec_job *
1445 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1446 {
1447         struct rte_crypto_sym_op *sym = op->sym;
1448         struct dpaa_sec_job *cf;
1449         struct dpaa_sec_op_ctx *ctx;
1450         struct qm_sg_entry *sg;
1451         phys_addr_t src_start_addr, dst_start_addr;
1452
1453         ctx = dpaa_sec_alloc_ctx(ses);
1454         if (!ctx)
1455                 return NULL;
1456         cf = &ctx->job;
1457         ctx->op = op;
1458
1459         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1460
1461         if (sym->m_dst)
1462                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1463         else
1464                 dst_start_addr = src_start_addr;
1465
1466         /* input */
1467         sg = &cf->sg[1];
1468         qm_sg_entry_set64(sg, src_start_addr);
1469         sg->length = sym->m_src->pkt_len;
1470         sg->final = 1;
1471         cpu_to_hw_sg(sg);
1472
1473         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1474         /* output */
1475         sg = &cf->sg[0];
1476         qm_sg_entry_set64(sg, dst_start_addr);
1477         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1478         cpu_to_hw_sg(sg);
1479
1480         return cf;
1481 }
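/*
 * Protocol-offload sketch: for IPsec sessions the whole packet is handed
 * to SEC (input length = pkt_len) and the output entry is sized to the
 * remaining buffer (buf_len - data_off), since encapsulation grows the
 * frame. The real output length is read back from sg[0] on dequeue, where
 * m_src->pkt_len and data_len are updated (see dqrr_out_fq_cb_rx()).
 */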
1482
1483 static uint16_t
1484 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1485                        uint16_t nb_ops)
1486 {
1487         /* Function to transmit frames to the given device and queue pair */
1488         uint32_t loop;
1489         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1490         uint16_t num_tx = 0;
1491         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1492         uint32_t frames_to_send;
1493         struct rte_crypto_op *op;
1494         struct dpaa_sec_job *cf;
1495         dpaa_sec_session *ses;
1496         uint32_t auth_only_len;
1497         struct qman_fq *inq[DPAA_SEC_BURST];
1498
1499         while (nb_ops) {
1500                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1501                                 DPAA_SEC_BURST : nb_ops;
1502                 for (loop = 0; loop < frames_to_send; loop++) {
1503                         op = *(ops++);
1504                         switch (op->sess_type) {
1505                         case RTE_CRYPTO_OP_WITH_SESSION:
1506                                 ses = (dpaa_sec_session *)
1507                                         get_sym_session_private_data(
1508                                                         op->sym->session,
1509                                                         cryptodev_driver_id);
1510                                 break;
1511                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1512                                 ses = (dpaa_sec_session *)
1513                                         get_sec_session_private_data(
1514                                                         op->sym->sec_session);
1515                                 break;
1516                         default:
1517                                 DPAA_SEC_DP_ERR(
1518                                         "sessionless crypto op not supported");
1519                                 frames_to_send = loop;
1520                                 nb_ops = loop;
1521                                 goto send_pkts;
1522                         }
1523                         if (unlikely(!ses->qp)) {
1524                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1525                                         frames_to_send = loop;
1526                                         nb_ops = loop;
1527                                         goto send_pkts;
1528                                 }
1529                         } else if (unlikely(ses->qp != qp)) {
1530                                 DPAA_SEC_DP_ERR("Old:sess->qp = %p"
1531                                         " New qp = %p\n", ses->qp, qp);
1532                                 frames_to_send = loop;
1533                                 nb_ops = loop;
1534                                 goto send_pkts;
1535                         }
1536
1537                         auth_only_len = op->sym->auth.data.length -
1538                                                 op->sym->cipher.data.length;
1539                         if (rte_pktmbuf_is_contiguous(op->sym->m_src)) {
1540                                 if (is_proto_ipsec(ses)) {
1541                                         cf = build_proto(op, ses);
1542                                 } else if (is_auth_only(ses)) {
1543                                         cf = build_auth_only(op, ses);
1544                                 } else if (is_cipher_only(ses)) {
1545                                         cf = build_cipher_only(op, ses);
1546                                 } else if (is_aead(ses)) {
1547                                         cf = build_cipher_auth_gcm(op, ses);
1548                                         auth_only_len = ses->auth_only_len;
1549                                 } else if (is_auth_cipher(ses)) {
1550                                         cf = build_cipher_auth(op, ses);
1551                                 } else {
1552                                         DPAA_SEC_DP_ERR("operation not supported");
1553                                         frames_to_send = loop;
1554                                         nb_ops = loop;
1555                                         goto send_pkts;
1556                                 }
1557                         } else {
1558                                 if (is_auth_only(ses)) {
1559                                         cf = build_auth_only_sg(op, ses);
1560                                 } else if (is_cipher_only(ses)) {
1561                                         cf = build_cipher_only_sg(op, ses);
1562                                 } else if (is_aead(ses)) {
1563                                         cf = build_cipher_auth_gcm_sg(op, ses);
1564                                         auth_only_len = ses->auth_only_len;
1565                                 } else if (is_auth_cipher(ses)) {
1566                                         cf = build_cipher_auth_sg(op, ses);
1567                                 } else {
1568                                         DPAA_SEC_DP_ERR("operation not supported");
1569                                         frames_to_send = loop;
1570                                         nb_ops = loop;
1571                                         goto send_pkts;
1572                                 }
1573                         }
1574                         if (unlikely(!cf)) {
1575                                 frames_to_send = loop;
1576                                 nb_ops = loop;
1577                                 goto send_pkts;
1578                         }
1579
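                              /* Compound frame: the FD points at cf->sg,
                               * where sg[0] is the output descriptor and
                               * sg[1] the input descriptor for SEC.
                               */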
1580                         fd = &fds[loop];
1581                         inq[loop] = ses->inq;
1582                         fd->opaque_addr = 0;
1583                         fd->cmd = 0;
1584                         qm_fd_addr_set64(fd, dpaa_mem_vtop(cf->sg));
1585                         fd->_format1 = qm_fd_compound;
1586                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1587                         /* Auth_only_len is set as 0 in descriptor and it is
1588                          * overwritten here in the fd.cmd which will update
1589                          * the DPOVRD reg.
1590                          */
1591                         if (auth_only_len)
1592                                 fd->cmd = 0x80000000 | auth_only_len;
1593
1594                 }
1595 send_pkts:
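                      /* qman_enqueue_multi_fq() may accept fewer frames
                       * than requested; retry until the whole batch is
                       * queued.
                       */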
1596                 loop = 0;
1597                 while (loop < frames_to_send) {
1598                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1599                                         frames_to_send - loop);
1600                 }
1601                 nb_ops -= frames_to_send;
1602                 num_tx += frames_to_send;
1603         }
1604
1605         dpaa_qp->tx_pkts += num_tx;
1606         dpaa_qp->tx_errs += nb_ops_in - num_tx;
1607
1608         return num_tx;
1609 }
1610
1611 static uint16_t
1612 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1613                        uint16_t nb_ops)
1614 {
1615         uint16_t num_rx;
1616         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1617
1618         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1619
1620         dpaa_qp->rx_pkts += num_rx;
1621         dpaa_qp->rx_errs += nb_ops - num_rx;
1622
1623         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1624
1625         return num_rx;
1626 }
1627
1628 /** Release queue pair */
1629 static int
1630 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1631                             uint16_t qp_id)
1632 {
1633         struct dpaa_sec_dev_private *internals;
1634         struct dpaa_sec_qp *qp = NULL;
1635
1636         PMD_INIT_FUNC_TRACE();
1637
1638         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
1639
1640         internals = dev->data->dev_private;
1641         if (qp_id >= internals->max_nb_queue_pairs) {
1642                 DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
1643                              qp_id, internals->max_nb_queue_pairs);
1644                 return -EINVAL;
1645         }
1646
1647         qp = &internals->qps[qp_id];
1648         qp->internals = NULL;
1649         dev->data->queue_pairs[qp_id] = NULL;
1650
1651         return 0;
1652 }
1653
1654 /** Setup a queue pair */
1655 static int
1656 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1657                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1658                 __rte_unused int socket_id,
1659                 __rte_unused struct rte_mempool *session_pool)
1660 {
1661         struct dpaa_sec_dev_private *internals;
1662         struct dpaa_sec_qp *qp = NULL;
1663
1664         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
1665
1666         internals = dev->data->dev_private;
1667         if (qp_id >= internals->max_nb_queue_pairs) {
1668                 DPAA_SEC_ERR("Invalid qp_id %u, max supported is %u",
1669                              qp_id, internals->max_nb_queue_pairs);
1670                 return -EINVAL;
1671         }
1672
1673         qp = &internals->qps[qp_id];
1674         qp->internals = internals;
1675         dev->data->queue_pairs[qp_id] = qp;
1676
1677         return 0;
1678 }
1679
1680 /** Return the number of allocated queue pairs */
1681 static uint32_t
1682 dpaa_sec_queue_pair_count(struct rte_cryptodev *dev)
1683 {
1684         PMD_INIT_FUNC_TRACE();
1685
1686         return dev->data->nb_queue_pairs;
1687 }
1688
1689 /** Returns the size of session structure */
1690 static unsigned int
1691 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1692 {
1693         PMD_INIT_FUNC_TRACE();
1694
1695         return sizeof(dpaa_sec_session);
1696 }
1697
1698 static int
1699 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1700                      struct rte_crypto_sym_xform *xform,
1701                      dpaa_sec_session *session)
1702 {
1703         session->cipher_alg = xform->cipher.algo;
1704         session->iv.length = xform->cipher.iv.length;
1705         session->iv.offset = xform->cipher.iv.offset;
1706         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1707                                                RTE_CACHE_LINE_SIZE);
1708         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1709                 DPAA_SEC_ERR("No Memory for cipher key");
1710                 return -ENOMEM;
1711         }
1712         session->cipher_key.length = xform->cipher.key.length;
1713
1714         memcpy(session->cipher_key.data, xform->cipher.key.data,
1715                xform->cipher.key.length);
1716         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
1717                         DIR_ENC : DIR_DEC;
1718
1719         return 0;
1720 }
1721
1722 static int
1723 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
1724                    struct rte_crypto_sym_xform *xform,
1725                    dpaa_sec_session *session)
1726 {
1727         session->auth_alg = xform->auth.algo;
1728         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
1729                                              RTE_CACHE_LINE_SIZE);
1730         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
1731                 DPAA_SEC_ERR("No Memory for auth key");
1732                 return -ENOMEM;
1733         }
1734         session->auth_key.length = xform->auth.key.length;
1735         session->digest_length = xform->auth.digest_length;
1736
1737         memcpy(session->auth_key.data, xform->auth.key.data,
1738                xform->auth.key.length);
1739         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
1740                         DIR_ENC : DIR_DEC;
1741
1742         return 0;
1743 }
1744
1745 static int
1746 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
1747                    struct rte_crypto_sym_xform *xform,
1748                    dpaa_sec_session *session)
1749 {
1750         session->aead_alg = xform->aead.algo;
1751         session->iv.length = xform->aead.iv.length;
1752         session->iv.offset = xform->aead.iv.offset;
1753         session->auth_only_len = xform->aead.aad_length;
1754         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
1755                                              RTE_CACHE_LINE_SIZE);
1756         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
1757                 DPAA_SEC_ERR("No Memory for aead key\n");
1758                 return -ENOMEM;
1759         }
1760         session->aead_key.length = xform->aead.key.length;
1761         session->digest_length = xform->aead.digest_length;
1762
1763         memcpy(session->aead_key.data, xform->aead.key.data,
1764                xform->aead.key.length);
1765         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
1766                         DIR_ENC : DIR_DEC;
1767
1768         return 0;
1769 }
1770
1771 static struct qman_fq *
1772 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
1773 {
1774         unsigned int i;
1775
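             /* Linear scan for a free per-session SEC input FQ; call
              * sites take internals->lock while attaching.
              */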
1776         for (i = 0; i < qi->max_nb_sessions; i++) {
1777                 if (qi->inq_attach[i] == 0) {
1778                         qi->inq_attach[i] = 1;
1779                         return &qi->inq[i];
1780                 }
1781         }
1782         DPAA_SEC_WARN("All %u session RX queues in use", qi->max_nb_sessions);
1783
1784         return NULL;
1785 }
1786
1787 static int
1788 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
1789 {
1790         unsigned int i;
1791
1792         for (i = 0; i < qi->max_nb_sessions; i++) {
1793                 if (&qi->inq[i] == fq) {
1794                         qman_retire_fq(fq, NULL);
1795                         qman_oos_fq(fq);
1796                         qi->inq_attach[i] = 0;
1797                         return 0;
1798                 }
1799         }
1800         return -1;
1801 }
1802
1803 static int
1804 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
1805 {
1806         int ret;
1807
1808         sess->qp = qp;
1809         ret = dpaa_sec_prep_cdb(sess);
1810         if (ret) {
1811                 DPAA_SEC_ERR("Unable to prepare sec cdb");
1812                 return -1;
1813         }
1814         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
1815                 ret = rte_dpaa_portal_init((void *)0);
1816                 if (ret) {
1817                         DPAA_SEC_ERR("Failure in affining portal");
1818                         return ret;
1819                 }
1820         }
1821         ret = dpaa_sec_init_rx(sess->inq, dpaa_mem_vtop(&sess->cdb),
1822                                qman_fq_fqid(&qp->outq));
1823         if (ret)
1824                 DPAA_SEC_ERR("Unable to init sec queue");
1825
1826         return ret;
1827 }
1828
1829 static int
1830 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
1831                             struct rte_crypto_sym_xform *xform, void *sess)
1832 {
1833         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1834         dpaa_sec_session *session = sess;
1835
1836         PMD_INIT_FUNC_TRACE();
1837
1838         if (unlikely(sess == NULL)) {
1839                 DPAA_SEC_ERR("invalid session struct");
1840                 return -EINVAL;
1841         }
1842         memset(session, 0, sizeof(dpaa_sec_session));
1843
1844         /* Default IV length = 0 */
1845         session->iv.length = 0;
1846
1847         /* Cipher Only */
1848         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
1849                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
1850                 dpaa_sec_cipher_init(dev, xform, session);
1851
1852         /* Authentication Only */
1853         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1854                    xform->next == NULL) {
1855                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
1856                 dpaa_sec_auth_init(dev, xform, session);
1857
1858         /* Cipher then Authenticate */
1859         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
1860                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
1861                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
1862                         dpaa_sec_cipher_init(dev, xform, session);
1863                         dpaa_sec_auth_init(dev, xform->next, session);
1864                 } else {
1865                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
1866                         return -EINVAL;
1867                 }
1868
1869         /* Authenticate then Cipher */
1870         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
1871                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
1872                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
1873                         dpaa_sec_auth_init(dev, xform, session);
1874                         dpaa_sec_cipher_init(dev, xform->next, session);
1875                 } else {
1876                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
1877                         return -EINVAL;
1878                 }
1879
1880         /* AEAD operation for AES-GCM kind of Algorithms */
1881         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
1882                    xform->next == NULL) {
1883                 dpaa_sec_aead_init(dev, xform, session);
1884
1885         } else {
1886                 DPAA_SEC_ERR("Invalid crypto type");
1887                 return -EINVAL;
1888         }
1889         session->ctx_pool = internals->ctx_pool;
1890         rte_spinlock_lock(&internals->lock);
1891         session->inq = dpaa_sec_attach_rxq(internals);
1892         rte_spinlock_unlock(&internals->lock);
1893         if (session->inq == NULL) {
1894                 DPAA_SEC_ERR("unable to attach sec queue");
1895                 goto err1;
1896         }
1897
1898         return 0;
1899
1900 err1:
1901         rte_free(session->cipher_key.data);
1902         rte_free(session->auth_key.data);
1903         memset(session, 0, sizeof(dpaa_sec_session));
1904
1905         return -EINVAL;
1906 }
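
     /* Example of an xform chain the parser above accepts: the encrypt
      * direction must be Cipher followed by Auth. A sketch with
      * hypothetical key buffers and IV offset (hmac_key, aes_key,
      * IV_OFFSET):
      *
      *   struct rte_crypto_sym_xform auth = {
      *           .type = RTE_CRYPTO_SYM_XFORM_AUTH,
      *           .auth = {
      *                   .op = RTE_CRYPTO_AUTH_OP_GENERATE,
      *                   .algo = RTE_CRYPTO_AUTH_SHA1_HMAC,
      *                   .key = { .data = hmac_key, .length = 20 },
      *                   .digest_length = 20,
      *           },
      *   };
      *   struct rte_crypto_sym_xform cipher = {
      *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
      *           .next = &auth,     // Cipher then Authenticate
      *           .cipher = {
      *                   .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
      *                   .algo = RTE_CRYPTO_CIPHER_AES_CBC,
      *                   .key = { .data = aes_key, .length = 16 },
      *                   .iv = { .offset = IV_OFFSET, .length = 16 },
      *           },
      *   };
      */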
1907
1908 static int
1909 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
1910                 struct rte_crypto_sym_xform *xform,
1911                 struct rte_cryptodev_sym_session *sess,
1912                 struct rte_mempool *mempool)
1913 {
1914         void *sess_private_data;
1915         int ret;
1916
1917         PMD_INIT_FUNC_TRACE();
1918
1919         if (rte_mempool_get(mempool, &sess_private_data)) {
1920                 DPAA_SEC_ERR("Couldn't get object from session mempool");
1921                 return -ENOMEM;
1922         }
1923
1924         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
1925         if (ret != 0) {
1926                 DPAA_SEC_ERR("failed to configure session parameters");
1927
1928                 /* Return session to mempool */
1929                 rte_mempool_put(mempool, sess_private_data);
1930                 return ret;
1931         }
1932
1933         set_sym_session_private_data(sess, dev->driver_id,
1934                         sess_private_data);
1935
1937         return 0;
1938 }
1939
1940 /** Clear the memory of session so it doesn't leave key material behind */
1941 static void
1942 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
1943                 struct rte_cryptodev_sym_session *sess)
1944 {
1945         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
1946         uint8_t index = dev->driver_id;
1947         void *sess_priv = get_sym_session_private_data(sess, index);
1948
1949         PMD_INIT_FUNC_TRACE();
1950
1951         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
1952
1953         if (sess_priv) {
1954                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
1955
1956                 if (s->inq)
1957                         dpaa_sec_detach_rxq(qi, s->inq);
1958                 rte_free(s->cipher_key.data);
1959                 rte_free(s->auth_key.data);
1960                 memset(s, 0, sizeof(dpaa_sec_session));
1961                 set_sym_session_private_data(sess, index, NULL);
1962                 rte_mempool_put(sess_mp, sess_priv);
1963         }
1964 }
1965
1966 static int
1967 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
1968                            struct rte_security_session_conf *conf,
1969                            void *sess)
1970 {
1971         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
1972         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
1973         struct rte_crypto_auth_xform *auth_xform = NULL;
1974         struct rte_crypto_cipher_xform *cipher_xform = NULL;
1975         dpaa_sec_session *session = (dpaa_sec_session *)sess;
1976
1977         PMD_INIT_FUNC_TRACE();
1978
1979         memset(session, 0, sizeof(dpaa_sec_session));
1980         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
1981                 cipher_xform = &conf->crypto_xform->cipher;
1982                 if (conf->crypto_xform->next)
1983                         auth_xform = &conf->crypto_xform->next->auth;
1984         } else {
1985                 auth_xform = &conf->crypto_xform->auth;
1986                 if (conf->crypto_xform->next)
1987                         cipher_xform = &conf->crypto_xform->next->cipher;
1988         }
1989         session->proto_alg = conf->protocol;
1990
1991         if (cipher_xform && cipher_xform->algo != RTE_CRYPTO_CIPHER_NULL) {
1992                 session->cipher_key.data = rte_zmalloc(NULL,
1993                                                        cipher_xform->key.length,
1994                                                        RTE_CACHE_LINE_SIZE);
1995                 if (session->cipher_key.data == NULL &&
1996                                 cipher_xform->key.length > 0) {
1997                         DPAA_SEC_ERR("No Memory for cipher key");
1998                         return -ENOMEM;
1999                 }
2000                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2001                                 cipher_xform->key.length);
2002                 session->cipher_key.length = cipher_xform->key.length;
2003
2004                 switch (cipher_xform->algo) {
2005                 case RTE_CRYPTO_CIPHER_AES_CBC:
2006                 case RTE_CRYPTO_CIPHER_3DES_CBC:
2007                 case RTE_CRYPTO_CIPHER_AES_CTR:
2008                         break;
2009                 default:
2010                         DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2011                                 cipher_xform->algo);
2012                         goto out;
2013                 }
2014                 session->cipher_alg = cipher_xform->algo;
2015         } else {
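                      /* NULL cipher (or no cipher xform): the SEC protocol
                       * descriptor performs ESP encap/decap without
                       * encryption.
                       */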
2016                 session->cipher_key.data = NULL;
2017                 session->cipher_key.length = 0;
2018                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2019         }
2020
2021         if (auth_xform && auth_xform->algo != RTE_CRYPTO_AUTH_NULL) {
2022                 session->auth_key.data = rte_zmalloc(NULL,
2023                                                 auth_xform->key.length,
2024                                                 RTE_CACHE_LINE_SIZE);
2025                 if (session->auth_key.data == NULL &&
2026                                 auth_xform->key.length > 0) {
2027                         DPAA_SEC_ERR("No Memory for auth key");
2028                         rte_free(session->cipher_key.data);
2029                         return -ENOMEM;
2030                 }
2031                 memcpy(session->auth_key.data, auth_xform->key.data,
2032                                 auth_xform->key.length);
2033                 session->auth_key.length = auth_xform->key.length;
2034
2035                 switch (auth_xform->algo) {
2036                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
2037                 case RTE_CRYPTO_AUTH_MD5_HMAC:
2038                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
2039                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
2040                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
2041                 case RTE_CRYPTO_AUTH_AES_CMAC:
2042                         break;
2043                 default:
2044                         DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2045                                 auth_xform->algo);
2046                         goto out;
2047                 }
2048                 session->auth_alg = auth_xform->algo;
2049         } else {
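                      /* NULL auth (or no auth xform): no ICV is generated
                       * or verified for this SA.
                       */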
2050                 session->auth_key.data = NULL;
2051                 session->auth_key.length = 0;
2052                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2053         }
2054
2055         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
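                      /* Zero the encap PDB together with the ip4_hdr that
                       * is expected to immediately follow it in
                       * dpaa_sec_session.
                       */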
2056                 memset(&session->encap_pdb, 0, sizeof(struct ipsec_encap_pdb) +
2057                                 sizeof(session->ip4_hdr));
2058                 session->ip4_hdr.ip_v = IPVERSION;
2059                 session->ip4_hdr.ip_hl = 5;
2060                 session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2061                                                 sizeof(session->ip4_hdr));
2062                 session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2063                 session->ip4_hdr.ip_id = 0;
2064                 session->ip4_hdr.ip_off = 0;
2065                 session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2066                 session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2067                                 RTE_SECURITY_IPSEC_SA_PROTO_ESP) ? IPPROTO_ESP
2068                                 : IPPROTO_AH;
2069                 session->ip4_hdr.ip_sum = 0;
2070                 session->ip4_hdr.ip_src = ipsec_xform->tunnel.ipv4.src_ip;
2071                 session->ip4_hdr.ip_dst = ipsec_xform->tunnel.ipv4.dst_ip;
2072                 session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2073                                                 (void *)&session->ip4_hdr,
2074                                                 sizeof(struct ip));
2075
2076                 session->encap_pdb.options =
2077                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2078                         PDBOPTS_ESP_OIHI_PDB_INL |
2079                         PDBOPTS_ESP_IVSRC |
2080                         PDBHMO_ESP_ENCAP_DTTL |
2081                         PDBHMO_ESP_SNR;
2082                 session->encap_pdb.spi = ipsec_xform->spi;
2083                 session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2084
2085                 session->dir = DIR_ENC;
2086         } else if (ipsec_xform->direction ==
2087                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2088                 memset(&session->decap_pdb, 0, sizeof(struct ipsec_decap_pdb));
2089                 session->decap_pdb.options = sizeof(struct ip) << 16;
2090                 session->dir = DIR_DEC;
2091         } else
2092                 goto out;
2093         session->ctx_pool = internals->ctx_pool;
2094         rte_spinlock_lock(&internals->lock);
2095         session->inq = dpaa_sec_attach_rxq(internals);
2096         rte_spinlock_unlock(&internals->lock);
2097         if (session->inq == NULL) {
2098                 DPAA_SEC_ERR("unable to attach sec queue");
2099                 goto out;
2100         }
2101
2103         return 0;
2104 out:
2105         rte_free(session->auth_key.data);
2106         rte_free(session->cipher_key.data);
2107         memset(session, 0, sizeof(dpaa_sec_session));
2108         return -1;
2109 }
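
     /* Usage sketch: an egress ESP SA with NULL encryption, the case the
      * null-algo handling above enables. Hypothetical values; sec_ctx
      * would come from rte_cryptodev_get_sec_ctx() and sess_mp is a
      * mempool of session objects:
      *
      *   struct rte_crypto_sym_xform cipher_null = {
      *           .type = RTE_CRYPTO_SYM_XFORM_CIPHER,
      *           .cipher = { .algo = RTE_CRYPTO_CIPHER_NULL,
      *                       .op = RTE_CRYPTO_CIPHER_OP_ENCRYPT },
      *   };
      *   struct rte_security_session_conf conf = {
      *           .action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
      *           .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
      *           .ipsec = {
      *                   .spi = 0x1234,
      *                   .direction = RTE_SECURITY_IPSEC_SA_DIR_EGRESS,
      *                   .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
      *                   .mode = RTE_SECURITY_IPSEC_SA_MODE_TUNNEL,
      *                   // plus tunnel.ipv4 addresses, ttl, dscp
      *           },
      *           .crypto_xform = &cipher_null,
      *   };
      *   struct rte_security_session *sess =
      *           rte_security_session_create(sec_ctx, &conf, sess_mp);
      */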
2110
2111 static int
2112 dpaa_sec_security_session_create(void *dev,
2113                                  struct rte_security_session_conf *conf,
2114                                  struct rte_security_session *sess,
2115                                  struct rte_mempool *mempool)
2116 {
2117         void *sess_private_data;
2118         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2119         int ret;
2120
2121         if (rte_mempool_get(mempool, &sess_private_data)) {
2122                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2123                 return -ENOMEM;
2124         }
2125
2126         switch (conf->protocol) {
2127         case RTE_SECURITY_PROTOCOL_IPSEC:
2128                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2129                                 sess_private_data);
2130                 break;
2131         case RTE_SECURITY_PROTOCOL_MACSEC:
2132                 ret = -ENOTSUP;
                     break;
2133         default:
2134                 ret = -EINVAL;
2135         }
2136         if (ret != 0) {
2137                 DPAA_SEC_ERR("failed to configure session parameters");
2138                 /* Return session to mempool */
2139                 rte_mempool_put(mempool, sess_private_data);
2140                 return ret;
2141         }
2142
2143         set_sec_session_private_data(sess, sess_private_data);
2144
2145         return ret;
2146 }
2147
2148 /** Clear the memory of session so it doesn't leave key material behind */
2149 static int
2150 dpaa_sec_security_session_destroy(void *dev __rte_unused,
2151                 struct rte_security_session *sess)
2152 {
2153         PMD_INIT_FUNC_TRACE();
2154         void *sess_priv = get_sec_session_private_data(sess);
2155
2156         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2157
2158         if (sess_priv) {
2159                 struct rte_mempool *sess_mp = rte_mempool_from_obj(sess_priv);
2160
2161                 rte_free(s->cipher_key.data);
2162                 rte_free(s->auth_key.data);
2163                 memset(s, 0, sizeof(dpaa_sec_session));
2164                 set_sec_session_private_data(sess, NULL);
2165                 rte_mempool_put(sess_mp, sess_priv);
2166         }
2167         return 0;
2168 }
2169
2171 static int
2172 dpaa_sec_dev_configure(struct rte_cryptodev *dev,
2173                        struct rte_cryptodev_config *config __rte_unused)
2174 {
2176         char str[20];
2177         struct dpaa_sec_dev_private *internals;
2178
2179         PMD_INIT_FUNC_TRACE();
2180
2181         internals = dev->data->dev_private;
2182         snprintf(str, sizeof(str), "ctx_pool_%d", dev->data->dev_id);
2183         if (!internals->ctx_pool) {
2184                 internals->ctx_pool = rte_mempool_create((const char *)str,
2185                                                         CTX_POOL_NUM_BUFS,
2186                                                         CTX_POOL_BUF_SIZE,
2187                                                         CTX_POOL_CACHE_SIZE, 0,
2188                                                         NULL, NULL, NULL, NULL,
2189                                                         SOCKET_ID_ANY, 0);
2190                 if (!internals->ctx_pool) {
2191                         DPAA_SEC_ERR("%s create failed\n", str);
2192                         return -ENOMEM;
2193                 }
2194         } else
2195                 DPAA_SEC_INFO("mempool already created for dev_id : %d",
2196                                 dev->data->dev_id);
2197
2198         return 0;
2199 }
2200
2201 static int
2202 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
2203 {
2204         PMD_INIT_FUNC_TRACE();
2205         return 0;
2206 }
2207
2208 static void
2209 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
2210 {
2211         PMD_INIT_FUNC_TRACE();
2212 }
2213
2214 static int
2215 dpaa_sec_dev_close(struct rte_cryptodev *dev)
2216 {
2217         struct dpaa_sec_dev_private *internals;
2218
2219         PMD_INIT_FUNC_TRACE();
2220
2221         if (dev == NULL)
2222                 return -EINVAL;
2223
2224         internals = dev->data->dev_private;
2225         rte_mempool_free(internals->ctx_pool);
2226         internals->ctx_pool = NULL;
2227
2228         return 0;
2229 }
2230
2231 static void
2232 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
2233                        struct rte_cryptodev_info *info)
2234 {
2235         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2236
2237         PMD_INIT_FUNC_TRACE();
2238         if (info != NULL) {
2239                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
2240                 info->feature_flags = dev->feature_flags;
2241                 info->capabilities = dpaa_sec_capabilities;
2242                 info->sym.max_nb_sessions = internals->max_nb_sessions;
2243                 info->driver_id = cryptodev_driver_id;
2244         }
2245 }
2246
2247 static struct rte_cryptodev_ops crypto_ops = {
2248         .dev_configure        = dpaa_sec_dev_configure,
2249         .dev_start            = dpaa_sec_dev_start,
2250         .dev_stop             = dpaa_sec_dev_stop,
2251         .dev_close            = dpaa_sec_dev_close,
2252         .dev_infos_get        = dpaa_sec_dev_infos_get,
2253         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
2254         .queue_pair_release   = dpaa_sec_queue_pair_release,
2255         .queue_pair_count     = dpaa_sec_queue_pair_count,
2256         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
2257         .sym_session_configure    = dpaa_sec_sym_session_configure,
2258         .sym_session_clear        = dpaa_sec_sym_session_clear
2259 };
2260
2261 static const struct rte_security_capability *
2262 dpaa_sec_capabilities_get(void *device __rte_unused)
2263 {
2264         return dpaa_sec_security_cap;
2265 }
2266
2267 struct rte_security_ops dpaa_sec_security_ops = {
2268         .session_create = dpaa_sec_security_session_create,
2269         .session_update = NULL,
2270         .session_stats_get = NULL,
2271         .session_destroy = dpaa_sec_security_session_destroy,
2272         .set_pkt_metadata = NULL,
2273         .capabilities_get = dpaa_sec_capabilities_get
2274 };
2275
2276 static int
2277 dpaa_sec_uninit(struct rte_cryptodev *dev)
2278 {
2279         struct dpaa_sec_dev_private *internals;
2280
2281         if (dev == NULL)
2282                 return -ENODEV;
2283
2284         internals = dev->data->dev_private;
2285         rte_free(dev->security_ctx);
2286
2287         /* In case close has been called, internals->ctx_pool would be NULL */
2288         rte_mempool_free(internals->ctx_pool);
2289         rte_free(internals);
2290
2291         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
2292                       dev->data->name, rte_socket_id());
2293
2294         return 0;
2295 }
2296
2297 static int
2298 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
2299 {
2300         struct dpaa_sec_dev_private *internals;
2301         struct rte_security_ctx *security_instance;
2302         struct dpaa_sec_qp *qp;
2303         uint32_t i, flags;
2304         int ret;
2305
2306         PMD_INIT_FUNC_TRACE();
2307
2308         cryptodev->driver_id = cryptodev_driver_id;
2309         cryptodev->dev_ops = &crypto_ops;
2310
2311         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
2312         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
2313         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
2314                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
2315                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
2316                         RTE_CRYPTODEV_FF_SECURITY |
2317                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
2318                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
2319                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
2320                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
2321                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
2322
2323         internals = cryptodev->data->dev_private;
2324         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
2325         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
2326
2327         /*
2328          * For secondary processes, we don't initialise any further as primary
2329          * has already done this work. Only check we don't need a different
2330          * RX function
2331          */
2332         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2333                 DPAA_SEC_WARN("Device already init by primary process");
2334                 return 0;
2335         }
2336
2337         /* Initialize security_ctx only for primary process */
2338         security_instance = rte_malloc("rte_security_instances_ops",
2339                                 sizeof(struct rte_security_ctx), 0);
2340         if (security_instance == NULL)
2341                 return -ENOMEM;
2342         security_instance->device = (void *)cryptodev;
2343         security_instance->ops = &dpaa_sec_security_ops;
2344         security_instance->sess_cnt = 0;
2345         cryptodev->security_ctx = security_instance;
2346
2347         rte_spinlock_init(&internals->lock);
2348         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
2349                 /* init qman fq for queue pair */
2350                 qp = &internals->qps[i];
2351                 ret = dpaa_sec_init_tx(&qp->outq);
2352                 if (ret) {
2353                         DPAA_SEC_ERR("config tx of queue pair  %d", i);
2354                         goto init_error;
2355                 }
2356         }
2357
2358         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
2359                 QMAN_FQ_FLAG_TO_DCPORTAL;
2360         for (i = 0; i < internals->max_nb_sessions; i++) {
2361                 /* create rx qman fq for sessions */
2362                 ret = qman_create_fq(0, flags, &internals->inq[i]);
2363                 if (unlikely(ret != 0)) {
2364                         DPAA_SEC_ERR("sec qman_create_fq failed");
2365                         goto init_error;
2366                 }
2367         }
2368
2369         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
2370         return 0;
2371
2372 init_error:
2373         DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
2374
2375         dpaa_sec_uninit(cryptodev);
2376         return -EFAULT;
2377 }
2378
2379 static int
2380 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
2381                                 struct rte_dpaa_device *dpaa_dev)
2382 {
2383         struct rte_cryptodev *cryptodev;
2384         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
2385
2386         int retval;
2387
2388         snprintf(cryptodev_name, sizeof(cryptodev_name), "dpaa_sec-%d",
                      dpaa_dev->id.dev_id);
2389
2390         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
2391         if (cryptodev == NULL)
2392                 return -ENOMEM;
2393
2394         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
2395                 cryptodev->data->dev_private = rte_zmalloc_socket(
2396                                         "cryptodev private structure",
2397                                         sizeof(struct dpaa_sec_dev_private),
2398                                         RTE_CACHE_LINE_SIZE,
2399                                         rte_socket_id());
2400
2401                 if (cryptodev->data->dev_private == NULL)
2402                         rte_panic("Cannot allocate memory for private "
2403                                         "device data");
2404         }
2405
2406         dpaa_dev->crypto_dev = cryptodev;
2407         cryptodev->device = &dpaa_dev->device;
2408
2409         /* init user callbacks */
2410         TAILQ_INIT(&(cryptodev->link_intr_cbs));
2411
2412         /* if sec device version is not configured */
2413         if (!rta_get_sec_era()) {
2414                 const struct device_node *caam_node;
2415
2416                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
2417                         const uint32_t *prop = of_get_property(caam_node,
2418                                         "fsl,sec-era",
2419                                         NULL);
2420                         if (prop) {
2421                                 rta_set_sec_era(
2422                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
2423                                 break;
2424                         }
2425                 }
2426         }
2427
2428         /* Invoke PMD device initialization function */
2429         retval = dpaa_sec_dev_init(cryptodev);
2430         if (retval == 0)
2431                 return 0;
2432
2433         /* In case of error, cleanup is done */
2434         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2435                 rte_free(cryptodev->data->dev_private);
2436
2437         rte_cryptodev_pmd_release_device(cryptodev);
2438
2439         return -ENXIO;
2440 }
2441
2442 static int
2443 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
2444 {
2445         struct rte_cryptodev *cryptodev;
2446         int ret;
2447
2448         cryptodev = dpaa_dev->crypto_dev;
2449         if (cryptodev == NULL)
2450                 return -ENODEV;
2451
2452         ret = dpaa_sec_uninit(cryptodev);
2453         if (ret)
2454                 return ret;
2455
2456         return rte_cryptodev_pmd_destroy(cryptodev);
2457 }
2458
2459 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
2460         .drv_type = FSL_DPAA_CRYPTO,
2461         .driver = {
2462                 .name = "DPAA SEC PMD"
2463         },
2464         .probe = cryptodev_dpaa_sec_probe,
2465         .remove = cryptodev_dpaa_sec_remove,
2466 };
2467
2468 static struct cryptodev_driver dpaa_sec_crypto_drv;
2469
2470 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
2471 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
2472                 cryptodev_driver_id);
2473
2474 RTE_INIT(dpaa_sec_init_log)
2475 {
2476         dpaa_logtype_sec = rte_log_register("pmd.crypto.dpaa");
2477         if (dpaa_logtype_sec >= 0)
2478                 rte_log_set_level(dpaa_logtype_sec, RTE_LOG_NOTICE);
2479 }