/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

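/* Map the hardware frame-descriptor status onto the crypto op status. */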
static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

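/*
 * Allocate a per-operation context (job descriptor plus SG table) from the
 * context pool of the session's queue pair for the current lcore.
 */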
static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        if (!ctx || retval) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear the SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, so four calls clear all
         * the SG entries. dpaa_sec_alloc_ctx() is called for each packet,
         * and memset() is costlier than dcbz_64().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as its destination so that
 * all packets in this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

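        /* CONTEXT_A carries the shared descriptor address and CONTEXT_B the
         * FQID on which SEC will enqueue its results.
         */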
        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}

/* Packets are enqueued on in_fq and CAAM puts the crypto result on out_fq */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

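        /* Defer further dequeues once a full burst has been collected for
         * this thread; remaining frames are handled on the next poll.
         */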
        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output
         * sg[1] is for input
         */
        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
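        /* For security (lookaside protocol) sessions the output length can
         * differ from the input; propagate the SEC output length into the
         * destination mbuf chain.
         */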
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* CAAM results are enqueued on this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_alg) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;

                p_authdata = &authdata;
        }

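        /* If RTA reports that the keys cannot be inlined in the shared
         * descriptor for this PDCP configuration, reference the cipher key
         * by its physical address instead.
         */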
        if (rta_inline_pdcp_query(authdata.algtype,
                                cipherdata.algtype,
                                ses->pdcp.sn_size,
                                ses->pdcp.hfn_ovd)) {
                cipherdata.key =
                        (size_t)rte_dpaa_mem_vtop((void *)
                                        (size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
        }
        return shared_desc_len;
}

/* prepare ipsec proto command block of the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_key.length) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;
        }

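        /* Ask RTA whether both keys fit inline in the shared descriptor;
         * sh_desc[2] receives a bitmask (bit 0 = cipher key, bit 1 = auth
         * key). Keys that do not fit are referenced by physical address.
         */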
        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               DESC_JOB_IO_LEN,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1<<1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}
#endif
/* prepare command block of the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
        case DPAA_SEC_IPSEC:
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
                break;
        case DPAA_SEC_PDCP:
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
                break;
#endif
        case DPAA_SEC_CIPHER:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                switch (ses->cipher_alg) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                case RTE_CRYPTO_CIPHER_AES_CTR:
                case RTE_CRYPTO_CIPHER_3DES_CTR:
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                        shared_desc_len = cnstr_shdsc_snow_f8(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                        shared_desc_len = cnstr_shdsc_zuce(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported cipher alg %d",
                                     ses->cipher_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AUTH:
                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        shared_desc_len = cnstr_shdsc_snow_f9(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        shared_desc_len = cnstr_shdsc_zuca(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AEAD:
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;
                alginfo.algtype = ses->aead_key.alg;
                alginfo.algmode = ses->aead_key.algmode;

                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                break;
        case DPAA_SEC_CIPHER_HASH:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;

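                /* Same inline-key query as in the IPsec path: sh_desc[2]
                 * returns a bitmask telling which keys fit inline in the
                 * shared descriptor.
                 */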
                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       DESC_JOB_IO_LEN,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1<<1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it is overwritten in the
                 * FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length,
                                ses->digest_length, ses->dir);
                break;
        case DPAA_SEC_HASH_CIPHER:
        default:
                DPAA_SEC_ERR("error: Unsupported session");
                return -ENOTSUP;
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

/* The qp is lockless; it must be accessed by only one thread. */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, set the QM_VDQCR_EXACT
         * flag so that exactly the requested number of buffers is provided.
         * Without QM_VDQCR_EXACT the hardware may return up to two more
         * buffers than requested, so in that case request two fewer.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

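        /* Drain the volatile dequeue: keep polling until the VDQCR command
         * issued above completes.
         */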
        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx,
                 * sg[0] is for output
                 * sg[1] is for input
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* op status reported via ctx->op; now free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

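/* Build an SG-based compound frame for an auth-only (hash) operation. */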
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
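                /* SNOW 3G UIA2 and ZUC EIA3 express length/offset in bits;
                 * only byte-aligned values are supported, so convert to
                 * bytes here.
                 */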
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;

        if (data_len <= (mbuf->data_len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = mbuf->data_len - data_offset;

                /* remaining i/p segs */
                while ((data_len = data_len - sg->length) &&
                       (mbuf = mbuf->next)) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                        if (data_len > mbuf->data_len)
                                sg->length = mbuf->data_len;
                        else
                                sg->length = data_len;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *in_sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        sg = &cf->sg[2];

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;
        sg->length = data_len;

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                /* hash result or digest, save digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                /* let the hardware verify against the saved digest */
                start_addr = rte_dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

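/* Build an SG-based compound frame for a cipher-only operation. */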
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

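/* Build a simple compound frame for a cipher-only operation on contiguous
 * (single-segment) mbufs.
 */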
static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + data_offset);
        sg->length = data_len + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = data_len + ses->iv.length;
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + data_offset);
        sg->length = data_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

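/* Build an SG-based compound frame for an AEAD (GCM) operation. */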
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

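        /* The AAD, when present, occupies one extra input SG entry. */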
        if (ses->auth_only_len)
                req_segs++;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

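/* Build a compound frame for an AEAD (GCM) operation on contiguous mbufs. */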
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses, 7);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

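/* Build an SG-based compound frame for a chained cipher + auth operation. */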
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->auth.data.length + ses->digest_length;
        else
                out_sg->length = sym->auth.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->auth.data.length;
        else
                in_sg->length = ses->iv.length + sym->auth.data.length
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        sg->length -= ses->digest_length;
        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->auth.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

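/* Build a compound frame for a chained cipher + auth operation on
 * contiguous mbufs.
 */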
static inline struct dpaa_sec_job *
build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint32_t length = 0;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses, 7);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;

                qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
                sg->length = sym->auth.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->auth.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
        sg->length = sym->cipher.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

1545 #ifdef RTE_LIBRTE_SECURITY
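/*
 * Build a compound frame for lookaside protocol offload (IPsec/PDCP)
 * on contiguous mbufs: the whole packet is handed to SEC as input, and
 * the output entry covers all the room from data_off to the end of the
 * destination buffer, since the protocol-processed packet changes
 * size. The L4 packet type is cleared because the payload is
 * rewritten.
 */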
1546 static inline struct dpaa_sec_job *
1547 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1548 {
1549         struct rte_crypto_sym_op *sym = op->sym;
1550         struct dpaa_sec_job *cf;
1551         struct dpaa_sec_op_ctx *ctx;
1552         struct qm_sg_entry *sg;
1553         phys_addr_t src_start_addr, dst_start_addr;
1554
1555         ctx = dpaa_sec_alloc_ctx(ses, 2);
1556         if (!ctx)
1557                 return NULL;
1558         cf = &ctx->job;
1559         ctx->op = op;
1560
1561         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1562
1563         if (sym->m_dst)
1564                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1565         else
1566                 dst_start_addr = src_start_addr;
1567
1568         /* input */
1569         sg = &cf->sg[1];
1570         qm_sg_entry_set64(sg, src_start_addr);
1571         sg->length = sym->m_src->pkt_len;
1572         sg->final = 1;
1573         cpu_to_hw_sg(sg);
1574
1575         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1576         /* output */
1577         sg = &cf->sg[0];
1578         qm_sg_entry_set64(sg, dst_start_addr);
1579         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1580         cpu_to_hw_sg(sg);
1581
1582         return cf;
1583 }
1584
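/*
 * Scatter-gather variant of build_proto(): walks the source and
 * destination mbuf chains into the input/output SG tables of the
 * compound frame. Note that only the output chain's segment count is
 * range-checked against MAX_SG_ENTRIES here.
 */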
1585 static inline struct dpaa_sec_job *
1586 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1587 {
1588         struct rte_crypto_sym_op *sym = op->sym;
1589         struct dpaa_sec_job *cf;
1590         struct dpaa_sec_op_ctx *ctx;
1591         struct qm_sg_entry *sg, *out_sg, *in_sg;
1592         struct rte_mbuf *mbuf;
1593         uint8_t req_segs;
1594         uint32_t in_len = 0, out_len = 0;
1595
1596         if (sym->m_dst)
1597                 mbuf = sym->m_dst;
1598         else
1599                 mbuf = sym->m_src;
1600
1601         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1602         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1603                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1604                                 MAX_SG_ENTRIES);
1605                 return NULL;
1606         }
1607
1608         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1609         if (!ctx)
1610                 return NULL;
1611         cf = &ctx->job;
1612         ctx->op = op;
1613         /* output */
1614         out_sg = &cf->sg[0];
1615         out_sg->extension = 1;
1616         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1617
1618         /* 1st seg */
1619         sg = &cf->sg[2];
1620         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1621         sg->offset = 0;
1622
1623         /* Successive segs */
1624         while (mbuf->next) {
1625                 sg->length = mbuf->data_len;
1626                 out_len += sg->length;
1627                 mbuf = mbuf->next;
1628                 cpu_to_hw_sg(sg);
1629                 sg++;
1630                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1631                 sg->offset = 0;
1632         }
1633         sg->length = mbuf->buf_len - mbuf->data_off;
1634         out_len += sg->length;
1635         sg->final = 1;
1636         cpu_to_hw_sg(sg);
1637
1638         out_sg->length = out_len;
1639         cpu_to_hw_sg(out_sg);
1640
1641         /* input */
1642         mbuf = sym->m_src;
1643         in_sg = &cf->sg[1];
1644         in_sg->extension = 1;
1645         in_sg->final = 1;
1646         in_len = mbuf->data_len;
1647
1648         sg++;
1649         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1650
1651         /* 1st seg */
1652         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1653         sg->length = mbuf->data_len;
1654         sg->offset = 0;
1655
1656         /* Successive segs */
1657         mbuf = mbuf->next;
1658         while (mbuf) {
1659                 cpu_to_hw_sg(sg);
1660                 sg++;
1661                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1662                 sg->length = mbuf->data_len;
1663                 sg->offset = 0;
1664                 in_len += sg->length;
1665                 mbuf = mbuf->next;
1666         }
1667         sg->final = 1;
1668         cpu_to_hw_sg(sg);
1669
1670         in_sg->length = in_len;
1671         cpu_to_hw_sg(in_sg);
1672
1673         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1674
1675         return cf;
1676 }
1677 #endif
1678
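/*
 * Enqueue path: for each burst of up to DPAA_SEC_BURST ops, resolve
 * the session, lazily attach it to this lcore's queue pair, pick the
 * frame builder from the session context and the contiguity of the
 * source/destination mbufs, and enqueue the resulting compound FDs to
 * the session's per-lcore SEC input FQs. Non-zero auth-only
 * header/tail lengths (and the per-packet PDCP HFN override) are
 * written into fd->cmd, which SEC loads into DPOVRD. Any per-op
 * failure truncates the burst at that op.
 */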
1679 static uint16_t
1680 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1681                        uint16_t nb_ops)
1682 {
1683         /* Transmit the frames to the given device and queue pair */
1684         uint32_t loop;
1685         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1686         uint16_t num_tx = 0, nb_ops_in = nb_ops;
1687         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1688         uint32_t frames_to_send;
1689         struct rte_crypto_op *op;
1690         struct dpaa_sec_job *cf;
1691         dpaa_sec_session *ses;
1692         uint16_t auth_hdr_len, auth_tail_len;
1693         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1694         struct qman_fq *inq[DPAA_SEC_BURST];
1695
1696         while (nb_ops) {
1697                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1698                                 DPAA_SEC_BURST : nb_ops;
1699                 for (loop = 0; loop < frames_to_send; loop++) {
1700                         op = *(ops++);
1701                         if (op->sym->m_src->seqn != 0) {
1702                                 index = op->sym->m_src->seqn - 1;
1703                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1704                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1705                                         flags[loop] = ((index & 0x0f) << 8);
1706                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1707                                         DPAA_PER_LCORE_DQRR_SIZE--;
1708                                         DPAA_PER_LCORE_DQRR_HELD &=
1709                                                                 ~(1 << index);
1710                                 }
1711                         }
1712
1713                         switch (op->sess_type) {
1714                         case RTE_CRYPTO_OP_WITH_SESSION:
1715                                 ses = (dpaa_sec_session *)
1716                                         get_sym_session_private_data(
1717                                                         op->sym->session,
1718                                                         cryptodev_driver_id);
1719                                 break;
1720 #ifdef RTE_LIBRTE_SECURITY
1721                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1722                                 ses = (dpaa_sec_session *)
1723                                         get_sec_session_private_data(
1724                                                         op->sym->sec_session);
1725                                 break;
1726 #endif
1727                         default:
1728                                 DPAA_SEC_DP_ERR(
1729                                         "sessionless crypto op not supported");
1730                                 frames_to_send = loop;
1731                                 nb_ops = loop;
1732                                 goto send_pkts;
1733                         }
1734
1735                         if (!ses) {
1736                                 DPAA_SEC_DP_ERR("session not available");
1737                                 frames_to_send = loop;
1738                                 nb_ops = loop;
1739                                 goto send_pkts;
1740                         }
1741
1742                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1743                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1744                                         frames_to_send = loop;
1745                                         nb_ops = loop;
1746                                         goto send_pkts;
1747                                 }
1748                         } else if (unlikely(ses->qp[rte_lcore_id() %
1749                                                 MAX_DPAA_CORES] != qp)) {
1750                                 DPAA_SEC_DP_ERR("Old: sess->qp = %p,"
1751                                         " new qp = %p\n",
1752                                         ses->qp[rte_lcore_id() %
1753                                         MAX_DPAA_CORES], qp);
1754                                 frames_to_send = loop;
1755                                 nb_ops = loop;
1756                                 goto send_pkts;
1757                         }
1758
1759                         auth_hdr_len = op->sym->auth.data.length -
1760                                                 op->sym->cipher.data.length;
1761                         auth_tail_len = 0;
1762
1763                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1764                                   ((op->sym->m_dst == NULL) ||
1765                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1766                                 switch (ses->ctxt) {
1767 #ifdef RTE_LIBRTE_SECURITY
1768                                 case DPAA_SEC_PDCP:
1769                                 case DPAA_SEC_IPSEC:
1770                                         cf = build_proto(op, ses);
1771                                         break;
1772 #endif
1773                                 case DPAA_SEC_AUTH:
1774                                         cf = build_auth_only(op, ses);
1775                                         break;
1776                                 case DPAA_SEC_CIPHER:
1777                                         cf = build_cipher_only(op, ses);
1778                                         break;
1779                                 case DPAA_SEC_AEAD:
1780                                         cf = build_cipher_auth_gcm(op, ses);
1781                                         auth_hdr_len = ses->auth_only_len;
1782                                         break;
1783                                 case DPAA_SEC_CIPHER_HASH:
1784                                         auth_hdr_len =
1785                                                 op->sym->cipher.data.offset
1786                                                 - op->sym->auth.data.offset;
1787                                         auth_tail_len =
1788                                                 op->sym->auth.data.length
1789                                                 - op->sym->cipher.data.length
1790                                                 - auth_hdr_len;
1791                                         cf = build_cipher_auth(op, ses);
1792                                         break;
1793                                 default:
1794                                         DPAA_SEC_DP_ERR("unsupported operation");
1795                                         frames_to_send = loop;
1796                                         nb_ops = loop;
1797                                         goto send_pkts;
1798                                 }
1799                         } else {
1800                                 switch (ses->ctxt) {
1801 #ifdef RTE_LIBRTE_SECURITY
1802                                 case DPAA_SEC_PDCP:
1803                                 case DPAA_SEC_IPSEC:
1804                                         cf = build_proto_sg(op, ses);
1805                                         break;
1806 #endif
1807                                 case DPAA_SEC_AUTH:
1808                                         cf = build_auth_only_sg(op, ses);
1809                                         break;
1810                                 case DPAA_SEC_CIPHER:
1811                                         cf = build_cipher_only_sg(op, ses);
1812                                         break;
1813                                 case DPAA_SEC_AEAD:
1814                                         cf = build_cipher_auth_gcm_sg(op, ses);
1815                                         auth_hdr_len = ses->auth_only_len;
1816                                         break;
1817                                 case DPAA_SEC_CIPHER_HASH:
1818                                         auth_hdr_len =
1819                                                 op->sym->cipher.data.offset
1820                                                 - op->sym->auth.data.offset;
1821                                         auth_tail_len =
1822                                                 op->sym->auth.data.length
1823                                                 - op->sym->cipher.data.length
1824                                                 - auth_hdr_len;
1825                                         cf = build_cipher_auth_sg(op, ses);
1826                                         break;
1827                                 default:
1828                                         DPAA_SEC_DP_ERR("unsupported operation");
1829                                         frames_to_send = loop;
1830                                         nb_ops = loop;
1831                                         goto send_pkts;
1832                                 }
1833                         }
1834                         if (unlikely(!cf)) {
1835                                 frames_to_send = loop;
1836                                 nb_ops = loop;
1837                                 goto send_pkts;
1838                         }
1839
1840                         fd = &fds[loop];
1841                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1842                         fd->opaque_addr = 0;
1843                         fd->cmd = 0;
1844                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1845                         fd->_format1 = qm_fd_compound;
1846                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1847
1848                         /* auth_only_len is set to 0 in the descriptor and
1849                          * is overridden here via fd.cmd, which updates the
1850                          * DPOVRD register.
1851                          */
1852                         if (auth_hdr_len || auth_tail_len) {
1853                                 fd->cmd = 0x80000000;
1854                                 fd->cmd |=
1855                                         ((auth_tail_len << 16) | auth_hdr_len);
1856                         }
1857
1858 #ifdef RTE_LIBRTE_SECURITY
1859                         /* For PDCP, the per-packet HFN is stored at
1860                          * hfn_ovd_offset bytes from the crypto op.
1861                          */
1862                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1863                                 fd->cmd = 0x80000000 |
1864                                         *((uint32_t *)((uint8_t *)op +
1865                                         ses->pdcp.hfn_ovd_offset));
1866                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1867                                         *((uint32_t *)((uint8_t *)op +
1868                                         ses->pdcp.hfn_ovd_offset)),
1869                                         ses->pdcp.hfn_ovd);
1870                         }
1871 #endif
1872                 }
1873 send_pkts:
1874                 loop = 0;
1875                 while (loop < frames_to_send) {
1876                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1877                                         &flags[loop], frames_to_send - loop);
1878                 }
1879                 nb_ops -= frames_to_send;
1880                 num_tx += frames_to_send;
1881         }
1882
1883         dpaa_qp->tx_pkts += num_tx;
1884         dpaa_qp->tx_errs += nb_ops_in - num_tx;
1885
1886         return num_tx;
1887 }
1888
1889 static uint16_t
1890 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1891                        uint16_t nb_ops)
1892 {
1893         uint16_t num_rx;
1894         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1895
1896         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1897
1898         dpaa_qp->rx_pkts += num_rx;
1899         dpaa_qp->rx_errs += nb_ops - num_rx;
1900
1901         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1902
1903         return num_rx;
1904 }
1905
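/*
 * Usage sketch (illustrative, not part of the driver): applications
 * reach the two burst handlers above through the generic cryptodev
 * API. dev_id, qp_id, ops and deq_ops below are placeholders.
 *
 *	uint16_t n_enq, n_deq = 0;
 *
 *	n_enq = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, nb_ops);
 *	while (n_deq < n_enq)
 *		n_deq += rte_cryptodev_dequeue_burst(dev_id, qp_id,
 *					&deq_ops[n_deq], n_enq - n_deq);
 */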
1906 /** Release queue pair */
1907 static int
1908 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1909                             uint16_t qp_id)
1910 {
1911         struct dpaa_sec_dev_private *internals;
1912         struct dpaa_sec_qp *qp = NULL;
1913
1914         PMD_INIT_FUNC_TRACE();
1915
1916         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
1917
1918         internals = dev->data->dev_private;
1919         if (qp_id >= internals->max_nb_queue_pairs) {
1920                 DPAA_SEC_ERR("Max supported qpid %d",
1921                              internals->max_nb_queue_pairs);
1922                 return -EINVAL;
1923         }
1924
1925         qp = &internals->qps[qp_id];
1926         rte_mempool_free(qp->ctx_pool);
1927         qp->internals = NULL;
1928         dev->data->queue_pairs[qp_id] = NULL;
1929
1930         return 0;
1931 }
1932
1933 /** Setup a queue pair */
1934 static int
1935 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1936                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1937                 __rte_unused int socket_id)
1938 {
1939         struct dpaa_sec_dev_private *internals;
1940         struct dpaa_sec_qp *qp = NULL;
1941         char str[20];
1942
1943         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
1944
1945         internals = dev->data->dev_private;
1946         if (qp_id >= internals->max_nb_queue_pairs) {
1947                 DPAA_SEC_ERR("Max supported qpid %d",
1948                              internals->max_nb_queue_pairs);
1949                 return -EINVAL;
1950         }
1951
1952         qp = &internals->qps[qp_id];
1953         qp->internals = internals;
1954         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1955                         dev->data->dev_id, qp_id);
1956         if (!qp->ctx_pool) {
1957                 qp->ctx_pool = rte_mempool_create((const char *)str,
1958                                                         CTX_POOL_NUM_BUFS,
1959                                                         CTX_POOL_BUF_SIZE,
1960                                                         CTX_POOL_CACHE_SIZE, 0,
1961                                                         NULL, NULL, NULL, NULL,
1962                                                         SOCKET_ID_ANY, 0);
1963                 if (!qp->ctx_pool) {
1964                         DPAA_SEC_ERR("%s create failed\n", str);
1965                         return -ENOMEM;
1966                 }
1967         } else
1968                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1969                                 dev->data->dev_id, qp_id);
1970         dev->data->queue_pairs[qp_id] = qp;
1971
1972         return 0;
1973 }
1974
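/*
 * Usage sketch (illustrative): a queue pair is set up through the
 * generic API; note that qp_conf and socket_id are ignored by this
 * PMD. dev_id and the descriptor count are placeholders.
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	if (rte_cryptodev_queue_pair_setup(dev_id, 0, &qp_conf,
 *					   rte_socket_id()) < 0)
 *		rte_exit(EXIT_FAILURE, "queue pair setup failed\n");
 */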
1975 /** Returns the size of session structure */
1976 static unsigned int
1977 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1978 {
1979         PMD_INIT_FUNC_TRACE();
1980
1981         return sizeof(dpaa_sec_session);
1982 }
1983
1984 static int
1985 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
1986                      struct rte_crypto_sym_xform *xform,
1987                      dpaa_sec_session *session)
1988 {
1989         session->ctxt = DPAA_SEC_CIPHER;
1990         session->cipher_alg = xform->cipher.algo;
1991         session->iv.length = xform->cipher.iv.length;
1992         session->iv.offset = xform->cipher.iv.offset;
1993         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
1994                                                RTE_CACHE_LINE_SIZE);
1995         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
1996                 DPAA_SEC_ERR("No Memory for cipher key");
1997                 return -ENOMEM;
1998         }
1999         session->cipher_key.length = xform->cipher.key.length;
2000
2001         memcpy(session->cipher_key.data, xform->cipher.key.data,
2002                xform->cipher.key.length);
2003         switch (xform->cipher.algo) {
2004         case RTE_CRYPTO_CIPHER_AES_CBC:
2005                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2006                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2007                 break;
2008         case RTE_CRYPTO_CIPHER_3DES_CBC:
2009                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2010                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2011                 break;
2012         case RTE_CRYPTO_CIPHER_AES_CTR:
2013                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2014                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2015                 break;
2016         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2017                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2018                 break;
2019         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2020                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2021                 break;
2022         default:
2023                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2024                               xform->cipher.algo);
2025                 return -ENOTSUP;
2026         }
2027         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2028                         DIR_ENC : DIR_DEC;
2029
2030         return 0;
2031 }
2032
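/*
 * Example xform consumed by dpaa_sec_cipher_init() (illustrative
 * sketch; "key" and IV_OFFSET are placeholders):
 *
 *	struct rte_crypto_sym_xform cipher_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */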
2033 static int
2034 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2035                    struct rte_crypto_sym_xform *xform,
2036                    dpaa_sec_session *session)
2037 {
2038         session->ctxt = DPAA_SEC_AUTH;
2039         session->auth_alg = xform->auth.algo;
2040         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2041                                              RTE_CACHE_LINE_SIZE);
2042         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2043                 DPAA_SEC_ERR("No Memory for auth key");
2044                 return -ENOMEM;
2045         }
2046         session->auth_key.length = xform->auth.key.length;
2047         session->digest_length = xform->auth.digest_length;
2048         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2049                 session->iv.offset = xform->auth.iv.offset;
2050                 session->iv.length = xform->auth.iv.length;
2051         }
2052
2053         memcpy(session->auth_key.data, xform->auth.key.data,
2054                xform->auth.key.length);
2055
2056         switch (xform->auth.algo) {
2057         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2058                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2059                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2060                 break;
2061         case RTE_CRYPTO_AUTH_MD5_HMAC:
2062                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2063                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2064                 break;
2065         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2066                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2067                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2068                 break;
2069         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2070                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2071                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2072                 break;
2073         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2074                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2075                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2076                 break;
2077         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2078                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2079                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2080                 break;
2081         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2082                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2083                 session->auth_key.algmode = OP_ALG_AAI_F9;
2084                 break;
2085         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2086                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2087                 session->auth_key.algmode = OP_ALG_AAI_F9;
2088                 break;
2089         default:
2090                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2091                               xform->auth.algo);
2092                 return -ENOTSUP;
2093         }
2094
2095         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2096                         DIR_ENC : DIR_DEC;
2097
2098         return 0;
2099 }
2100
2101 static int
2102 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2103                    struct rte_crypto_sym_xform *xform,
2104                    dpaa_sec_session *session)
2105 {
2107         struct rte_crypto_cipher_xform *cipher_xform;
2108         struct rte_crypto_auth_xform *auth_xform;
2109
2110         session->ctxt = DPAA_SEC_CIPHER_HASH;
2111         if (session->auth_cipher_text) {
2112                 cipher_xform = &xform->cipher;
2113                 auth_xform = &xform->next->auth;
2114         } else {
2115                 cipher_xform = &xform->next->cipher;
2116                 auth_xform = &xform->auth;
2117         }
2118
2119         /* Set IV parameters */
2120         session->iv.offset = cipher_xform->iv.offset;
2121         session->iv.length = cipher_xform->iv.length;
2122
2123         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2124                                                RTE_CACHE_LINE_SIZE);
2125         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2126                 DPAA_SEC_ERR("No Memory for cipher key");
2127                 return -ENOMEM;
2128         }
2129         session->cipher_key.length = cipher_xform->key.length;
2130         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2131                                              RTE_CACHE_LINE_SIZE);
2132         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2133                 DPAA_SEC_ERR("No Memory for auth key");
2134                 return -ENOMEM;
2135         }
2136         session->auth_key.length = auth_xform->key.length;
2137         memcpy(session->cipher_key.data, cipher_xform->key.data,
2138                cipher_xform->key.length);
2139         memcpy(session->auth_key.data, auth_xform->key.data,
2140                auth_xform->key.length);
2141
2142         session->digest_length = auth_xform->digest_length;
2143         session->auth_alg = auth_xform->algo;
2144
2145         switch (auth_xform->algo) {
2146         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2147                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2148                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2149                 break;
2150         case RTE_CRYPTO_AUTH_MD5_HMAC:
2151                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2152                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2153                 break;
2154         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2155                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2156                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2157                 break;
2158         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2159                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2160                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2161                 break;
2162         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2163                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2164                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2165                 break;
2166         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2167                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2168                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2169                 break;
2170         default:
2171                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2172                               auth_xform->algo);
2173                 return -ENOTSUP;
2174         }
2175
2176         session->cipher_alg = cipher_xform->algo;
2177
2178         switch (cipher_xform->algo) {
2179         case RTE_CRYPTO_CIPHER_AES_CBC:
2180                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2181                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2182                 break;
2183         case RTE_CRYPTO_CIPHER_3DES_CBC:
2184                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2185                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2186                 break;
2187         case RTE_CRYPTO_CIPHER_AES_CTR:
2188                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2189                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2190                 break;
2191         default:
2192                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2193                               cipher_xform->algo);
2194                 return -ENOTSUP;
2195         }
2196         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2197                                 DIR_ENC : DIR_DEC;
2198         return 0;
2199 }
2200
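/*
 * Example chained xform consumed by dpaa_sec_chain_init()
 * (illustrative sketch): encrypt-then-authenticate is expressed as a
 * cipher xform whose ->next is the auth xform.
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *	cipher_xform.cipher.algo = RTE_CRYPTO_CIPHER_AES_CBC;
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
 *	auth_xform.auth.algo = RTE_CRYPTO_AUTH_SHA1_HMAC;
 *	cipher_xform.next = &auth_xform;
 *	auth_xform.next = NULL;
 */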
2201 static int
2202 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2203                    struct rte_crypto_sym_xform *xform,
2204                    dpaa_sec_session *session)
2205 {
2206         session->aead_alg = xform->aead.algo;
2207         session->ctxt = DPAA_SEC_AEAD;
2208         session->iv.length = xform->aead.iv.length;
2209         session->iv.offset = xform->aead.iv.offset;
2210         session->auth_only_len = xform->aead.aad_length;
2211         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2212                                              RTE_CACHE_LINE_SIZE);
2213         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2214                 DPAA_SEC_ERR("No Memory for aead key");
2215                 return -ENOMEM;
2216         }
2217         session->aead_key.length = xform->aead.key.length;
2218         session->digest_length = xform->aead.digest_length;
2219
2220         memcpy(session->aead_key.data, xform->aead.key.data,
2221                xform->aead.key.length);
2222
2223         switch (session->aead_alg) {
2224         case RTE_CRYPTO_AEAD_AES_GCM:
2225                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2226                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2227                 break;
2228         default:
2229                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2230                 return -ENOTSUP;
2231         }
2232
2233         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2234                         DIR_ENC : DIR_DEC;
2235
2236         return 0;
2237 }
2238
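/*
 * Example AEAD xform consumed by dpaa_sec_aead_init() (illustrative
 * sketch; "key" and IV_OFFSET are placeholders):
 *
 *	struct rte_crypto_sym_xform aead_xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_AEAD,
 *		.aead = {
 *			.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_AEAD_AES_GCM,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 12 },
 *			.digest_length = 16,
 *			.aad_length = 8,
 *		},
 *	};
 */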
2239 static struct qman_fq *
2240 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2241 {
2242         unsigned int i;
2243
2244         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2245                 if (qi->inq_attach[i] == 0) {
2246                         qi->inq_attach[i] = 1;
2247                         return &qi->inq[i];
2248                 }
2249         }
2250         DPAA_SEC_WARN("All %u sessions in use", qi->max_nb_sessions);
2251
2252         return NULL;
2253 }
2254
2255 static int
2256 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2257 {
2258         unsigned int i;
2259
2260         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2261                 if (&qi->inq[i] == fq) {
2262                         if (qman_retire_fq(fq, NULL) != 0)
2263                                 DPAA_SEC_WARN("Queue is not retired\n");
2264                         qman_oos_fq(fq);
2265                         qi->inq_attach[i] = 0;
2266                         return 0;
2267                 }
2268         }
2269         return -1;
2270 }
2271
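/*
 * Bind a session to the given queue pair for the calling lcore:
 * prepare the shared descriptor (CDB), affine a portal if this thread
 * has none yet, and initialize the session's input FQ with the CDB
 * address so that frames are dispatched to SEC and results land on
 * the queue pair's output FQ.
 */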
2272 static int
2273 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2274 {
2275         int ret;
2276
2277         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2278         ret = dpaa_sec_prep_cdb(sess);
2279         if (ret) {
2280                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2281                 return ret;
2282         }
2283         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2284                 ret = rte_dpaa_portal_init((void *)0);
2285                 if (ret) {
2286                         DPAA_SEC_ERR("Failure in affining portal");
2287                         return ret;
2288                 }
2289         }
2290         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2291                                rte_dpaa_mem_vtop(&sess->cdb),
2292                                qman_fq_fqid(&qp->outq));
2293         if (ret)
2294                 DPAA_SEC_ERR("Unable to init sec queue");
2295
2296         return ret;
2297 }
2298
2299 static inline void
2300 free_session_data(dpaa_sec_session *s)
2301 {
2302         if (is_aead(s))
2303                 rte_free(s->aead_key.data);
2304         else {
2305                 rte_free(s->auth_key.data);
2306                 rte_free(s->cipher_key.data);
2307         }
2308         memset(s, 0, sizeof(dpaa_sec_session));
2309 }
2310
2311 static int
2312 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2313                             struct rte_crypto_sym_xform *xform, void *sess)
2314 {
2315         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2316         dpaa_sec_session *session = sess;
2317         uint32_t i;
2318         int ret;
2319
2320         PMD_INIT_FUNC_TRACE();
2321
2322         if (unlikely(sess == NULL)) {
2323                 DPAA_SEC_ERR("invalid session struct");
2324                 return -EINVAL;
2325         }
2326         memset(session, 0, sizeof(dpaa_sec_session));
2327
2328         /* Default IV length = 0 */
2329         session->iv.length = 0;
2330
2331         /* Cipher Only */
2332         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2333                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2334                 ret = dpaa_sec_cipher_init(dev, xform, session);
2335
2336         /* Authentication Only */
2337         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2338                    xform->next == NULL) {
2339                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2340                 session->ctxt = DPAA_SEC_AUTH;
2341                 ret = dpaa_sec_auth_init(dev, xform, session);
2342
2343         /* Cipher then Authenticate */
2344         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2345                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2346                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2347                         session->auth_cipher_text = 1;
2348                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2349                                 ret = dpaa_sec_auth_init(dev, xform, session);
2350                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2351                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2352                         else
2353                                 ret = dpaa_sec_chain_init(dev, xform, session);
2354                 } else {
2355                         DPAA_SEC_ERR("Not supported: Cipher-decrypt then Auth");
2356                         return -ENOTSUP;
2357                 }
2358         /* Authenticate then Cipher */
2359         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2360                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2361                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2362                         session->auth_cipher_text = 0;
2363                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2364                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2365                         else if (xform->next->cipher.algo
2366                                         == RTE_CRYPTO_CIPHER_NULL)
2367                                 ret = dpaa_sec_auth_init(dev, xform, session);
2368                         else
2369                                 ret = dpaa_sec_chain_init(dev, xform, session);
2370                 } else {
2371                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2372                         return -ENOTSUP;
2373                 }
2374
2375         /* AEAD operation for AES-GCM kind of Algorithms */
2376         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2377                    xform->next == NULL) {
2378                 ret = dpaa_sec_aead_init(dev, xform, session);
2379
2380         } else {
2381                 DPAA_SEC_ERR("Invalid crypto type");
2382                 return -EINVAL;
2383         }
2384         if (ret) {
2385                 DPAA_SEC_ERR("unable to init session");
2386                 goto err1;
2387         }
2388
2389         rte_spinlock_lock(&internals->lock);
2390         for (i = 0; i < MAX_DPAA_CORES; i++) {
2391                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2392                 if (session->inq[i] == NULL) {
2393                         DPAA_SEC_ERR("unable to attach sec queue");
2394                         rte_spinlock_unlock(&internals->lock);
2395                         ret = -EBUSY;
2396                         goto err1;
2397                 }
2398         }
2399         rte_spinlock_unlock(&internals->lock);
2400
2401         return 0;
2402
2403 err1:
2404         free_session_data(session);
2405         return ret;
2406 }
2407
2408 static int
2409 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2410                 struct rte_crypto_sym_xform *xform,
2411                 struct rte_cryptodev_sym_session *sess,
2412                 struct rte_mempool *mempool)
2413 {
2414         void *sess_private_data;
2415         int ret;
2416
2417         PMD_INIT_FUNC_TRACE();
2418
2419         if (rte_mempool_get(mempool, &sess_private_data)) {
2420                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2421                 return -ENOMEM;
2422         }
2423
2424         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2425         if (ret != 0) {
2426                 DPAA_SEC_ERR("failed to configure session parameters");
2427
2428                 /* Return session to mempool */
2429                 rte_mempool_put(mempool, sess_private_data);
2430                 return ret;
2431         }
2432
2433         set_sym_session_private_data(sess, dev->driver_id,
2434                         sess_private_data);
2435
2437         return 0;
2438 }
2439
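/*
 * Usage sketch (illustrative): with the symmetric session API of this
 * DPDK release, a session is created from a mempool and then bound to
 * the device, which invokes the configure handler above. sess_mp,
 * sess_priv_mp, dev_id and xform are placeholders.
 *
 *	struct rte_cryptodev_sym_session *sess;
 *
 *	sess = rte_cryptodev_sym_session_create(sess_mp);
 *	if (sess == NULL ||
 *	    rte_cryptodev_sym_session_init(dev_id, sess, &xform,
 *					   sess_priv_mp) < 0)
 *		rte_exit(EXIT_FAILURE, "session setup failed\n");
 */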
2440 static inline void
2441 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2442 {
2443         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2444         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2445         uint8_t i;
2446
2447         for (i = 0; i < MAX_DPAA_CORES; i++) {
2448                 if (s->inq[i])
2449                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2450                 s->inq[i] = NULL;
2451                 s->qp[i] = NULL;
2452         }
2453         free_session_data(s);
2454         rte_mempool_put(sess_mp, (void *)s);
2455 }
2456
2457 /** Clear the session memory so it does not leave key material behind */
2458 static void
2459 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2460                 struct rte_cryptodev_sym_session *sess)
2461 {
2462         PMD_INIT_FUNC_TRACE();
2463         uint8_t index = dev->driver_id;
2464         void *sess_priv = get_sym_session_private_data(sess, index);
2465         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2466
2467         if (sess_priv) {
2468                 free_session_memory(dev, s);
2469                 set_sym_session_private_data(sess, index, NULL);
2470         }
2471 }
2472
2473 #ifdef RTE_LIBRTE_SECURITY
2474 static int
2475 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2476                         struct rte_security_ipsec_xform *ipsec_xform,
2477                         dpaa_sec_session *session)
2478 {
2479         PMD_INIT_FUNC_TRACE();
2480
2481         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2482                                                RTE_CACHE_LINE_SIZE);
2483         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2484                 DPAA_SEC_ERR("No Memory for aead key");
2485                 return -ENOMEM;
2486         }
2487         memcpy(session->aead_key.data, aead_xform->key.data,
2488                aead_xform->key.length);
2489
2490         session->digest_length = aead_xform->digest_length;
2491         session->aead_key.length = aead_xform->key.length;
2492
2493         switch (aead_xform->algo) {
2494         case RTE_CRYPTO_AEAD_AES_GCM:
2495                 switch (session->digest_length) {
2496                 case 8:
2497                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2498                         break;
2499                 case 12:
2500                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2501                         break;
2502                 case 16:
2503                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2504                         break;
2505                 default:
2506                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2507                                      session->digest_length);
2508                         return -EINVAL;
2509                 }
2510                 if (session->dir == DIR_ENC) {
2511                         memcpy(session->encap_pdb.gcm.salt,
2512                                 (uint8_t *)&(ipsec_xform->salt), 4);
2513                 } else {
2514                         memcpy(session->decap_pdb.gcm.salt,
2515                                 (uint8_t *)&(ipsec_xform->salt), 4);
2516                 }
2517                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2518                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2519                 break;
2520         default:
2521                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2522                               aead_xform->algo);
2523                 return -ENOTSUP;
2524         }
2525         return 0;
2526 }
2527
2528 static int
2529 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2530         struct rte_crypto_auth_xform *auth_xform,
2531         struct rte_security_ipsec_xform *ipsec_xform,
2532         dpaa_sec_session *session)
2533 {
2534         if (cipher_xform) {
2535                 session->cipher_key.data = rte_zmalloc(NULL,
2536                                                        cipher_xform->key.length,
2537                                                        RTE_CACHE_LINE_SIZE);
2538                 if (session->cipher_key.data == NULL &&
2539                                 cipher_xform->key.length > 0) {
2540                         DPAA_SEC_ERR("No Memory for cipher key");
2541                         return -ENOMEM;
2542                 }
2543
2544                 session->cipher_key.length = cipher_xform->key.length;
2545                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2546                                 cipher_xform->key.length);
2547                 session->cipher_alg = cipher_xform->algo;
2548         } else {
2549                 session->cipher_key.data = NULL;
2550                 session->cipher_key.length = 0;
2551                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2552         }
2553
2554         if (auth_xform) {
2555                 session->auth_key.data = rte_zmalloc(NULL,
2556                                                 auth_xform->key.length,
2557                                                 RTE_CACHE_LINE_SIZE);
2558                 if (session->auth_key.data == NULL &&
2559                                 auth_xform->key.length > 0) {
2560                         DPAA_SEC_ERR("No Memory for auth key");
2561                         return -ENOMEM;
2562                 }
2563                 session->auth_key.length = auth_xform->key.length;
2564                 memcpy(session->auth_key.data, auth_xform->key.data,
2565                                 auth_xform->key.length);
2566                 session->auth_alg = auth_xform->algo;
2567                 session->digest_length = auth_xform->digest_length;
2568         } else {
2569                 session->auth_key.data = NULL;
2570                 session->auth_key.length = 0;
2571                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2572         }
2573
2574         switch (session->auth_alg) {
2575         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2576                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2577                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2578                 break;
2579         case RTE_CRYPTO_AUTH_MD5_HMAC:
2580                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2581                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2582                 break;
2583         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2584                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2585                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2586                 if (session->digest_length != 16)
2587                         DPAA_SEC_WARN(
2588                         "Using a truncated sha256-hmac digest is non-standard;"
2589                         " it will not work with lookaside protocol offload");
2590                 break;
2591         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2592                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2593                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2594                 break;
2595         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2596                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2597                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2598                 break;
2599         case RTE_CRYPTO_AUTH_AES_CMAC:
2600                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2601                 break;
2602         case RTE_CRYPTO_AUTH_NULL:
2603                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2604                 break;
2605         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2606         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2607         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2608         case RTE_CRYPTO_AUTH_SHA1:
2609         case RTE_CRYPTO_AUTH_SHA256:
2610         case RTE_CRYPTO_AUTH_SHA512:
2611         case RTE_CRYPTO_AUTH_SHA224:
2612         case RTE_CRYPTO_AUTH_SHA384:
2613         case RTE_CRYPTO_AUTH_MD5:
2614         case RTE_CRYPTO_AUTH_AES_GMAC:
2615         case RTE_CRYPTO_AUTH_KASUMI_F9:
2616         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2617         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2618                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2619                               session->auth_alg);
2620                 return -ENOTSUP;
2621         default:
2622                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2623                               session->auth_alg);
2624                 return -ENOTSUP;
2625         }
2626
2627         switch (session->cipher_alg) {
2628         case RTE_CRYPTO_CIPHER_AES_CBC:
2629                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2630                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2631                 break;
2632         case RTE_CRYPTO_CIPHER_3DES_CBC:
2633                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2634                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2635                 break;
2636         case RTE_CRYPTO_CIPHER_AES_CTR:
2637                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2638                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2639                 if (session->dir == DIR_ENC) {
2640                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2641                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2642                 } else {
2643                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2644                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2645                 }
2646                 break;
2647         case RTE_CRYPTO_CIPHER_NULL:
2648                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2649                 break;
2650         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2651         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2652         case RTE_CRYPTO_CIPHER_3DES_ECB:
2653         case RTE_CRYPTO_CIPHER_AES_ECB:
2654         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2655                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2656                               session->cipher_alg);
2657                 return -ENOTSUP;
2658         default:
2659                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2660                               session->cipher_alg);
2661                 return -ENOTSUP;
2662         }
2663
2664         return 0;
2665 }
2666
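/*
 * Translate an rte_security IPsec session config into a driver
 * session: key/algorithm selection via the proto/aead helpers above,
 * a prebuilt outer IPv4/IPv6 header plus encap PDB for egress tunnel
 * mode, and decap PDB options (outer header length, ESN, anti-replay
 * window size) for ingress.
 */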
2667 static int
2668 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2669                            struct rte_security_session_conf *conf,
2670                            void *sess)
2671 {
2672         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2673         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2674         struct rte_crypto_auth_xform *auth_xform = NULL;
2675         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2676         struct rte_crypto_aead_xform *aead_xform = NULL;
2677         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2678         uint32_t i;
2679         int ret;
2680
2681         PMD_INIT_FUNC_TRACE();
2682
2683         memset(session, 0, sizeof(dpaa_sec_session));
2684         session->proto_alg = conf->protocol;
2685         session->ctxt = DPAA_SEC_IPSEC;
2686
2687         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2688                 session->dir = DIR_ENC;
2689         else
2690                 session->dir = DIR_DEC;
2691
2692         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2693                 cipher_xform = &conf->crypto_xform->cipher;
2694                 if (conf->crypto_xform->next)
2695                         auth_xform = &conf->crypto_xform->next->auth;
2696                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2697                                         ipsec_xform, session);
2698         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2699                 auth_xform = &conf->crypto_xform->auth;
2700                 if (conf->crypto_xform->next)
2701                         cipher_xform = &conf->crypto_xform->next->cipher;
2702                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2703                                         ipsec_xform, session);
2704         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2705                 aead_xform = &conf->crypto_xform->aead;
2706                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2707                                         ipsec_xform, session);
2708         } else {
2709                 DPAA_SEC_ERR("XFORM not specified");
2710                 ret = -EINVAL;
2711                 goto out;
2712         }
2713         if (ret) {
2714                 DPAA_SEC_ERR("Failed to process xform");
2715                 goto out;
2716         }
2717
2718         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2719                 if (ipsec_xform->tunnel.type ==
2720                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2721                         session->ip4_hdr.ip_v = IPVERSION;
2722                         session->ip4_hdr.ip_hl = 5;
2723                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2724                                                 sizeof(session->ip4_hdr));
2725                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2726                         session->ip4_hdr.ip_id = 0;
2727                         session->ip4_hdr.ip_off = 0;
2728                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2729                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2730                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2731                                         IPPROTO_ESP : IPPROTO_AH;
2732                         session->ip4_hdr.ip_sum = 0;
2733                         session->ip4_hdr.ip_src =
2734                                         ipsec_xform->tunnel.ipv4.src_ip;
2735                         session->ip4_hdr.ip_dst =
2736                                         ipsec_xform->tunnel.ipv4.dst_ip;
2737                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2738                                                 (void *)&session->ip4_hdr,
2739                                                 sizeof(struct ip));
2740                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2741                 } else if (ipsec_xform->tunnel.type ==
2742                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2743                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2744                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2745                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2746                                         RTE_IPV6_HDR_TC_SHIFT) &
2747                                         RTE_IPV6_HDR_TC_MASK) |
2748                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2749                                         RTE_IPV6_HDR_FL_SHIFT) &
2750                                         RTE_IPV6_HDR_FL_MASK));
2751                         /* Payload length will be updated by HW */
2752                         session->ip6_hdr.payload_len = 0;
2753                         session->ip6_hdr.hop_limits =
2754                                         ipsec_xform->tunnel.ipv6.hlimit;
2755                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2756                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2757                                         IPPROTO_ESP : IPPROTO_AH;
2758                         memcpy(&session->ip6_hdr.src_addr,
2759                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2760                         memcpy(&session->ip6_hdr.dst_addr,
2761                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2762                         session->encap_pdb.ip_hdr_len =
2763                                                 sizeof(struct rte_ipv6_hdr);
2764                 }
2765                 session->encap_pdb.options =
2766                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2767                         PDBOPTS_ESP_OIHI_PDB_INL |
2768                         PDBOPTS_ESP_IVSRC |
2769                         PDBHMO_ESP_ENCAP_DTTL |
2770                         PDBHMO_ESP_SNR;
2771                 if (ipsec_xform->options.esn)
2772                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2773                 session->encap_pdb.spi = ipsec_xform->spi;
2774
2775         } else if (ipsec_xform->direction ==
2776                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
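		/*
		 * Per the SEC decapsulation PDB layout, the upper halfword
		 * of 'options' carries the length of the outer header that
		 * the hardware strips on ingress.
		 */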
2777                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2778                         session->decap_pdb.options = sizeof(struct ip) << 16;
2779                 else
2780                         session->decap_pdb.options =
2781                                         sizeof(struct rte_ipv6_hdr) << 16;
2782                 if (ipsec_xform->options.esn)
2783                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2784                 if (ipsec_xform->replay_win_sz) {
2785                         uint32_t win_sz;
2786                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2787
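			/*
			 * SEC supports anti-replay windows of 32, 64 or 128
			 * entries only, so round the requested size up to a
			 * power of two and pick the smallest window that
			 * covers it.
			 */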
2788                         switch (win_sz) {
2789                         case 1:
2790                         case 2:
2791                         case 4:
2792                         case 8:
2793                         case 16:
2794                         case 32:
2795                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2796                                 break;
2797                         case 64:
2798                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2799                                 break;
2800                         default:
2801                                 session->decap_pdb.options |=
2802                                                         PDBOPTS_ESP_ARS128;
2803                         }
2804                 }
	} else {
		DPAA_SEC_ERR("Invalid IPsec direction");
		goto out;
	}
2807         rte_spinlock_lock(&internals->lock);
2808         for (i = 0; i < MAX_DPAA_CORES; i++) {
2809                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2810                 if (session->inq[i] == NULL) {
2811                         DPAA_SEC_ERR("unable to attach sec queue");
2812                         rte_spinlock_unlock(&internals->lock);
2813                         goto out;
2814                 }
2815         }
2816         rte_spinlock_unlock(&internals->lock);
2817
2818         return 0;
2819 out:
2820         free_session_data(session);
2821         return -1;
2822 }
2823
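/*
 * Populate a dpaa_sec_session for the PDCP security protocol: map the
 * cipher/auth transforms onto the SEC PDCP algorithm types, copy the keys,
 * record the PDCP xform fields (domain, bearer, direction, SN size, HFN)
 * and attach one SEC rx queue per lcore.
 */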
2824 static int
2825 dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
2826                           struct rte_security_session_conf *conf,
2827                           void *sess)
2828 {
2829         struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
2830         struct rte_crypto_sym_xform *xform = conf->crypto_xform;
2831         struct rte_crypto_auth_xform *auth_xform = NULL;
2832         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2833         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2834         struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
2835         uint32_t i;
2836         int ret;
2837
2838         PMD_INIT_FUNC_TRACE();
2839
2840         memset(session, 0, sizeof(dpaa_sec_session));
2841
2842         /* find xfrm types */
2843         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2844                 cipher_xform = &xform->cipher;
2845                 if (xform->next != NULL)
2846                         auth_xform = &xform->next->auth;
2847         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2848                 auth_xform = &xform->auth;
2849                 if (xform->next != NULL)
2850                         cipher_xform = &xform->next->cipher;
2851         } else {
2852                 DPAA_SEC_ERR("Invalid crypto type");
2853                 return -EINVAL;
2854         }
2855
2856         session->proto_alg = conf->protocol;
2857         session->ctxt = DPAA_SEC_PDCP;
2858
2859         if (cipher_xform) {
2860                 switch (cipher_xform->algo) {
2861                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2862                         session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
2863                         break;
2864                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2865                         session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
2866                         break;
2867                 case RTE_CRYPTO_CIPHER_AES_CTR:
2868                         session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
2869                         break;
2870                 case RTE_CRYPTO_CIPHER_NULL:
2871                         session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
2872                         break;
2873                 default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				      cipher_xform->algo);
2876                         return -EINVAL;
2877                 }
2878
2879                 session->cipher_key.data = rte_zmalloc(NULL,
2880                                                cipher_xform->key.length,
2881                                                RTE_CACHE_LINE_SIZE);
2882                 if (session->cipher_key.data == NULL &&
2883                                 cipher_xform->key.length > 0) {
2884                         DPAA_SEC_ERR("No Memory for cipher key");
2885                         return -ENOMEM;
2886                 }
2887                 session->cipher_key.length = cipher_xform->key.length;
2888                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2889                         cipher_xform->key.length);
2890                 session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2891                                         DIR_ENC : DIR_DEC;
2892                 session->cipher_alg = cipher_xform->algo;
2893         } else {
2894                 session->cipher_key.data = NULL;
2895                 session->cipher_key.length = 0;
2896                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2897                 session->dir = DIR_ENC;
2898         }
2899
2900         if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
2901                 if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
2902                     pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
2903                         DPAA_SEC_ERR(
2904                                 "PDCP Seq Num size should be 5/12 bits for cmode");
2905                         ret = -EINVAL;
2906                         goto out;
2907                 }
2908         }
2909
2910         if (auth_xform) {
2911                 switch (auth_xform->algo) {
2912                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2913                         session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
2914                         break;
2915                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
2916                         session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
2917                         break;
2918                 case RTE_CRYPTO_AUTH_AES_CMAC:
2919                         session->auth_key.alg = PDCP_AUTH_TYPE_AES;
2920                         break;
2921                 case RTE_CRYPTO_AUTH_NULL:
2922                         session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
2923                         break;
2924                 default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      auth_xform->algo);
2927                         rte_free(session->cipher_key.data);
2928                         return -EINVAL;
2929                 }
2930                 session->auth_key.data = rte_zmalloc(NULL,
2931                                                      auth_xform->key.length,
2932                                                      RTE_CACHE_LINE_SIZE);
2933                 if (!session->auth_key.data &&
2934                     auth_xform->key.length > 0) {
2935                         DPAA_SEC_ERR("No Memory for auth key");
2936                         rte_free(session->cipher_key.data);
2937                         return -ENOMEM;
2938                 }
2939                 session->auth_key.length = auth_xform->key.length;
2940                 memcpy(session->auth_key.data, auth_xform->key.data,
2941                        auth_xform->key.length);
2942                 session->auth_alg = auth_xform->algo;
2943         } else {
2944                 session->auth_key.data = NULL;
2945                 session->auth_key.length = 0;
2946                 session->auth_alg = 0;
2947         }
2948         session->pdcp.domain = pdcp_xform->domain;
2949         session->pdcp.bearer = pdcp_xform->bearer;
2950         session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
2951         session->pdcp.sn_size = pdcp_xform->sn_size;
2952         session->pdcp.hfn = pdcp_xform->hfn;
2953         session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
2954         session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be NULL for auth-only PDCP sessions */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;
2956
2957         rte_spinlock_lock(&dev_priv->lock);
2958         for (i = 0; i < MAX_DPAA_CORES; i++) {
2959                 session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
2960                 if (session->inq[i] == NULL) {
2961                         DPAA_SEC_ERR("unable to attach sec queue");
2962                         rte_spinlock_unlock(&dev_priv->lock);
2963                         ret = -EBUSY;
2964                         goto out;
2965                 }
2966         }
2967         rte_spinlock_unlock(&dev_priv->lock);
2968         return 0;
2969 out:
2970         rte_free(session->auth_key.data);
2971         rte_free(session->cipher_key.data);
2972         memset(session, 0, sizeof(dpaa_sec_session));
2973         return ret;
2974 }
2975
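/*
 * rte_security .session_create handler: take a session object from the
 * caller's mempool and configure it for IPsec or PDCP offload. A minimal
 * usage sketch from the application side (names and values are
 * illustrative, not a complete configuration):
 *
 *	void *ctx = rte_cryptodev_get_sec_ctx(dev_id);
 *	struct rte_security_session_conf conf = {
 *		.action_type = RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL,
 *		.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
 *		.ipsec = { ... },
 *		.crypto_xform = &xform_chain,
 *	};
 *	struct rte_security_session *sess =
 *		rte_security_session_create(ctx, &conf, sess_mempool);
 */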
2976 static int
2977 dpaa_sec_security_session_create(void *dev,
2978                                  struct rte_security_session_conf *conf,
2979                                  struct rte_security_session *sess,
2980                                  struct rte_mempool *mempool)
2981 {
2982         void *sess_private_data;
2983         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
2984         int ret;
2985
2986         if (rte_mempool_get(mempool, &sess_private_data)) {
2987                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2988                 return -ENOMEM;
2989         }
2990
2991         switch (conf->protocol) {
2992         case RTE_SECURITY_PROTOCOL_IPSEC:
2993                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
2994                                 sess_private_data);
2995                 break;
2996         case RTE_SECURITY_PROTOCOL_PDCP:
2997                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
2998                                 sess_private_data);
2999                 break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		/* fall through to the common cleanup below so the
		 * mempool object is returned instead of leaked
		 */
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
		break;
3004         }
3005         if (ret != 0) {
3006                 DPAA_SEC_ERR("failed to configure session parameters");
3007                 /* Return session to mempool */
3008                 rte_mempool_put(mempool, sess_private_data);
3009                 return ret;
3010         }
3011
3012         set_sec_session_private_data(sess, sess_private_data);
3013
3014         return ret;
3015 }
3016
/** Clear the session memory so that it does not leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
3020                 struct rte_security_session *sess)
3021 {
3022         PMD_INIT_FUNC_TRACE();
3023         void *sess_priv = get_sec_session_private_data(sess);
3024         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3025
3026         if (sess_priv) {
3027                 free_session_memory((struct rte_cryptodev *)dev, s);
3028                 set_sec_session_private_data(sess, NULL);
3029         }
3030         return 0;
3031 }
3032 #endif
3033 static int
3034 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3035                        struct rte_cryptodev_config *config __rte_unused)
3036 {
3037         PMD_INIT_FUNC_TRACE();
3038
3039         return 0;
3040 }
3041
3042 static int
3043 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3044 {
3045         PMD_INIT_FUNC_TRACE();
3046         return 0;
3047 }
3048
3049 static void
3050 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3051 {
3052         PMD_INIT_FUNC_TRACE();
3053 }
3054
3055 static int
3056 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3057 {
3058         PMD_INIT_FUNC_TRACE();
3059
	if (dev == NULL)
		return -ENODEV;
3062
3063         return 0;
3064 }
3065
3066 static void
3067 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3068                        struct rte_cryptodev_info *info)
3069 {
3070         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3071
3072         PMD_INIT_FUNC_TRACE();
3073         if (info != NULL) {
3074                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3075                 info->feature_flags = dev->feature_flags;
3076                 info->capabilities = dpaa_sec_capabilities;
3077                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3078                 info->driver_id = cryptodev_driver_id;
3079         }
3080 }
3081
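/*
 * DQRR callback used when a queue pair is attached to an event queue in
 * parallel mode: recover the originating crypto op from the SEC frame
 * descriptor, fill the rte_event from the queue's stored template and let
 * the DQRR entry be consumed immediately.
 */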
3082 static enum qman_cb_dqrr_result
3083 dpaa_sec_process_parallel_event(void *event,
3084                         struct qman_portal *qm __always_unused,
3085                         struct qman_fq *outq,
3086                         const struct qm_dqrr_entry *dqrr,
3087                         void **bufs)
3088 {
3089         const struct qm_fd *fd;
3090         struct dpaa_sec_job *job;
3091         struct dpaa_sec_op_ctx *ctx;
3092         struct rte_event *ev = (struct rte_event *)event;
3093
3094         fd = &dqrr->fd;
3095
	/* The SG table is embedded in the op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
3100         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3101
3102         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3103         ctx->fd_status = fd->status;
3104         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3105                 struct qm_sg_entry *sg_out;
3106                 uint32_t len;
3107
3108                 sg_out = &job->sg[0];
3109                 hw_sg_to_cpu(sg_out);
3110                 len = sg_out->length;
3111                 ctx->op->sym->m_src->pkt_len = len;
3112                 ctx->op->sym->m_src->data_len = len;
3113         }
3114         if (!ctx->fd_status) {
3115                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3116         } else {
3117                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3118                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3119         }
3120         ev->event_ptr = (void *)ctx->op;
3121
3122         ev->flow_id = outq->ev.flow_id;
3123         ev->sub_event_type = outq->ev.sub_event_type;
3124         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3125         ev->op = RTE_EVENT_OP_NEW;
3126         ev->sched_type = outq->ev.sched_type;
3127         ev->queue_id = outq->ev.queue_id;
3128         ev->priority = outq->ev.priority;
3129         *bufs = (void *)ctx->op;
3130
3131         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3132
3133         return qman_cb_dqrr_consume;
3134 }
3135
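/*
 * Atomic-mode counterpart of the callback above: the op recovery is
 * identical, but the DQRR entry is held (qman_cb_dqrr_defer) and its ring
 * index is recorded per lcore, so ordering is released only once the
 * application finishes processing the event.
 */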
3136 static enum qman_cb_dqrr_result
3137 dpaa_sec_process_atomic_event(void *event,
3138                         struct qman_portal *qm __rte_unused,
3139                         struct qman_fq *outq,
3140                         const struct qm_dqrr_entry *dqrr,
3141                         void **bufs)
3142 {
3143         u8 index;
3144         const struct qm_fd *fd;
3145         struct dpaa_sec_job *job;
3146         struct dpaa_sec_op_ctx *ctx;
3147         struct rte_event *ev = (struct rte_event *)event;
3148
3149         fd = &dqrr->fd;
3150
	/* The SG table is embedded in the op ctx:
	 * sg[0] is for output,
	 * sg[1] is for input.
	 */
3155         job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));
3156
3157         ctx = container_of(job, struct dpaa_sec_op_ctx, job);
3158         ctx->fd_status = fd->status;
3159         if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
3160                 struct qm_sg_entry *sg_out;
3161                 uint32_t len;
3162
3163                 sg_out = &job->sg[0];
3164                 hw_sg_to_cpu(sg_out);
3165                 len = sg_out->length;
3166                 ctx->op->sym->m_src->pkt_len = len;
3167                 ctx->op->sym->m_src->data_len = len;
3168         }
3169         if (!ctx->fd_status) {
3170                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
3171         } else {
3172                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
3173                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
3174         }
3175         ev->event_ptr = (void *)ctx->op;
3176         ev->flow_id = outq->ev.flow_id;
3177         ev->sub_event_type = outq->ev.sub_event_type;
3178         ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
3179         ev->op = RTE_EVENT_OP_NEW;
3180         ev->sched_type = outq->ev.sched_type;
3181         ev->queue_id = outq->ev.queue_id;
3182         ev->priority = outq->ev.priority;
3183
3184         /* Save active dqrr entries */
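	/*
	 * Each DQRR ring entry is 64 bytes, so shifting the entry address
	 * right by 6 yields its index within the 16-entry ring.
	 */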
3185         index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
3186         DPAA_PER_LCORE_DQRR_SIZE++;
3187         DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
3188         DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
3189         ev->impl_opaque = index + 1;
3190         ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
3191         *bufs = (void *)ctx->op;
3192
3193         rte_mempool_put(ctx->ctx_pool, (void *)ctx);
3194
3195         return qman_cb_dqrr_defer;
3196 }
3197
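/*
 * Attach a queue pair to an eventdev channel: re-initialize the out FQ so
 * completions are dequeued through the given channel, choosing the atomic
 * or parallel DQRR callback from event->sched_type. This is normally
 * reached through the DPAA event crypto adapter; a minimal sketch, with
 * illustrative cdev/qp_id/ch_id values supplied by that adapter:
 *
 *	struct rte_event ev = {
 *		.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *		.queue_id = 0,
 *	};
 *	ret = dpaa_sec_eventq_attach(cdev, qp_id, ch_id, &ev);
 */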
3198 int
3199 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3200                 int qp_id,
3201                 uint16_t ch_id,
3202                 const struct rte_event *event)
3203 {
3204         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3205         struct qm_mcc_initfq opts = {0};
3206
3207         int ret;
3208
3209         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3210                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3211         opts.fqd.dest.channel = ch_id;
3212
3213         switch (event->sched_type) {
3214         case RTE_SCHED_TYPE_ATOMIC:
3215                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Clear the FQCTRL_AVOIDBLOCK bit, as it is redundant
		 * when HOLD_ACTIVE is set.
		 */
3219                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3220                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3221                 break;
3222         case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
3224                 return -ENOTSUP;
3225         default:
3226                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3227                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3228                 break;
3229         }
3230
3231         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3232         if (unlikely(ret)) {
3233                 DPAA_SEC_ERR("unable to init caam source fq!");
3234                 return ret;
3235         }
3236
3237         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3238
3239         return 0;
3240 }
3241
3242 int
3243 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3244                         int qp_id)
3245 {
3246         struct qm_mcc_initfq opts = {0};
3247         int ret;
3248         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3249
3250         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3251                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3252         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3253         qp->outq.cb.ern  = ern_sec_fq_handler;
3254         qman_retire_fq(&qp->outq, NULL);
3255         qman_oos_fq(&qp->outq);
3256         ret = qman_init_fq(&qp->outq, 0, &opts);
3257         if (ret)
		DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
3259         qp->outq.cb.dqrr = NULL;
3260
3261         return ret;
3262 }
3263
3264 static struct rte_cryptodev_ops crypto_ops = {
3265         .dev_configure        = dpaa_sec_dev_configure,
3266         .dev_start            = dpaa_sec_dev_start,
3267         .dev_stop             = dpaa_sec_dev_stop,
3268         .dev_close            = dpaa_sec_dev_close,
3269         .dev_infos_get        = dpaa_sec_dev_infos_get,
3270         .queue_pair_setup     = dpaa_sec_queue_pair_setup,
3271         .queue_pair_release   = dpaa_sec_queue_pair_release,
3272         .sym_session_get_size     = dpaa_sec_sym_session_get_size,
3273         .sym_session_configure    = dpaa_sec_sym_session_configure,
3274         .sym_session_clear        = dpaa_sec_sym_session_clear
3275 };
3276
3277 #ifdef RTE_LIBRTE_SECURITY
3278 static const struct rte_security_capability *
3279 dpaa_sec_capabilities_get(void *device __rte_unused)
3280 {
3281         return dpaa_sec_security_cap;
3282 }
3283
3284 static const struct rte_security_ops dpaa_sec_security_ops = {
3285         .session_create = dpaa_sec_security_session_create,
3286         .session_update = NULL,
3287         .session_stats_get = NULL,
3288         .session_destroy = dpaa_sec_security_session_destroy,
3289         .set_pkt_metadata = NULL,
3290         .capabilities_get = dpaa_sec_capabilities_get
3291 };
3292 #endif
3293 static int
3294 dpaa_sec_uninit(struct rte_cryptodev *dev)
3295 {
3296         struct dpaa_sec_dev_private *internals;
3297
3298         if (dev == NULL)
3299                 return -ENODEV;
3300
3301         internals = dev->data->dev_private;
3302         rte_free(dev->security_ctx);
3303
3304         rte_free(internals);
3305
3306         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3307                       dev->data->name, rte_socket_id());
3308
3309         return 0;
3310 }
3311
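/*
 * Per-device initialization: register the ops and burst functions and, in
 * the primary process only, create the security context, initialize the tx
 * FQ of every queue pair and pre-create the pool of rx FQs that sessions
 * attach to later.
 */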
3312 static int
3313 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3314 {
3315         struct dpaa_sec_dev_private *internals;
3316 #ifdef RTE_LIBRTE_SECURITY
3317         struct rte_security_ctx *security_instance;
3318 #endif
3319         struct dpaa_sec_qp *qp;
3320         uint32_t i, flags;
3321         int ret;
3322
3323         PMD_INIT_FUNC_TRACE();
3324
3325         cryptodev->driver_id = cryptodev_driver_id;
3326         cryptodev->dev_ops = &crypto_ops;
3327
3328         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3329         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3330         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3331                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3332                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3333                         RTE_CRYPTODEV_FF_SECURITY |
3334                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3335                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3336                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3337                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3338                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3339
3340         internals = cryptodev->data->dev_private;
3341         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3342         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3343
3344         /*
3345          * For secondary processes, we don't initialise any further as primary
3346          * has already done this work. Only check we don't need a different
3347          * RX function
3348          */
3349         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already initialized by primary process");
3351                 return 0;
3352         }
3353 #ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for the primary process */
3355         security_instance = rte_malloc("rte_security_instances_ops",
3356                                 sizeof(struct rte_security_ctx), 0);
3357         if (security_instance == NULL)
3358                 return -ENOMEM;
3359         security_instance->device = (void *)cryptodev;
3360         security_instance->ops = &dpaa_sec_security_ops;
3361         security_instance->sess_cnt = 0;
3362         cryptodev->security_ctx = security_instance;
3363 #endif
3364         rte_spinlock_init(&internals->lock);
3365         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3366                 /* init qman fq for queue pair */
3367                 qp = &internals->qps[i];
3368                 ret = dpaa_sec_init_tx(&qp->outq);
3369                 if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d failed", i);
3371                         goto init_error;
3372                 }
3373         }
3374
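	/*
	 * Pre-create the per-session rx FQs toward the SEC engine: QMan
	 * allocates the FQIDs (DYNAMIC_FQID) and the frames are consumed by
	 * a direct-connect portal, i.e. by CAAM rather than by software
	 * (TO_DCPORTAL).
	 */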
3375         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3376                 QMAN_FQ_FLAG_TO_DCPORTAL;
3377         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman FQs for sessions */
3379                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3380                 if (unlikely(ret != 0)) {
3381                         DPAA_SEC_ERR("sec qman_create_fq failed");
3382                         goto init_error;
3383                 }
3384         }
3385
	DPAA_SEC_INFO("%s cryptodev init", cryptodev->data->name);
3387         return 0;
3388
3389 init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);
3391
3392         rte_free(cryptodev->security_ctx);
3393         return -EFAULT;
3394 }
3395
3396 static int
3397 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3398                                 struct rte_dpaa_device *dpaa_dev)
3399 {
3400         struct rte_cryptodev *cryptodev;
3401         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3402
3403         int retval;
3404
3405         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3406
3407         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3408         if (cryptodev == NULL)
3409                 return -ENOMEM;
3410
3411         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3412                 cryptodev->data->dev_private = rte_zmalloc_socket(
3413                                         "cryptodev private structure",
3414                                         sizeof(struct dpaa_sec_dev_private),
3415                                         RTE_CACHE_LINE_SIZE,
3416                                         rte_socket_id());
3417
3418                 if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memory for private "
					"device data");
3421         }
3422
3423         dpaa_dev->crypto_dev = cryptodev;
3424         cryptodev->device = &dpaa_dev->device;
3425
3426         /* init user callbacks */
3427         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3428
	/* If the SEC era (hardware version) is not yet configured,
	 * read it from the "fsl,sec-era" device tree property.
	 */
3430         if (!rta_get_sec_era()) {
3431                 const struct device_node *caam_node;
3432
3433                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3434                         const uint32_t *prop = of_get_property(caam_node,
3435                                         "fsl,sec-era",
3436                                         NULL);
3437                         if (prop) {
3438                                 rta_set_sec_era(
3439                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3440                                 break;
3441                         }
3442                 }
3443         }
3444
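	/* The FQ configuration below needs a QMan portal on this lcore;
	 * initialize one if the core has not been affined yet.
	 */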
3445         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
3446                 retval = rte_dpaa_portal_init((void *)1);
3447                 if (retval) {
3448                         DPAA_SEC_ERR("Unable to initialize portal");
3449                         goto out;
3450                 }
3451         }
3452
3453         /* Invoke PMD device initialization function */
3454         retval = dpaa_sec_dev_init(cryptodev);
3455         if (retval == 0)
3456                 return 0;
3457
3458         retval = -ENXIO;
3459 out:
3460         /* In case of error, cleanup is done */
3461         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3462                 rte_free(cryptodev->data->dev_private);
3463
3464         rte_cryptodev_pmd_release_device(cryptodev);
3465
3466         return retval;
3467 }
3468
3469 static int
3470 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3471 {
3472         struct rte_cryptodev *cryptodev;
3473         int ret;
3474
3475         cryptodev = dpaa_dev->crypto_dev;
3476         if (cryptodev == NULL)
3477                 return -ENODEV;
3478
3479         ret = dpaa_sec_uninit(cryptodev);
3480         if (ret)
3481                 return ret;
3482
3483         return rte_cryptodev_pmd_destroy(cryptodev);
3484 }
3485
3486 static struct rte_dpaa_driver rte_dpaa_sec_driver = {
3487         .drv_type = FSL_DPAA_CRYPTO,
3488         .driver = {
3489                 .name = "DPAA SEC PMD"
3490         },
3491         .probe = cryptodev_dpaa_sec_probe,
3492         .remove = cryptodev_dpaa_sec_remove,
3493 };
3494
3495 static struct cryptodev_driver dpaa_sec_crypto_drv;
3496
3497 RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
3498 RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
3499                 cryptodev_driver_id);
3500 RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);