/* SPDX-License-Identifier: BSD-3-Clause
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2017-2019 NXP
 *
 */

#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <net/if.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cryptodev_pmd.h>
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <rte_security_driver.h>
#endif
#include <rte_cycles.h>
#include <rte_dev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>

#include <fsl_usd.h>
#include <fsl_qman.h>
#include <dpaa_of.h>

/* RTA header files */
#include <desc/common.h>
#include <desc/algo.h>
#include <desc/ipsec.h>
#include <desc/pdcp.h>

#include <rte_dpaa_bus.h>
#include <dpaa_sec.h>
#include <dpaa_sec_event.h>
#include <dpaa_sec_log.h>
#include <dpaax_iova_table.h>

static uint8_t cryptodev_driver_id;

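/* Per-lcore scratch state used by the ordered-dequeue callback
 * (dqrr_out_fq_cb_rx): completed crypto ops are collected here, up to
 * DPAA_SEC_BURST at a time, before being handed back to the caller.
 */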
static __thread struct rte_crypto_op **dpaa_sec_ops;
static __thread int dpaa_sec_op_nb;

static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);

static inline void
dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
{
        if (!ctx->fd_status) {
                ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
        } else {
                DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
                ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
        }
}

static inline struct dpaa_sec_op_ctx *
dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
{
        struct dpaa_sec_op_ctx *ctx;
        int i, retval;

        retval = rte_mempool_get(
                        ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
                        (void **)(&ctx));
        /* check retval first: ctx is not written on a failed get */
        if (retval || !ctx) {
                DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
                return NULL;
        }
        /*
         * Clear the SG memory. There are 16 SG entries of 16 bytes each.
         * One call to dcbz_64() clears 64 bytes, hence it is called once
         * per four entries to clear them all. Since dpaa_sec_alloc_ctx()
         * runs for every packet, memset() would be costlier than dcbz_64().
         */
        for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
                dcbz_64(&ctx->job.sg[i]);

        ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
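        /* Cache the delta between this context's virtual address and its
         * IOVA; where needed, addresses inside the context (e.g. the
         * in-flight digest copy) can then be translated as
         * vaddr - vtop_offset without a table lookup.
         */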
        ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);

        return ctx;
}

static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}

/* Initialize the queue with the CAAM channel as destination so that all
 * packets on this queue are dispatched to CAAM.
 */
static int
dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
                 uint32_t fqid_out)
{
        struct qm_mcc_initfq fq_opts;
        uint32_t flags;
        int ret = -1;

        /* Clear FQ options */
        memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));

        flags = QMAN_INITFQ_FLAG_SCHED;
        fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
                          QM_INITFQ_WE_CONTEXTB;

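        /* CONTEXT_A carries the IOVA of the session's shared descriptor
         * (the CDB); CONTEXT_B carries the FQID of the queue on which
         * CAAM enqueues the results.
         */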
        qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
        fq_opts.fqd.context_b = fqid_out;
        fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
        fq_opts.fqd.dest.wq = 0;

        fq_in->cb.ern  = ern_sec_fq_handler;

        DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);

        ret = qman_init_fq(fq_in, flags, &fq_opts);
        if (unlikely(ret != 0))
                DPAA_SEC_ERR("qman_init_fq failed %d", ret);

        return ret;
}

/* Frames are enqueued on in_fq and CAAM delivers the crypto result on
 * out_fq.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        if (dpaa_sec_op_nb >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* The SG table is embedded in an op ctx:
         * sg[0] is for output,
         * sg[1] is for input.
         */
        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
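        /* For protocol-offload (security) sessions the output length can
         * differ from the input length, so propagate the length reported
         * in the output SG entry to the mbuf chain.
         */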
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        dpaa_sec_ops[dpaa_sec_op_nb++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}

/* CAAM results are delivered to this queue */
static int
dpaa_sec_init_tx(struct qman_fq *fq)
{
        int ret;
        struct qm_mcc_initfq opts;
        uint32_t flags;

        flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
                QMAN_FQ_FLAG_DYNAMIC_FQID;

        ret = qman_create_fq(0, flags, fq);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("qman_create_fq failed");
                return ret;
        }

        memset(&opts, 0, sizeof(opts));
        opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
                       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;

        /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */

        fq->cb.dqrr = dqrr_out_fq_cb_rx;
        fq->cb.ern  = ern_sec_fq_handler;

        ret = qman_init_fq(fq, 0, &opts);
        if (unlikely(ret)) {
                DPAA_SEC_ERR("unable to init caam source fq!");
                return ret;
        }

        return ret;
}

static inline int is_aead(dpaa_sec_session *ses)
{
        return ((ses->cipher_alg == 0) &&
                (ses->auth_alg == 0) &&
                (ses->aead_alg != 0));
}

static inline int is_encode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_ENC;
}

static inline int is_decode(dpaa_sec_session *ses)
{
        return ses->dir == DIR_DEC;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->auth_alg) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;

                p_authdata = &authdata;

                cdb->sh_desc[1] = authdata.keylen;
        }

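        /* rta_inline_query() reports, given the base descriptor length and
         * the key lengths stashed in sh_desc[0..1], which keys fit inline
         * in the shared descriptor: bit i of sh_desc[2] is set if key i
         * can be inlined, otherwise it must be referenced by pointer.
         */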
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);
        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }

        if (!(cdb->sh_desc[2] & 1) && cipherdata.keylen) {
                cipherdata.key =
                        (size_t)rte_dpaa_mem_vtop((void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (!(cdb->sh_desc[2] & (1 << 1)) && authdata.keylen) {
                authdata.key =
                        (size_t)rte_dpaa_mem_vtop((void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_u_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.hfn,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, p_authdata, 0);
        }
        return shared_desc_len;
}

/* prepare the IPsec protocol command block for the session */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_key.length) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;
        }

        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               MIN_JOB_DESC_SIZE,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        if (cdb->sh_desc[2] & (1 << 1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}
#endif
/* prepare the command block for the session */
static int
dpaa_sec_prep_cdb(dpaa_sec_session *ses)
{
        struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
        int32_t shared_desc_len = 0;
        struct sec_cdb *cdb = &ses->cdb;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        memset(cdb, 0, sizeof(struct sec_cdb));

        switch (ses->ctxt) {
#ifdef RTE_LIBRTE_SECURITY
        case DPAA_SEC_IPSEC:
                shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
                break;
        case DPAA_SEC_PDCP:
                shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
                break;
#endif
        case DPAA_SEC_CIPHER:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                switch (ses->cipher_alg) {
                case RTE_CRYPTO_CIPHER_AES_CBC:
                case RTE_CRYPTO_CIPHER_3DES_CBC:
                case RTE_CRYPTO_CIPHER_AES_CTR:
                case RTE_CRYPTO_CIPHER_3DES_CTR:
                        shared_desc_len = cnstr_shdsc_blkcipher(
                                        cdb->sh_desc, true,
                                        swap, SHR_NEVER, &alginfo_c,
                                        ses->iv.length,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
                        shared_desc_len = cnstr_shdsc_snow_f8(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                case RTE_CRYPTO_CIPHER_ZUC_EEA3:
                        shared_desc_len = cnstr_shdsc_zuce(
                                        cdb->sh_desc, true, swap,
                                        &alginfo_c,
                                        ses->dir);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported cipher alg %d",
                                     ses->cipher_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AUTH:
                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;
                switch (ses->auth_alg) {
                case RTE_CRYPTO_AUTH_MD5_HMAC:
                case RTE_CRYPTO_AUTH_SHA1_HMAC:
                case RTE_CRYPTO_AUTH_SHA224_HMAC:
                case RTE_CRYPTO_AUTH_SHA256_HMAC:
                case RTE_CRYPTO_AUTH_SHA384_HMAC:
                case RTE_CRYPTO_AUTH_SHA512_HMAC:
                        shared_desc_len = cnstr_shdsc_hmac(
                                                cdb->sh_desc, true,
                                                swap, SHR_NEVER, &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
                        shared_desc_len = cnstr_shdsc_snow_f9(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                case RTE_CRYPTO_AUTH_ZUC_EIA3:
                        shared_desc_len = cnstr_shdsc_zuca(
                                                cdb->sh_desc, true, swap,
                                                &alginfo_a,
                                                !ses->dir,
                                                ses->digest_length);
                        break;
                default:
                        DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
                        return -ENOTSUP;
                }
                break;
        case DPAA_SEC_AEAD:
                alginfo.key = (size_t)ses->aead_key.data;
                alginfo.keylen = ses->aead_key.length;
                alginfo.key_enc_flags = 0;
                alginfo.key_type = RTA_DATA_IMM;
                alginfo.algtype = ses->aead_key.alg;
                alginfo.algmode = ses->aead_key.algmode;

                /* check for an unsupported algorithm only after algtype
                 * has been filled in from the session
                 */
                if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
                        DPAA_SEC_ERR("not supported aead alg");
                        return -ENOTSUP;
                }

                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_gcm_encap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                else
                        shared_desc_len = cnstr_shdsc_gcm_decap(
                                        cdb->sh_desc, true, swap, SHR_NEVER,
                                        &alginfo,
                                        ses->iv.length,
                                        ses->digest_length);
                break;
        case DPAA_SEC_CIPHER_HASH:
                alginfo_c.key = (size_t)ses->cipher_key.data;
                alginfo_c.keylen = ses->cipher_key.length;
                alginfo_c.key_enc_flags = 0;
                alginfo_c.key_type = RTA_DATA_IMM;
                alginfo_c.algtype = ses->cipher_key.alg;
                alginfo_c.algmode = ses->cipher_key.algmode;

                alginfo_a.key = (size_t)ses->auth_key.data;
                alginfo_a.keylen = ses->auth_key.length;
                alginfo_a.key_enc_flags = 0;
                alginfo_a.key_type = RTA_DATA_IMM;
                alginfo_a.algtype = ses->auth_key.alg;
                alginfo_a.algmode = ses->auth_key.algmode;

                cdb->sh_desc[0] = alginfo_c.keylen;
                cdb->sh_desc[1] = alginfo_a.keylen;
                err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                                       MIN_JOB_DESC_SIZE,
                                       (unsigned int *)cdb->sh_desc,
                                       &cdb->sh_desc[2], 2);

                if (err < 0) {
                        DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                        return err;
                }
                if (cdb->sh_desc[2] & 1)
                        alginfo_c.key_type = RTA_DATA_IMM;
                else {
                        alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_c.key);
                        alginfo_c.key_type = RTA_DATA_PTR;
                }
                if (cdb->sh_desc[2] & (1 << 1))
                        alginfo_a.key_type = RTA_DATA_IMM;
                else {
                        alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
                                                (void *)(size_t)alginfo_a.key);
                        alginfo_a.key_type = RTA_DATA_PTR;
                }
                cdb->sh_desc[0] = 0;
                cdb->sh_desc[1] = 0;
                cdb->sh_desc[2] = 0;
                /* auth_only_len is set to 0 here; it is overwritten in the
                 * FD for each packet.
                 */
                shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
                                true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
                                ses->iv.length,
                                ses->digest_length, ses->dir);
                break;
        case DPAA_SEC_HASH_CIPHER:
        default:
                DPAA_SEC_ERR("error: Unsupported session");
                return -ENOTSUP;
        }

        if (shared_desc_len < 0) {
                DPAA_SEC_ERR("error in preparing command block");
                return shared_desc_len;
        }

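        /* The CDB header words are converted to big-endian, as expected by
         * the SEC hardware.
         */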
        cdb->sh_hdr.hi.field.idlen = shared_desc_len;
        cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
        cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);

        return 0;
}

/* The qp is lockless; it must be accessed by only one thread */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * For requests of fewer than four buffers, we set QM_VDQCR_EXACT
         * and ask for the exact number. Otherwise we leave QM_VDQCR_EXACT
         * unset, which may yield up to two more buffers than requested,
         * so we request two fewer in that case.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* The SG table is embedded in an op ctx:
                 * sg[0] is for output,
                 * sg[1] is for input.
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report the op status and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}

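/* Build a compound frame for an auth-only (hash) operation on a scattered
 * mbuf: the output SG entry points at the digest; the input SG list covers
 * an optional IV, the data to authenticate and, on verify (decode), a copy
 * of the expected digest for in-hardware comparison.
 */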
static inline struct dpaa_sec_job *
build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        phys_addr_t start_addr;
        uint8_t *old_digest, extra_segs;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

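        /* For SNOW 3G UIA2 and ZUC EIA3 the auth length/offset are given
         * in bits; only byte-aligned data is supported, so convert them
         * to bytes here.
         */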
        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (is_decode(ses))
                extra_segs = 3;
        else
                extra_segs = 2;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }
        ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        /* output */
        out_sg = &cf->sg[0];
        qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
        out_sg->length = ses->digest_length;
        cpu_to_hw_sg(out_sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = in_sg + 1;

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;

        if (data_len <= (mbuf->data_len - data_offset)) {
                sg->length = data_len;
        } else {
                sg->length = mbuf->data_len - data_offset;

                /* remaining i/p segs */
                while ((data_len = data_len - sg->length) &&
                       (mbuf = mbuf->next)) {
                        cpu_to_hw_sg(sg);
                        sg++;
                        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                        if (data_len > mbuf->data_len)
                                sg->length = mbuf->data_len;
                        else
                                sg->length = data_len;
                }
        }

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                sg++;
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                start_addr = rte_dpaa_mem_vtop(old_digest);
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

/**
 * The packet looks like:
 *              |<----data_len------->|
 *    |ip_header|ah_header|icv|payload|
 *              ^
 *              |
 *         mbuf->pkt.data
 */
static inline struct dpaa_sec_job *
build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct rte_mbuf *mbuf = sym->m_src;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *in_sg;
        rte_iova_t start_addr;
        uint8_t *old_digest;
        int data_len, data_offset;

        data_len = sym->auth.data.length;
        data_offset = sym->auth.data.offset;

        if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
            ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;
        old_digest = ctx->digest;

        start_addr = rte_pktmbuf_iova(mbuf);
        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
        sg->length = ses->digest_length;
        cpu_to_hw_sg(sg);

        /* input */
        in_sg = &cf->sg[1];
        /* need to extend the input to a compound frame */
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        sg = &cf->sg[2];

        if (ses->iv.length) {
                uint8_t *iv_ptr;

                iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                                                   ses->iv.offset);

                if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
                        iv_ptr = conv_to_snow_f9_iv(iv_ptr);
                        sg->length = 12;
                } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
                        iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
                        sg->length = 8;
                } else {
                        sg->length = ses->iv.length;
                }
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
                in_sg->length += sg->length;
                cpu_to_hw_sg(sg);
                sg++;
        }

        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->offset = data_offset;
        sg->length = data_len;

        if (is_decode(ses)) {
                /* Digest verification case */
                cpu_to_hw_sg(sg);
                /* save a software copy of the digest first */
                rte_memcpy(old_digest, sym->auth.digest.data,
                                ses->digest_length);
                /* let the hardware verify the digest */
                start_addr = rte_dpaa_mem_vtop(old_digest);
                sg++;
                qm_sg_entry_set64(sg, start_addr);
                sg->length = ses->digest_length;
                in_sg->length += ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);
        cpu_to_hw_sg(in_sg);

        return cf;
}

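/* Build a compound frame for a cipher-only operation on a scattered mbuf:
 * the output SG list covers the destination data; the input SG list
 * carries the IV followed by the source data.
 */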
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        ctx = dpaa_sec_alloc_ctx(ses, 4);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        src_start_addr = rte_pktmbuf_iova(sym->m_src);

        if (sym->m_dst)
                dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
        else
                dst_start_addr = src_start_addr;

        /* output */
        sg = &cf->sg[0];
        qm_sg_entry_set64(sg, dst_start_addr + data_offset);
        sg->length = data_len + ses->iv.length;
        cpu_to_hw_sg(sg);

        /* input */
        sg = &cf->sg[1];

        /* need to extend the input to a compound frame */
        sg->extension = 1;
        sg->final = 1;
        sg->length = data_len + ses->iv.length;
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(sg);

        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        sg++;
        qm_sg_entry_set64(sg, src_start_addr + data_offset);
        sg->length = data_len;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

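/* Build a compound frame for an AEAD (GCM) operation on a scattered mbuf:
 * the output SG list covers the payload (plus the digest on encrypt); the
 * input SG list carries the IV, the AAD if any, the payload and, on
 * decrypt, a copy of the expected digest.
 */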
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (ses->auth_only_len)
                req_segs++;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}

static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses, 7);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}

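/* Build a compound frame for a chained cipher+auth operation on a
 * scattered mbuf: the output SG list covers the authenticated range (plus
 * the digest on encrypt); the input SG list carries the IV, the source
 * data and, on decrypt, a copy of the expected digest.
 */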
1345 static inline struct dpaa_sec_job *
1346 build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1347 {
1348         struct rte_crypto_sym_op *sym = op->sym;
1349         struct dpaa_sec_job *cf;
1350         struct dpaa_sec_op_ctx *ctx;
1351         struct qm_sg_entry *sg, *out_sg, *in_sg;
1352         struct rte_mbuf *mbuf;
1353         uint8_t req_segs;
1354         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1355                         ses->iv.offset);
1356
1357         if (sym->m_dst) {
1358                 mbuf = sym->m_dst;
1359                 req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
1360         } else {
1361                 mbuf = sym->m_src;
1362                 req_segs = mbuf->nb_segs * 2 + 4;
1363         }
1364
1365         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1366                 DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
1367                                 MAX_SG_ENTRIES);
1368                 return NULL;
1369         }
1370
1371         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1372         if (!ctx)
1373                 return NULL;
1374
1375         cf = &ctx->job;
1376         ctx->op = op;
1377
1378         rte_prefetch0(cf->sg);
1379
1380         /* output */
1381         out_sg = &cf->sg[0];
1382         out_sg->extension = 1;
1383         if (is_encode(ses))
1384                 out_sg->length = sym->auth.data.length + ses->digest_length;
1385         else
1386                 out_sg->length = sym->auth.data.length;
1387
1388         /* output sg entries */
1389         sg = &cf->sg[2];
1390         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
1391         cpu_to_hw_sg(out_sg);
1392
1393         /* 1st seg */
1394         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1395         sg->length = mbuf->data_len - sym->auth.data.offset;
1396         sg->offset = sym->auth.data.offset;
1397
1398         /* Successive segs */
1399         mbuf = mbuf->next;
1400         while (mbuf) {
1401                 cpu_to_hw_sg(sg);
1402                 sg++;
1403                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1404                 sg->length = mbuf->data_len;
1405                 mbuf = mbuf->next;
1406         }
1407         sg->length -= ses->digest_length;
1408
1409         if (is_encode(ses)) {
1410                 cpu_to_hw_sg(sg);
1411                 /* set auth output */
1412                 sg++;
1413                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1414                 sg->length = ses->digest_length;
1415         }
1416         sg->final = 1;
1417         cpu_to_hw_sg(sg);
1418
1419         /* input */
1420         mbuf = sym->m_src;
1421         in_sg = &cf->sg[1];
1422         in_sg->extension = 1;
1423         in_sg->final = 1;
1424         if (is_encode(ses))
1425                 in_sg->length = ses->iv.length + sym->auth.data.length;
1426         else
1427                 in_sg->length = ses->iv.length + sym->auth.data.length
1428                                                 + ses->digest_length;
1429
1430         /* input sg entries */
1431         sg++;
1432         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1433         cpu_to_hw_sg(in_sg);
1434
1435         /* 1st seg IV */
1436         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1437         sg->length = ses->iv.length;
1438         cpu_to_hw_sg(sg);
1439
1440         /* 2nd seg */
1441         sg++;
1442         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1443         sg->length = mbuf->data_len - sym->auth.data.offset;
1444         sg->offset = sym->auth.data.offset;
1445
1446         /* Successive segs */
1447         mbuf = mbuf->next;
1448         while (mbuf) {
1449                 cpu_to_hw_sg(sg);
1450                 sg++;
1451                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1452                 sg->length = mbuf->data_len;
1453                 mbuf = mbuf->next;
1454         }
1455
1456         sg->length -= ses->digest_length;
1457         if (is_decode(ses)) {
1458                 cpu_to_hw_sg(sg);
1459                 sg++;
1460                 memcpy(ctx->digest, sym->auth.digest.data,
1461                         ses->digest_length);
1462                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1463                 sg->length = ses->digest_length;
1464         }
1465         sg->final = 1;
1466         cpu_to_hw_sg(sg);
1467
1468         return cf;
1469 }
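
/*
 * Editor's note: illustrative sketch only, not called by the driver. It
 * condenses the mbuf-chain walk repeated in the *_sg builders above:
 * fill consecutive QMan SG entries from an mbuf chain, honoring a data
 * offset in the first segment, and mark the last entry as final. The
 * caller is assumed to have already validated the entry count (cf. the
 * MAX_SG_ENTRIES checks above).
 */
static __rte_unused int
dpaa_sec_fill_sg_sketch(struct qm_sg_entry *sg, struct rte_mbuf *mbuf,
			uint16_t offset)
{
	int n = 1;

	/* 1st seg starts at the requested data offset */
	qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
	sg->length = mbuf->data_len - offset;
	sg->offset = offset;

	/* successive segs are taken whole */
	for (mbuf = mbuf->next; mbuf != NULL; mbuf = mbuf->next) {
		cpu_to_hw_sg(sg);
		sg++;
		qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
		sg->length = mbuf->data_len;
		n++;
	}
	sg->final = 1;
	cpu_to_hw_sg(sg);

	return n;
}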
1470
1471 static inline struct dpaa_sec_job *
1472 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1473 {
1474         struct rte_crypto_sym_op *sym = op->sym;
1475         struct dpaa_sec_job *cf;
1476         struct dpaa_sec_op_ctx *ctx;
1477         struct qm_sg_entry *sg;
1478         rte_iova_t src_start_addr, dst_start_addr;
1479         uint32_t length = 0;
1480         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1481                         ses->iv.offset);
1482
1483         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1484         if (sym->m_dst)
1485                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1486         else
1487                 dst_start_addr = src_start_addr;
1488
1489         ctx = dpaa_sec_alloc_ctx(ses, 7);
1490         if (!ctx)
1491                 return NULL;
1492
1493         cf = &ctx->job;
1494         ctx->op = op;
1495
1496         /* input */
1497         rte_prefetch0(cf->sg);
1498         sg = &cf->sg[2];
1499         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1500         if (is_encode(ses)) {
1501                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1502                 sg->length = ses->iv.length;
1503                 length += sg->length;
1504                 cpu_to_hw_sg(sg);
1505
1506                 sg++;
1507                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1508                 sg->length = sym->auth.data.length;
1509                 length += sg->length;
1510                 sg->final = 1;
1511                 cpu_to_hw_sg(sg);
1512         } else {
1513                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1514                 sg->length = ses->iv.length;
1515                 length += sg->length;
1516                 cpu_to_hw_sg(sg);
1517
1518                 sg++;
1519
1520                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1521                 sg->length = sym->auth.data.length;
1522                 length += sg->length;
1523                 cpu_to_hw_sg(sg);
1524
1525                 memcpy(ctx->digest, sym->auth.digest.data,
1526                        ses->digest_length);
1527                 sg++;
1528
1529                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1530                 sg->length = ses->digest_length;
1531                 length += sg->length;
1532                 sg->final = 1;
1533                 cpu_to_hw_sg(sg);
1534         }
1535         /* input compound frame */
1536         cf->sg[1].length = length;
1537         cf->sg[1].extension = 1;
1538         cf->sg[1].final = 1;
1539         cpu_to_hw_sg(&cf->sg[1]);
1540
1541         /* output */
1542         sg++;
1543         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1544         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1545         sg->length = sym->cipher.data.length;
1546         length = sg->length;
1547         if (is_encode(ses)) {
1548                 cpu_to_hw_sg(sg);
1549                 /* set auth output */
1550                 sg++;
1551                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1552                 sg->length = ses->digest_length;
1553                 length += sg->length;
1554         }
1555         sg->final = 1;
1556         cpu_to_hw_sg(sg);
1557
1558         /* output compound frame */
1559         cf->sg[0].length = length;
1560         cf->sg[0].extension = 1;
1561         cpu_to_hw_sg(&cf->sg[0]);
1562
1563         return cf;
1564 }
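
/*
 * Editor's note: a sketch of the compound-frame convention shared by all
 * builders above. cf->sg[0] always describes the output, cf->sg[1] the
 * input; either may set 'extension' to point at a longer SG table that
 * starts at cf->sg[2]. A minimal in-place job (hypothetical helper, not
 * driver code) reduces to:
 */
static __rte_unused void
dpaa_sec_compound_sketch(struct dpaa_sec_job *cf, rte_iova_t buf,
			 uint32_t len)
{
	/* output: single entry, same buffer (in-place operation) */
	qm_sg_entry_set64(&cf->sg[0], buf);
	cf->sg[0].length = len;
	cpu_to_hw_sg(&cf->sg[0]);

	/* input: single entry, flagged final */
	qm_sg_entry_set64(&cf->sg[1], buf);
	cf->sg[1].length = len;
	cf->sg[1].final = 1;
	cpu_to_hw_sg(&cf->sg[1]);
}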
1565
1566 #ifdef RTE_LIBRTE_SECURITY
1567 static inline struct dpaa_sec_job *
1568 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1569 {
1570         struct rte_crypto_sym_op *sym = op->sym;
1571         struct dpaa_sec_job *cf;
1572         struct dpaa_sec_op_ctx *ctx;
1573         struct qm_sg_entry *sg;
1574         phys_addr_t src_start_addr, dst_start_addr;
1575
1576         ctx = dpaa_sec_alloc_ctx(ses, 2);
1577         if (!ctx)
1578                 return NULL;
1579         cf = &ctx->job;
1580         ctx->op = op;
1581
1582         src_start_addr = rte_pktmbuf_mtophys(sym->m_src);
1583
1584         if (sym->m_dst)
1585                 dst_start_addr = rte_pktmbuf_mtophys(sym->m_dst);
1586         else
1587                 dst_start_addr = src_start_addr;
1588
1589         /* input */
1590         sg = &cf->sg[1];
1591         qm_sg_entry_set64(sg, src_start_addr);
1592         sg->length = sym->m_src->pkt_len;
1593         sg->final = 1;
1594         cpu_to_hw_sg(sg);
1595
1596         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1597         /* output */
1598         sg = &cf->sg[0];
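        /* Editor's note: the output entry spans all free room in the
         * buffer (not just pkt_len), since protocol offload (IPsec/PDCP)
         * grows the packet with headers, trailers and the ICV.
         */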
1599         qm_sg_entry_set64(sg, dst_start_addr);
1600         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1601         cpu_to_hw_sg(sg);
1602
1603         return cf;
1604 }
1605
1606 static inline struct dpaa_sec_job *
1607 build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
1608 {
1609         struct rte_crypto_sym_op *sym = op->sym;
1610         struct dpaa_sec_job *cf;
1611         struct dpaa_sec_op_ctx *ctx;
1612         struct qm_sg_entry *sg, *out_sg, *in_sg;
1613         struct rte_mbuf *mbuf;
1614         uint8_t req_segs;
1615         uint32_t in_len = 0, out_len = 0;
1616
1617         if (sym->m_dst)
1618                 mbuf = sym->m_dst;
1619         else
1620                 mbuf = sym->m_src;
1621
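        /* Editor's note: the "+ 2" below reserves the two compound-frame
         * entries (cf->sg[0] and cf->sg[1]); the actual in/out SG tables
         * start at cf->sg[2].
         */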
1622         req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
1623         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
1624                 DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
1625                                 MAX_SG_ENTRIES);
1626                 return NULL;
1627         }
1628
1629         ctx = dpaa_sec_alloc_ctx(ses, req_segs);
1630         if (!ctx)
1631                 return NULL;
1632         cf = &ctx->job;
1633         ctx->op = op;
1634         /* output */
1635         out_sg = &cf->sg[0];
1636         out_sg->extension = 1;
1637         qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1638
1639         /* 1st seg */
1640         sg = &cf->sg[2];
1641         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1642         sg->offset = 0;
1643
1644         /* Successive segs */
1645         while (mbuf->next) {
1646                 sg->length = mbuf->data_len;
1647                 out_len += sg->length;
1648                 mbuf = mbuf->next;
1649                 cpu_to_hw_sg(sg);
1650                 sg++;
1651                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1652                 sg->offset = 0;
1653         }
1654         sg->length = mbuf->buf_len - mbuf->data_off;
1655         out_len += sg->length;
1656         sg->final = 1;
1657         cpu_to_hw_sg(sg);
1658
1659         out_sg->length = out_len;
1660         cpu_to_hw_sg(out_sg);
1661
1662         /* input */
1663         mbuf = sym->m_src;
1664         in_sg = &cf->sg[1];
1665         in_sg->extension = 1;
1666         in_sg->final = 1;
1667         in_len = mbuf->data_len;
1668
1669         sg++;
1670         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
1671
1672         /* 1st seg */
1673         qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1674         sg->length = mbuf->data_len;
1675         sg->offset = 0;
1676
1677         /* Successive segs */
1678         mbuf = mbuf->next;
1679         while (mbuf) {
1680                 cpu_to_hw_sg(sg);
1681                 sg++;
1682                 qm_sg_entry_set64(sg, rte_pktmbuf_mtophys(mbuf));
1683                 sg->length = mbuf->data_len;
1684                 sg->offset = 0;
1685                 in_len += sg->length;
1686                 mbuf = mbuf->next;
1687         }
1688         sg->final = 1;
1689         cpu_to_hw_sg(sg);
1690
1691         in_sg->length = in_len;
1692         cpu_to_hw_sg(in_sg);
1693
1694         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1695
1696         return cf;
1697 }
1698 #endif
1699
1700 static uint16_t
1701 dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
1702                        uint16_t nb_ops)
1703 {
1704         /* Transmit the frames to the given device and queue pair */
1705         uint32_t loop;
1706         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1707         uint16_t num_tx = 0, nb_ops_in = nb_ops;
1708         struct qm_fd fds[DPAA_SEC_BURST], *fd;
1709         uint32_t frames_to_send;
1710         struct rte_crypto_op *op;
1711         struct dpaa_sec_job *cf;
1712         dpaa_sec_session *ses;
1713         uint16_t auth_hdr_len, auth_tail_len;
1714         uint32_t index, flags[DPAA_SEC_BURST] = {0};
1715         struct qman_fq *inq[DPAA_SEC_BURST];
1716
1717         while (nb_ops) {
1718                 frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
1719                                 DPAA_SEC_BURST : nb_ops;
1720                 for (loop = 0; loop < frames_to_send; loop++) {
1721                         op = *(ops++);
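                        /* Editor's note: a non-zero seqn means this mbuf
                         * came in via an atomic (order-preserving) event
                         * queue and its DQRR entry is still held; enqueuing
                         * with the DCA flag lets hardware consume that entry.
                         */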
1722                         if (op->sym->m_src->seqn != 0) {
1723                                 index = op->sym->m_src->seqn - 1;
1724                                 if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
1725                                         /* QM_EQCR_DCA_IDXMASK = 0x0f */
1726                                         flags[loop] = ((index & 0x0f) << 8);
1727                                         flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
1728                                         DPAA_PER_LCORE_DQRR_SIZE--;
1729                                         DPAA_PER_LCORE_DQRR_HELD &=
1730                                                                 ~(1 << index);
1731                                 }
1732                         }
1733
1734                         switch (op->sess_type) {
1735                         case RTE_CRYPTO_OP_WITH_SESSION:
1736                                 ses = (dpaa_sec_session *)
1737                                         get_sym_session_private_data(
1738                                                         op->sym->session,
1739                                                         cryptodev_driver_id);
1740                                 break;
1741 #ifdef RTE_LIBRTE_SECURITY
1742                         case RTE_CRYPTO_OP_SECURITY_SESSION:
1743                                 ses = (dpaa_sec_session *)
1744                                         get_sec_session_private_data(
1745                                                         op->sym->sec_session);
1746                                 break;
1747 #endif
1748                         default:
1749                                 DPAA_SEC_DP_ERR(
1750                                         "sessionless crypto op not supported");
1751                                 frames_to_send = loop;
1752                                 nb_ops = loop;
1753                                 goto send_pkts;
1754                         }
1755
1756                         if (!ses) {
1757                                 DPAA_SEC_DP_ERR("session not available");
1758                                 frames_to_send = loop;
1759                                 nb_ops = loop;
1760                                 goto send_pkts;
1761                         }
1762
1763                         if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
1764                                 if (dpaa_sec_attach_sess_q(qp, ses)) {
1765                                         frames_to_send = loop;
1766                                         nb_ops = loop;
1767                                         goto send_pkts;
1768                                 }
1769                         } else if (unlikely(ses->qp[rte_lcore_id() %
1770                                                 MAX_DPAA_CORES] != qp)) {
1771                                 DPAA_SEC_DP_ERR("Old sess->qp = %p,"
1772                                         " new qp = %p\n",
1773                                         ses->qp[rte_lcore_id() %
1774                                         MAX_DPAA_CORES], qp);
1775                                 frames_to_send = loop;
1776                                 nb_ops = loop;
1777                                 goto send_pkts;
1778                         }
1779
1780                         auth_hdr_len = op->sym->auth.data.length -
1781                                                 op->sym->cipher.data.length;
1782                         auth_tail_len = 0;
1783
1784                         if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
1785                                   ((op->sym->m_dst == NULL) ||
1786                                    rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
1787                                 switch (ses->ctxt) {
1788 #ifdef RTE_LIBRTE_SECURITY
1789                                 case DPAA_SEC_PDCP:
1790                                 case DPAA_SEC_IPSEC:
1791                                         cf = build_proto(op, ses);
1792                                         break;
1793 #endif
1794                                 case DPAA_SEC_AUTH:
1795                                         cf = build_auth_only(op, ses);
1796                                         break;
1797                                 case DPAA_SEC_CIPHER:
1798                                         cf = build_cipher_only(op, ses);
1799                                         break;
1800                                 case DPAA_SEC_AEAD:
1801                                         cf = build_cipher_auth_gcm(op, ses);
1802                                         auth_hdr_len = ses->auth_only_len;
1803                                         break;
1804                                 case DPAA_SEC_CIPHER_HASH:
1805                                         auth_hdr_len =
1806                                                 op->sym->cipher.data.offset
1807                                                 - op->sym->auth.data.offset;
1808                                         auth_tail_len =
1809                                                 op->sym->auth.data.length
1810                                                 - op->sym->cipher.data.length
1811                                                 - auth_hdr_len;
1812                                         cf = build_cipher_auth(op, ses);
1813                                         break;
1814                                 default:
1815                                         DPAA_SEC_DP_ERR("operation not supported");
1816                                         frames_to_send = loop;
1817                                         nb_ops = loop;
1818                                         goto send_pkts;
1819                                 }
1820                         } else {
1821                                 switch (ses->ctxt) {
1822 #ifdef RTE_LIBRTE_SECURITY
1823                                 case DPAA_SEC_PDCP:
1824                                 case DPAA_SEC_IPSEC:
1825                                         cf = build_proto_sg(op, ses);
1826                                         break;
1827 #endif
1828                                 case DPAA_SEC_AUTH:
1829                                         cf = build_auth_only_sg(op, ses);
1830                                         break;
1831                                 case DPAA_SEC_CIPHER:
1832                                         cf = build_cipher_only_sg(op, ses);
1833                                         break;
1834                                 case DPAA_SEC_AEAD:
1835                                         cf = build_cipher_auth_gcm_sg(op, ses);
1836                                         auth_hdr_len = ses->auth_only_len;
1837                                         break;
1838                                 case DPAA_SEC_CIPHER_HASH:
1839                                         auth_hdr_len =
1840                                                 op->sym->cipher.data.offset
1841                                                 - op->sym->auth.data.offset;
1842                                         auth_tail_len =
1843                                                 op->sym->auth.data.length
1844                                                 - op->sym->cipher.data.length
1845                                                 - auth_hdr_len;
1846                                         cf = build_cipher_auth_sg(op, ses);
1847                                         break;
1848                                 default:
1849                                         DPAA_SEC_DP_ERR("operation not supported");
1850                                         frames_to_send = loop;
1851                                         nb_ops = loop;
1852                                         goto send_pkts;
1853                                 }
1854                         }
1855                         if (unlikely(!cf)) {
1856                                 frames_to_send = loop;
1857                                 nb_ops = loop;
1858                                 goto send_pkts;
1859                         }
1860
1861                         fd = &fds[loop];
1862                         inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
1863                         fd->opaque_addr = 0;
1864                         fd->cmd = 0;
1865                         qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
1866                         fd->_format1 = qm_fd_compound;
1867                         fd->length29 = 2 * sizeof(struct qm_sg_entry);
1868
1869                         /* auth_only_len is set to 0 in the descriptor and
1870                          * is overwritten here via fd.cmd, which updates
1871                          * the DPOVRD register (bit 31 = override enable).
1872                          */
1873                         if (auth_hdr_len || auth_tail_len) {
1874                                 fd->cmd = 0x80000000;
1875                                 fd->cmd |=
1876                                         ((auth_tail_len << 16) | auth_hdr_len);
1877                         }
1878
1879 #ifdef RTE_LIBRTE_SECURITY
1880                         /* For PDCP, the per-packet HFN is read from the
1881                          * crypto op at hfn_ovd_offset bytes past its start.
1882                          */
1883                         if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
1884                                 fd->cmd = 0x80000000 |
1885                                         *((uint32_t *)((uint8_t *)op +
1886                                         ses->pdcp.hfn_ovd_offset));
1887                                 DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
1888                                         *((uint32_t *)((uint8_t *)op +
1889                                         ses->pdcp.hfn_ovd_offset)),
1890                                         ses->pdcp.hfn_ovd);
1891                         }
1892 #endif
1893                 }
1894 send_pkts:
1895                 loop = 0;
1896                 while (loop < frames_to_send) {
1897                         loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
1898                                         &flags[loop], frames_to_send - loop);
1899                 }
1900                 nb_ops -= frames_to_send;
1901                 num_tx += frames_to_send;
1902         }
1903
1904         dpaa_qp->tx_pkts += num_tx;
1905         dpaa_qp->tx_errs += nb_ops_in - num_tx;
1906
1907         return num_tx;
1908 }
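
/*
 * Editor's note: hedged caller-side sketch. Applications reach the two
 * handlers above through the generic cryptodev API, e.g.:
 *
 *	struct rte_crypto_op *ops[32];
 *	uint16_t n;
 *
 *	n = rte_cryptodev_enqueue_burst(dev_id, qp_id, ops, 32);
 *	...
 *	n = rte_cryptodev_dequeue_burst(dev_id, qp_id, ops, 32);
 *
 * Ops not accepted on enqueue (return value below the requested count)
 * remain owned by the caller and are typically retried.
 */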
1909
1910 static uint16_t
1911 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1912                        uint16_t nb_ops)
1913 {
1914         uint16_t num_rx;
1915         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1916
1917         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1918
1919         dpaa_qp->rx_pkts += num_rx;
1920         dpaa_qp->rx_errs += nb_ops - num_rx;
1921
1922         DPAA_SEC_DP_DEBUG("SEC received %d packets\n", num_rx);
1923
1924         return num_rx;
1925 }
1926
1927 /** Release queue pair */
1928 static int
1929 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1930                             uint16_t qp_id)
1931 {
1932         struct dpaa_sec_dev_private *internals;
1933         struct dpaa_sec_qp *qp = NULL;
1934
1935         PMD_INIT_FUNC_TRACE();
1936
1937         DPAA_SEC_DEBUG("dev = %p, queue = %d", dev, qp_id);
1938
1939         internals = dev->data->dev_private;
1940         if (qp_id >= internals->max_nb_queue_pairs) {
1941                 DPAA_SEC_ERR("Invalid qp_id %d, max supported qpid is %d",
1942                              qp_id, internals->max_nb_queue_pairs);
1943                 return -EINVAL;
1944         }
1945
1946         qp = &internals->qps[qp_id];
1947         rte_mempool_free(qp->ctx_pool);
1948         qp->internals = NULL;
1949         dev->data->queue_pairs[qp_id] = NULL;
1950
1951         return 0;
1952 }
1953
1954 /** Setup a queue pair */
1955 static int
1956 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1957                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1958                 __rte_unused int socket_id)
1959 {
1960         struct dpaa_sec_dev_private *internals;
1961         struct dpaa_sec_qp *qp = NULL;
1962         char str[RTE_MEMPOOL_NAMESIZE];
1963
1964         DPAA_SEC_DEBUG("dev = %p, queue = %d, conf = %p", dev, qp_id, qp_conf);
1965
1966         internals = dev->data->dev_private;
1967         if (qp_id >= internals->max_nb_queue_pairs) {
1968                 DPAA_SEC_ERR("Invalid qp_id %d, max supported qpid is %d",
1969                              qp_id, internals->max_nb_queue_pairs);
1970                 return -EINVAL;
1971         }
1972
1973         qp = &internals->qps[qp_id];
1974         qp->internals = internals;
1975         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1976                         dev->data->dev_id, qp_id);
1977         if (!qp->ctx_pool) {
1978                 qp->ctx_pool = rte_mempool_create((const char *)str,
1979                                                         CTX_POOL_NUM_BUFS,
1980                                                         CTX_POOL_BUF_SIZE,
1981                                                         CTX_POOL_CACHE_SIZE, 0,
1982                                                         NULL, NULL, NULL, NULL,
1983                                                         SOCKET_ID_ANY, 0);
1984                 if (!qp->ctx_pool) {
1985                         DPAA_SEC_ERR("%s create failed\n", str);
1986                         return -ENOMEM;
1987                 }
1988         } else
1989                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
1990                                 dev->data->dev_id, qp_id);
1991         dev->data->queue_pairs[qp_id] = qp;
1992
1993         return 0;
1994 }
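
/*
 * Editor's note: hedged configuration sketch (values illustrative). The
 * qp_conf and socket_id arguments are ignored by this PMD, and the per-qp
 * ctx_pool is kept across reconfigurations (see the !qp->ctx_pool check
 * above):
 *
 *	struct rte_cryptodev_qp_conf qp_conf = { .nb_descriptors = 2048 };
 *
 *	ret = rte_cryptodev_queue_pair_setup(dev_id, qp_id, &qp_conf,
 *					     rte_socket_id());
 */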
1995
1996 /** Returns the size of session structure */
1997 static unsigned int
1998 dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
1999 {
2000         PMD_INIT_FUNC_TRACE();
2001
2002         return sizeof(dpaa_sec_session);
2003 }
2004
2005 static int
2006 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2007                      struct rte_crypto_sym_xform *xform,
2008                      dpaa_sec_session *session)
2009 {
2010         session->ctxt = DPAA_SEC_CIPHER;
2011         session->cipher_alg = xform->cipher.algo;
2012         session->iv.length = xform->cipher.iv.length;
2013         session->iv.offset = xform->cipher.iv.offset;
2014         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2015                                                RTE_CACHE_LINE_SIZE);
2016         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2017                 DPAA_SEC_ERR("No Memory for cipher key");
2018                 return -ENOMEM;
2019         }
2020         session->cipher_key.length = xform->cipher.key.length;
2021
2022         memcpy(session->cipher_key.data, xform->cipher.key.data,
2023                xform->cipher.key.length);
2024         switch (xform->cipher.algo) {
2025         case RTE_CRYPTO_CIPHER_AES_CBC:
2026                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2027                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2028                 break;
2029         case RTE_CRYPTO_CIPHER_3DES_CBC:
2030                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2031                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2032                 break;
2033         case RTE_CRYPTO_CIPHER_AES_CTR:
2034                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2035                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2036                 break;
2037         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2038                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2039                 break;
2040         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2041                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2042                 break;
2043         default:
2044                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2045                               xform->cipher.algo);
2046                 return -ENOTSUP;
2047         }
2048         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2049                         DIR_ENC : DIR_DEC;
2050
2051         return 0;
2052 }
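
/*
 * Editor's note: hypothetical shape of the cipher-only xform consumed
 * above ('key' and 'IV_OFFSET' are placeholders):
 *
 *	struct rte_crypto_sym_xform xform = {
 *		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
 *		.next = NULL,
 *		.cipher = {
 *			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
 *			.algo = RTE_CRYPTO_CIPHER_AES_CBC,
 *			.key = { .data = key, .length = 16 },
 *			.iv = { .offset = IV_OFFSET, .length = 16 },
 *		},
 *	};
 */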
2053
2054 static int
2055 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2056                    struct rte_crypto_sym_xform *xform,
2057                    dpaa_sec_session *session)
2058 {
2059         session->ctxt = DPAA_SEC_AUTH;
2060         session->auth_alg = xform->auth.algo;
2061         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2062                                              RTE_CACHE_LINE_SIZE);
2063         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2064                 DPAA_SEC_ERR("No Memory for auth key");
2065                 return -ENOMEM;
2066         }
2067         session->auth_key.length = xform->auth.key.length;
2068         session->digest_length = xform->auth.digest_length;
2069         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2070                 session->iv.offset = xform->auth.iv.offset;
2071                 session->iv.length = xform->auth.iv.length;
2072         }
2073
2074         memcpy(session->auth_key.data, xform->auth.key.data,
2075                xform->auth.key.length);
2076
2077         switch (xform->auth.algo) {
2078         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2079                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2080                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2081                 break;
2082         case RTE_CRYPTO_AUTH_MD5_HMAC:
2083                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2084                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2085                 break;
2086         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2087                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2088                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2089                 break;
2090         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2091                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2092                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2093                 break;
2094         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2095                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2096                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2097                 break;
2098         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2099                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2100                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2101                 break;
2102         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2103                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2104                 session->auth_key.algmode = OP_ALG_AAI_F9;
2105                 break;
2106         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2107                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2108                 session->auth_key.algmode = OP_ALG_AAI_F9;
2109                 break;
2110         default:
2111                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2112                               xform->auth.algo);
2113                 return -ENOTSUP;
2114         }
2115
2116         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2117                         DIR_ENC : DIR_DEC;
2118
2119         return 0;
2120 }
2121
2122 static int
2123 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2124                    struct rte_crypto_sym_xform *xform,
2125                    dpaa_sec_session *session)
2126 {
2127
2128         struct rte_crypto_cipher_xform *cipher_xform;
2129         struct rte_crypto_auth_xform *auth_xform;
2130
2131         session->ctxt = DPAA_SEC_CIPHER_HASH;
2132         if (session->auth_cipher_text) {
2133                 cipher_xform = &xform->cipher;
2134                 auth_xform = &xform->next->auth;
2135         } else {
2136                 cipher_xform = &xform->next->cipher;
2137                 auth_xform = &xform->auth;
2138         }
2139
2140         /* Set IV parameters */
2141         session->iv.offset = cipher_xform->iv.offset;
2142         session->iv.length = cipher_xform->iv.length;
2143
2144         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2145                                                RTE_CACHE_LINE_SIZE);
2146         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2147                 DPAA_SEC_ERR("No Memory for cipher key");
2148                 return -ENOMEM;
2149         }
2150         session->cipher_key.length = cipher_xform->key.length;
2151         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2152                                              RTE_CACHE_LINE_SIZE);
2153         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2154                 DPAA_SEC_ERR("No Memory for auth key");
2155                 return -ENOMEM;
2156         }
2157         session->auth_key.length = auth_xform->key.length;
2158         memcpy(session->cipher_key.data, cipher_xform->key.data,
2159                cipher_xform->key.length);
2160         memcpy(session->auth_key.data, auth_xform->key.data,
2161                auth_xform->key.length);
2162
2163         session->digest_length = auth_xform->digest_length;
2164         session->auth_alg = auth_xform->algo;
2165
2166         switch (auth_xform->algo) {
2167         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2168                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2169                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2170                 break;
2171         case RTE_CRYPTO_AUTH_MD5_HMAC:
2172                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2173                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2174                 break;
2175         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2176                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2177                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2178                 break;
2179         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2180                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2181                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2182                 break;
2183         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2184                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2185                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2186                 break;
2187         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2188                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2189                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2190                 break;
2191         default:
2192                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2193                               auth_xform->algo);
2194                 return -ENOTSUP;
2195         }
2196
2197         session->cipher_alg = cipher_xform->algo;
2198
2199         switch (cipher_xform->algo) {
2200         case RTE_CRYPTO_CIPHER_AES_CBC:
2201                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2202                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2203                 break;
2204         case RTE_CRYPTO_CIPHER_3DES_CBC:
2205                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2206                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2207                 break;
2208         case RTE_CRYPTO_CIPHER_AES_CTR:
2209                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2210                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2211                 break;
2212         default:
2213                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2214                               cipher_xform->algo);
2215                 return -ENOTSUP;
2216         }
2217         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2218                                 DIR_ENC : DIR_DEC;
2219         return 0;
2220 }
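
/*
 * Editor's note: sketch of the encrypt-then-MAC chain this initializer
 * accepts (keys and lengths are placeholders); the decrypt direction
 * mirrors it with the auth xform first and RTE_CRYPTO_CIPHER_OP_DECRYPT:
 *
 *	cipher_xform.type = RTE_CRYPTO_SYM_XFORM_CIPHER;
 *	cipher_xform.cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT;
 *	cipher_xform.next = &auth_xform;
 *
 *	auth_xform.type = RTE_CRYPTO_SYM_XFORM_AUTH;
 *	auth_xform.auth.op = RTE_CRYPTO_AUTH_OP_GENERATE;
 *	auth_xform.next = NULL;
 */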
2221
2222 static int
2223 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2224                    struct rte_crypto_sym_xform *xform,
2225                    dpaa_sec_session *session)
2226 {
2227         session->aead_alg = xform->aead.algo;
2228         session->ctxt = DPAA_SEC_AEAD;
2229         session->iv.length = xform->aead.iv.length;
2230         session->iv.offset = xform->aead.iv.offset;
2231         session->auth_only_len = xform->aead.aad_length;
2232         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2233                                              RTE_CACHE_LINE_SIZE);
2234         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2235                 DPAA_SEC_ERR("No Memory for aead key\n");
2236                 return -ENOMEM;
2237         }
2238         session->aead_key.length = xform->aead.key.length;
2239         session->digest_length = xform->aead.digest_length;
2240
2241         memcpy(session->aead_key.data, xform->aead.key.data,
2242                xform->aead.key.length);
2243
2244         switch (session->aead_alg) {
2245         case RTE_CRYPTO_AEAD_AES_GCM:
2246                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2247                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2248                 break;
2249         default:
2250                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2251                 return -ENOTSUP;
2252         }
2253
2254         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2255                         DIR_ENC : DIR_DEC;
2256
2257         return 0;
2258 }
2259
2260 static struct qman_fq *
2261 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2262 {
2263         unsigned int i;
2264
2265         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2266                 if (qi->inq_attach[i] == 0) {
2267                         qi->inq_attach[i] = 1;
2268                         return &qi->inq[i];
2269                 }
2270         }
2271         DPAA_SEC_WARN("All sessions in use, max %u", qi->max_nb_sessions);
2272
2273         return NULL;
2274 }
2275
2276 static int
2277 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2278 {
2279         unsigned int i;
2280
2281         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2282                 if (&qi->inq[i] == fq) {
2283                         if (qman_retire_fq(fq, NULL) != 0)
2284                                 DPAA_SEC_WARN("Queue is not retired\n");
2285                         qman_oos_fq(fq);
2286                         qi->inq_attach[i] = 0;
2287                         return 0;
2288                 }
2289         }
2290         return -1;
2291 }
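
/*
 * Editor's note: attach/detach above manage a fixed pool of
 * RTE_DPAA_MAX_RX_QUEUE input FQs via a linear scan of inq_attach[].
 * Each session claims MAX_DPAA_CORES of them (one per lcore) under
 * internals->lock; see dpaa_sec_set_session_parameters() below.
 */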
2292
2293 static int
2294 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
2295 {
2296         int ret;
2297
2298         sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
2299         ret = dpaa_sec_prep_cdb(sess);
2300         if (ret) {
2301                 DPAA_SEC_ERR("Unable to prepare sec cdb");
2302                 return ret;
2303         }
2304         if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
2305                 ret = rte_dpaa_portal_init((void *)0);
2306                 if (ret) {
2307                         DPAA_SEC_ERR("Failure in affining portal");
2308                         return ret;
2309                 }
2310         }
2311         ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
2312                                rte_dpaa_mem_vtop(&sess->cdb),
2313                                qman_fq_fqid(&qp->outq));
2314         if (ret)
2315                 DPAA_SEC_ERR("Unable to init sec queue");
2316
2317         return ret;
2318 }
2319
2320 static inline void
2321 free_session_data(dpaa_sec_session *s)
2322 {
2323         if (is_aead(s))
2324                 rte_free(s->aead_key.data);
2325         else {
2326                 rte_free(s->auth_key.data);
2327                 rte_free(s->cipher_key.data);
2328         }
2329         memset(s, 0, sizeof(dpaa_sec_session));
2330 }
2331
2332 static int
2333 dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
2334                             struct rte_crypto_sym_xform *xform, void *sess)
2335 {
2336         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2337         dpaa_sec_session *session = sess;
2338         uint32_t i;
2339         int ret;
2340
2341         PMD_INIT_FUNC_TRACE();
2342
2343         if (unlikely(sess == NULL)) {
2344                 DPAA_SEC_ERR("invalid session struct");
2345                 return -EINVAL;
2346         }
2347         memset(session, 0, sizeof(dpaa_sec_session));
2348
2349         /* Default IV length = 0 */
2350         session->iv.length = 0;
2351
2352         /* Cipher Only */
2353         if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
2354                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2355                 ret = dpaa_sec_cipher_init(dev, xform, session);
2356
2357         /* Authentication Only */
2358         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2359                    xform->next == NULL) {
2360                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2361                 session->ctxt = DPAA_SEC_AUTH;
2362                 ret = dpaa_sec_auth_init(dev, xform, session);
2363
2364         /* Cipher then Authenticate */
2365         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
2366                    xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2367                 if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
2368                         session->auth_cipher_text = 1;
2369                         if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
2370                                 ret = dpaa_sec_auth_init(dev, xform->next, session);
2371                         else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
2372                                 ret = dpaa_sec_cipher_init(dev, xform, session);
2373                         else
2374                                 ret = dpaa_sec_chain_init(dev, xform, session);
2375                 } else {
2376                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2377                         return -ENOTSUP;
2378                 }
2379         /* Authenticate then Cipher */
2380         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
2381                    xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2382                 if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
2383                         session->auth_cipher_text = 0;
2384                         if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
2385                                 ret = dpaa_sec_cipher_init(dev, xform->next, session);
2386                         else if (xform->next->cipher.algo
2387                                         == RTE_CRYPTO_CIPHER_NULL)
2388                                 ret = dpaa_sec_auth_init(dev, xform, session);
2389                         else
2390                                 ret = dpaa_sec_chain_init(dev, xform, session);
2391                 } else {
2392                         DPAA_SEC_ERR("Not supported: Auth then Cipher");
2393                         return -ENOTSUP;
2394                 }
2395
2396         /* AEAD operation for AES-GCM kind of Algorithms */
2397         } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
2398                    xform->next == NULL) {
2399                 ret = dpaa_sec_aead_init(dev, xform, session);
2400
2401         } else {
2402                 DPAA_SEC_ERR("Invalid crypto type");
2403                 return -EINVAL;
2404         }
2405         if (ret) {
2406                 DPAA_SEC_ERR("unable to init session");
2407                 goto err1;
2408         }
2409
2410         rte_spinlock_lock(&internals->lock);
2411         for (i = 0; i < MAX_DPAA_CORES; i++) {
2412                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2413                 if (session->inq[i] == NULL) {
2414                         DPAA_SEC_ERR("unable to attach sec queue");
2415                         rte_spinlock_unlock(&internals->lock);
2416                         ret = -EBUSY;
2417                         goto err1;
2418                 }
2419         }
2420         rte_spinlock_unlock(&internals->lock);
2421
2422         return 0;
2423
2424 err1:
2425         free_session_data(session);
2426         return ret;
2427 }
2428
2429 static int
2430 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2431                 struct rte_crypto_sym_xform *xform,
2432                 struct rte_cryptodev_sym_session *sess,
2433                 struct rte_mempool *mempool)
2434 {
2435         void *sess_private_data;
2436         int ret;
2437
2438         PMD_INIT_FUNC_TRACE();
2439
2440         if (rte_mempool_get(mempool, &sess_private_data)) {
2441                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2442                 return -ENOMEM;
2443         }
2444
2445         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2446         if (ret != 0) {
2447                 DPAA_SEC_ERR("failed to configure session parameters");
2448
2449                 /* Return session to mempool */
2450                 rte_mempool_put(mempool, sess_private_data);
2451                 return ret;
2452         }
2453
2454         set_sym_session_private_data(sess, dev->driver_id,
2455                         sess_private_data);
2456
2457
2458         return 0;
2459 }
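
/*
 * Editor's note: hedged application-side sketch of session creation;
 * the mempool handles are illustrative:
 *
 *	struct rte_cryptodev_sym_session *sess =
 *		rte_cryptodev_sym_session_create(sess_mp);
 *
 *	rte_cryptodev_sym_session_init(dev_id, sess, &xform, priv_mp);
 *
 * The init call lands in dpaa_sec_sym_session_configure() above with
 * 'mempool' == priv_mp.
 */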
2460
2461 static inline void
2462 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2463 {
2464         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2465         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2466         uint8_t i;
2467
2468         for (i = 0; i < MAX_DPAA_CORES; i++) {
2469                 if (s->inq[i])
2470                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2471                 s->inq[i] = NULL;
2472                 s->qp[i] = NULL;
2473         }
2474         free_session_data(s);
2475         rte_mempool_put(sess_mp, (void *)s);
2476 }
2477
2478 /** Clear the memory of session so it doesn't leave key material behind */
2479 static void
2480 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2481                 struct rte_cryptodev_sym_session *sess)
2482 {
2483         PMD_INIT_FUNC_TRACE();
2484         uint8_t index = dev->driver_id;
2485         void *sess_priv = get_sym_session_private_data(sess, index);
2486         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2487
2488         if (sess_priv) {
2489                 free_session_memory(dev, s);
2490                 set_sym_session_private_data(sess, index, NULL);
2491         }
2492 }
2493
2494 #ifdef RTE_LIBRTE_SECURITY
2495 static int
2496 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2497                         struct rte_security_ipsec_xform *ipsec_xform,
2498                         dpaa_sec_session *session)
2499 {
2500         PMD_INIT_FUNC_TRACE();
2501
2502         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2503                                                RTE_CACHE_LINE_SIZE);
2504         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2505                 DPAA_SEC_ERR("No Memory for aead key");
2506                 return -ENOMEM;
2507         }
2508         memcpy(session->aead_key.data, aead_xform->key.data,
2509                aead_xform->key.length);
2510
2511         session->digest_length = aead_xform->digest_length;
2512         session->aead_key.length = aead_xform->key.length;
2513
2514         switch (aead_xform->algo) {
2515         case RTE_CRYPTO_AEAD_AES_GCM:
2516                 switch (session->digest_length) {
2517                 case 8:
2518                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2519                         break;
2520                 case 12:
2521                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2522                         break;
2523                 case 16:
2524                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2525                         break;
2526                 default:
2527                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2528                                      session->digest_length);
2529                         return -EINVAL;
2530                 }
2531                 if (session->dir == DIR_ENC) {
2532                         memcpy(session->encap_pdb.gcm.salt,
2533                                 (uint8_t *)&(ipsec_xform->salt), 4);
2534                 } else {
2535                         memcpy(session->decap_pdb.gcm.salt,
2536                                 (uint8_t *)&(ipsec_xform->salt), 4);
2537                 }
2538                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2539                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2540                 break;
2541         default:
2542                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2543                               aead_xform->algo);
2544                 return -ENOTSUP;
2545         }
2546         return 0;
2547 }
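
/*
 * Editor's note: for IPsec ESP with AES-GCM (RFC 4106) the 4-byte SA
 * salt is concatenated with the 8-byte per-packet IV to form the
 * 12-byte nonce, which is why the salt is stashed in the encap/decap
 * PDB above rather than in session->iv.
 */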
2548
2549 static int
2550 dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
2551         struct rte_crypto_auth_xform *auth_xform,
2552         struct rte_security_ipsec_xform *ipsec_xform,
2553         dpaa_sec_session *session)
2554 {
2555         if (cipher_xform) {
2556                 session->cipher_key.data = rte_zmalloc(NULL,
2557                                                        cipher_xform->key.length,
2558                                                        RTE_CACHE_LINE_SIZE);
2559                 if (session->cipher_key.data == NULL &&
2560                                 cipher_xform->key.length > 0) {
2561                         DPAA_SEC_ERR("No Memory for cipher key");
2562                         return -ENOMEM;
2563                 }
2564
2565                 session->cipher_key.length = cipher_xform->key.length;
2566                 memcpy(session->cipher_key.data, cipher_xform->key.data,
2567                                 cipher_xform->key.length);
2568                 session->cipher_alg = cipher_xform->algo;
2569         } else {
2570                 session->cipher_key.data = NULL;
2571                 session->cipher_key.length = 0;
2572                 session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
2573         }
2574
2575         if (auth_xform) {
2576                 session->auth_key.data = rte_zmalloc(NULL,
2577                                                 auth_xform->key.length,
2578                                                 RTE_CACHE_LINE_SIZE);
2579                 if (session->auth_key.data == NULL &&
2580                                 auth_xform->key.length > 0) {
2581                         DPAA_SEC_ERR("No Memory for auth key");
2582                         return -ENOMEM;
2583                 }
2584                 session->auth_key.length = auth_xform->key.length;
2585                 memcpy(session->auth_key.data, auth_xform->key.data,
2586                                 auth_xform->key.length);
2587                 session->auth_alg = auth_xform->algo;
2588                 session->digest_length = auth_xform->digest_length;
2589         } else {
2590                 session->auth_key.data = NULL;
2591                 session->auth_key.length = 0;
2592                 session->auth_alg = RTE_CRYPTO_AUTH_NULL;
2593         }
2594
2595         switch (session->auth_alg) {
2596         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2597                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
2598                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2599                 break;
2600         case RTE_CRYPTO_AUTH_MD5_HMAC:
2601                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
2602                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2603                 break;
2604         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2605                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
2606                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2607                 if (session->digest_length != 16)
2608                         DPAA_SEC_WARN(
2609                         "Using SHA256-HMAC with a truncated digest is "
2610                         "non-standard; it will not work with lookaside proto");
2611                 break;
2612         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2613                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
2614                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2615                 break;
2616         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2617                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
2618                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2619                 break;
2620         case RTE_CRYPTO_AUTH_AES_CMAC:
2621                 session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
2622                 break;
2623         case RTE_CRYPTO_AUTH_NULL:
2624                 session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
2625                 break;
2626         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2627         case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
2628         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2629         case RTE_CRYPTO_AUTH_SHA1:
2630         case RTE_CRYPTO_AUTH_SHA256:
2631         case RTE_CRYPTO_AUTH_SHA512:
2632         case RTE_CRYPTO_AUTH_SHA224:
2633         case RTE_CRYPTO_AUTH_SHA384:
2634         case RTE_CRYPTO_AUTH_MD5:
2635         case RTE_CRYPTO_AUTH_AES_GMAC:
2636         case RTE_CRYPTO_AUTH_KASUMI_F9:
2637         case RTE_CRYPTO_AUTH_AES_CBC_MAC:
2638         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2639                 DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
2640                               session->auth_alg);
2641                 return -ENOTSUP;
2642         default:
2643                 DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
2644                               session->auth_alg);
2645                 return -ENOTSUP;
2646         }
2647
2648         switch (session->cipher_alg) {
2649         case RTE_CRYPTO_CIPHER_AES_CBC:
2650                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
2651                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2652                 break;
2653         case RTE_CRYPTO_CIPHER_3DES_CBC:
2654                 session->cipher_key.alg = OP_PCL_IPSEC_3DES;
2655                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2656                 break;
2657         case RTE_CRYPTO_CIPHER_AES_CTR:
2658                 session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
2659                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2660                 if (session->dir == DIR_ENC) {
2661                         session->encap_pdb.ctr.ctr_initial = 0x00000001;
2662                         session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2663                 } else {
2664                         session->decap_pdb.ctr.ctr_initial = 0x00000001;
2665                         session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
2666                 }
2667                 break;
2668         case RTE_CRYPTO_CIPHER_NULL:
2669                 session->cipher_key.alg = OP_PCL_IPSEC_NULL;
2670                 break;
2671         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2672         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2673         case RTE_CRYPTO_CIPHER_3DES_ECB:
2674         case RTE_CRYPTO_CIPHER_AES_ECB:
2675         case RTE_CRYPTO_CIPHER_KASUMI_F8:
2676                 DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
2677                               session->cipher_alg);
2678                 return -ENOTSUP;
2679         default:
2680                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2681                               session->cipher_alg);
2682                 return -ENOTSUP;
2683         }
2684
2685         return 0;
2686 }
2687
2688 static int
2689 dpaa_sec_set_ipsec_session(struct rte_cryptodev *dev,
2690                            struct rte_security_session_conf *conf,
2691                            void *sess)
2692 {
2693         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2694         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2695         struct rte_crypto_auth_xform *auth_xform = NULL;
2696         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2697         struct rte_crypto_aead_xform *aead_xform = NULL;
2698         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2699         uint32_t i;
2700         int ret;
2701
2702         PMD_INIT_FUNC_TRACE();
2703
2704         memset(session, 0, sizeof(dpaa_sec_session));
2705         session->proto_alg = conf->protocol;
2706         session->ctxt = DPAA_SEC_IPSEC;
2707
2708         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2709                 session->dir = DIR_ENC;
2710         else
2711                 session->dir = DIR_DEC;
2712
2713         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2714                 cipher_xform = &conf->crypto_xform->cipher;
2715                 if (conf->crypto_xform->next)
2716                         auth_xform = &conf->crypto_xform->next->auth;
2717                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2718                                         ipsec_xform, session);
2719         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2720                 auth_xform = &conf->crypto_xform->auth;
2721                 if (conf->crypto_xform->next)
2722                         cipher_xform = &conf->crypto_xform->next->cipher;
2723                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2724                                         ipsec_xform, session);
2725         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2726                 aead_xform = &conf->crypto_xform->aead;
2727                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2728                                         ipsec_xform, session);
2729         } else {
2730                 DPAA_SEC_ERR("XFORM not specified");
2731                 ret = -EINVAL;
2732                 goto out;
2733         }
2734         if (ret) {
2735                 DPAA_SEC_ERR("Failed to process xform");
2736                 goto out;
2737         }
2738
	if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
		if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
			session->ip4_hdr.ip_v = IPVERSION;
			session->ip4_hdr.ip_hl = 5;
			session->ip4_hdr.ip_len = rte_cpu_to_be_16(
						sizeof(session->ip4_hdr));
			session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
			session->ip4_hdr.ip_id = 0;
			session->ip4_hdr.ip_off = 0;
			session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
			session->ip4_hdr.ip_p = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			session->ip4_hdr.ip_sum = 0;
			session->ip4_hdr.ip_src =
					ipsec_xform->tunnel.ipv4.src_ip;
			session->ip4_hdr.ip_dst =
					ipsec_xform->tunnel.ipv4.dst_ip;
			session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
						(void *)&session->ip4_hdr,
						sizeof(struct ip));
			session->encap_pdb.ip_hdr_len = sizeof(struct ip);
		} else if (ipsec_xform->tunnel.type ==
				RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
			session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
				DPAA_IPv6_DEFAULT_VTC_FLOW |
				((ipsec_xform->tunnel.ipv6.dscp <<
					RTE_IPV6_HDR_TC_SHIFT) &
					RTE_IPV6_HDR_TC_MASK) |
				((ipsec_xform->tunnel.ipv6.flabel <<
					RTE_IPV6_HDR_FL_SHIFT) &
					RTE_IPV6_HDR_FL_MASK));
			/* Payload length will be updated by HW */
			session->ip6_hdr.payload_len = 0;
			session->ip6_hdr.hop_limits =
					ipsec_xform->tunnel.ipv6.hlimit;
			session->ip6_hdr.proto = (ipsec_xform->proto ==
					RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
					IPPROTO_ESP : IPPROTO_AH;
			memcpy(&session->ip6_hdr.src_addr,
					&ipsec_xform->tunnel.ipv6.src_addr, 16);
			memcpy(&session->ip6_hdr.dst_addr,
					&ipsec_xform->tunnel.ipv6.dst_addr, 16);
			session->encap_pdb.ip_hdr_len =
						sizeof(struct rte_ipv6_hdr);
		}
		session->encap_pdb.options =
			(IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
			PDBOPTS_ESP_OIHI_PDB_INL |
			PDBOPTS_ESP_IVSRC |
			PDBHMO_ESP_ENCAP_DTTL |
			PDBHMO_ESP_SNR;
		if (ipsec_xform->options.esn)
			session->encap_pdb.options |= PDBOPTS_ESP_ESN;
		session->encap_pdb.spi = ipsec_xform->spi;

	} else if (ipsec_xform->direction ==
			RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
		if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
			session->decap_pdb.options = sizeof(struct ip) << 16;
		else
			session->decap_pdb.options =
					sizeof(struct rte_ipv6_hdr) << 16;
		if (ipsec_xform->options.esn)
			session->decap_pdb.options |= PDBOPTS_ESP_ESN;
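		/*
		 * SEC supports 32-, 64- and 128-entry anti-replay windows:
		 * round the requested size up to a power of two and select
		 * the smallest supported window that covers it.
		 */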
		if (ipsec_xform->replay_win_sz) {
			uint32_t win_sz;
			win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);

			switch (win_sz) {
			case 1:
			case 2:
			case 4:
			case 8:
			case 16:
			case 32:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
				break;
			case 64:
				session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
				break;
			default:
				session->decap_pdb.options |=
							PDBOPTS_ESP_ARS128;
			}
		}
	} else
		goto out;
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			goto out;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;
out:
	free_session_data(session);
	return -1;
}

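/*
 * Populate a PDCP session: map the cipher/auth algorithms onto their
 * PDCP descriptor types, copy the keys and protocol parameters, then
 * attach one SEC input queue per core.
 */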
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xform types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported cipher alg %u",
				      cipher_xform->algo);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			ret = -EINVAL;
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      auth_xform->algo);
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
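	/* Record the PDCP protocol parameters used when building the
	 * shared descriptor for this session.
	 */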
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	/* cipher_xform may be absent for an auth-only session */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}

static int
dpaa_sec_security_session_create(void *dev,
				 struct rte_security_session_conf *conf,
				 struct rte_security_session *sess,
				 struct rte_mempool *mempool)
{
	void *sess_private_data;
	struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
	int ret;

	if (rte_mempool_get(mempool, &sess_private_data)) {
		DPAA_SEC_ERR("Couldn't get object from session mempool");
		return -ENOMEM;
	}

	switch (conf->protocol) {
	case RTE_SECURITY_PROTOCOL_IPSEC:
		ret = dpaa_sec_set_ipsec_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_PDCP:
		ret = dpaa_sec_set_pdcp_session(cdev, conf,
				sess_private_data);
		break;
	case RTE_SECURITY_PROTOCOL_MACSEC:
		ret = -ENOTSUP;
		break;
	default:
		ret = -EINVAL;
	}
	if (ret != 0) {
		DPAA_SEC_ERR("failed to configure session parameters");
		/* Return session to mempool */
		rte_mempool_put(mempool, sess_private_data);
		return ret;
	}

	set_sec_session_private_data(sess, sess_private_data);

	return ret;
}

/** Clear the memory of session so it doesn't leave key material behind */
static int
dpaa_sec_security_session_destroy(void *dev,
		struct rte_security_session *sess)
{
	void *sess_priv = get_sec_session_private_data(sess);
	dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;

	PMD_INIT_FUNC_TRACE();

	if (sess_priv) {
		free_session_memory((struct rte_cryptodev *)dev, s);
		set_sec_session_private_data(sess, NULL);
	}
	return 0;
}
#endif

static int
dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
		       struct rte_cryptodev_config *config __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return 0;
}

static int
dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
	return 0;
}

static void
dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();
}

static int
dpaa_sec_dev_close(struct rte_cryptodev *dev)
{
	PMD_INIT_FUNC_TRACE();

	if (dev == NULL)
		return -EINVAL;

	return 0;
}

static void
dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
		       struct rte_cryptodev_info *info)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();
	if (info != NULL) {
		info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
		info->feature_flags = dev->feature_flags;
		info->capabilities = dpaa_sec_capabilities;
		info->sym.max_nb_sessions = internals->max_nb_sessions;
		info->driver_id = cryptodev_driver_id;
	}
}

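/*
 * DQRR callback used when a SEC out-FQ is attached to an event queue in
 * parallel mode: recover the crypto op from the completed frame
 * descriptor and build an rte_event from the values stored on the FQ at
 * attach time.
 */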
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}

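/*
 * Atomic-mode variant of the above: additionally hold the DQRR entry
 * (qman_cb_dqrr_defer) and record its ring index in the event and mbuf
 * so ordering is preserved until the entry is released.
 */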
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	ev->impl_opaque = index + 1;
	ctx->op->sym->m_src->seqn = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}

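/*
 * Re-initialise a queue pair's out-FQ so that completions are delivered
 * to the given event device channel instead of being polled directly.
 */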
int
dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
		int qp_id,
		uint16_t ch_id,
		const struct rte_event *event)
{
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
	struct qm_mcc_initfq opts = {0};
	int ret;

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	opts.fqd.dest.channel = ch_id;

	switch (event->sched_type) {
	case RTE_SCHED_TYPE_ATOMIC:
		opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
		/* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
		 * with the HOLD_ACTIVE setting
		 */
		opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA_SEC_ERR("Ordered queue schedule type is not supported");
		return -ENOTSUP;
	default:
		opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
		qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
		break;
	}

	ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
	if (unlikely(ret)) {
		DPAA_SEC_ERR("unable to init caam source fq!");
		return ret;
	}

	memcpy(&qp->outq.ev, event, sizeof(struct rte_event));

	return 0;
}

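/*
 * Undo dpaa_sec_eventq_attach(): retire the FQ, take it out of service
 * and re-initialise it with the default poll-mode callbacks.
 */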
int
dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
			int qp_id)
{
	struct qm_mcc_initfq opts = {0};
	int ret;
	struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];

	opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
		       QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
	qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
	qp->outq.cb.ern  = ern_sec_fq_handler;
	qman_retire_fq(&qp->outq, NULL);
	qman_oos_fq(&qp->outq);
	ret = qman_init_fq(&qp->outq, 0, &opts);
	if (ret)
		DPAA_SEC_ERR("Error in qman_init_fq: ret: %d", ret);
	qp->outq.cb.dqrr = NULL;

	return ret;
}

static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa_sec_dev_configure,
	.dev_start            = dpaa_sec_dev_start,
	.dev_stop             = dpaa_sec_dev_stop,
	.dev_close            = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
	.sym_session_configure    = dpaa_sec_sym_session_configure,
	.sym_session_clear        = dpaa_sec_sym_session_clear
};

#ifdef RTE_LIBRTE_SECURITY
static const struct rte_security_capability *
dpaa_sec_capabilities_get(void *device __rte_unused)
{
	return dpaa_sec_security_cap;
}

static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
#endif

static int
dpaa_sec_uninit(struct rte_cryptodev *dev)
{
	struct dpaa_sec_dev_private *internals;

	if (dev == NULL)
		return -ENODEV;

	internals = dev->data->dev_private;
	rte_free(dev->security_ctx);

	rte_free(internals);

	DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
		      dev->data->name, rte_socket_id());

	return 0;
}

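/*
 * One-time device initialisation (primary process only): register the
 * ops and burst functions, create a TX FQ per queue pair and a pool of
 * dynamically-allocated RX FQs for sessions to attach to later.
 */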
static int
dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
{
	struct dpaa_sec_dev_private *internals;
#ifdef RTE_LIBRTE_SECURITY
	struct rte_security_ctx *security_instance;
#endif
	struct dpaa_sec_qp *qp;
	uint32_t i, flags;
	int ret;

	PMD_INIT_FUNC_TRACE();

	cryptodev->driver_id = cryptodev_driver_id;
	cryptodev->dev_ops = &crypto_ops;

	cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
	cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
	cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
			RTE_CRYPTODEV_FF_HW_ACCELERATED |
			RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
			RTE_CRYPTODEV_FF_SECURITY |
			RTE_CRYPTODEV_FF_IN_PLACE_SGL |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
			RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;

	internals = cryptodev->data->dev_private;
	internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
	internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;

	/*
	 * For secondary processes, we don't initialise any further as the
	 * primary has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		DPAA_SEC_WARN("Device already initialised by primary process");
		return 0;
	}
#ifdef RTE_LIBRTE_SECURITY
	/* Initialize security_ctx only for primary process */
	security_instance = rte_malloc("rte_security_instances_ops",
				sizeof(struct rte_security_ctx), 0);
	if (security_instance == NULL)
		return -ENOMEM;
	security_instance->device = (void *)cryptodev;
	security_instance->ops = &dpaa_sec_security_ops;
	security_instance->sess_cnt = 0;
	cryptodev->security_ctx = security_instance;
#endif
	rte_spinlock_init(&internals->lock);
	for (i = 0; i < internals->max_nb_queue_pairs; i++) {
		/* init qman fq for queue pair */
		qp = &internals->qps[i];
		ret = dpaa_sec_init_tx(&qp->outq);
		if (ret) {
			DPAA_SEC_ERR("config tx of queue pair %d", i);
			goto init_error;
		}
	}

	flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
		QMAN_FQ_FLAG_TO_DCPORTAL;
	for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
		/* create rx qman fq for sessions */
		ret = qman_create_fq(0, flags, &internals->inq[i]);
		if (unlikely(ret != 0)) {
			DPAA_SEC_ERR("sec qman_create_fq failed");
			goto init_error;
		}
	}

	RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
	return 0;

init_error:
	DPAA_SEC_ERR("driver %s: create failed", cryptodev->data->name);

	rte_free(cryptodev->security_ctx);
	return -EFAULT;
}

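/*
 * Bus probe: allocate the cryptodev, determine the SEC era from the
 * device tree if RTA has not been configured yet, make sure the calling
 * core has a QMan portal, then run the device init above.
 */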
static int
cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
				struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
	int retval;

	snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);

	cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
	if (cryptodev == NULL)
		return -ENOMEM;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		cryptodev->data->dev_private = rte_zmalloc_socket(
					"cryptodev private structure",
					sizeof(struct dpaa_sec_dev_private),
					RTE_CACHE_LINE_SIZE,
					rte_socket_id());

		if (cryptodev->data->dev_private == NULL)
			rte_panic("Cannot allocate memzone for private "
					"device data");
	}

	dpaa_dev->crypto_dev = cryptodev;
	cryptodev->device = &dpaa_dev->device;

	/* init user callbacks */
	TAILQ_INIT(&(cryptodev->link_intr_cbs));

	/* if sec device version is not configured */
	if (!rta_get_sec_era()) {
		const struct device_node *caam_node;

		for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
			const uint32_t *prop = of_get_property(caam_node,
					"fsl,sec-era",
					NULL);
			if (prop) {
				rta_set_sec_era(
					INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
				break;
			}
		}
	}

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		retval = rte_dpaa_portal_init((void *)1);
		if (retval) {
			DPAA_SEC_ERR("Unable to initialize portal");
			goto out;
		}
	}

	/* Invoke PMD device initialization function */
	retval = dpaa_sec_dev_init(cryptodev);
	if (retval == 0)
		return 0;

	retval = -ENXIO;
out:
	/* In case of error, cleanup is done */
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		rte_free(cryptodev->data->dev_private);

	rte_cryptodev_pmd_release_device(cryptodev);

	return retval;
}

static int
cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
{
	struct rte_cryptodev *cryptodev;
	int ret;

	cryptodev = dpaa_dev->crypto_dev;
	if (cryptodev == NULL)
		return -ENODEV;

	ret = dpaa_sec_uninit(cryptodev);
	if (ret)
		return ret;

	return rte_cryptodev_pmd_destroy(cryptodev);
}

static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
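/* Register the PMD logtype at load time with a default level of NOTICE;
 * the RTE_LOG_REGISTER macro replaces the per-driver constructor that
 * previously called rte_log_register() by hand.
 */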
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);