crypto/dpaa_sec: support DES-CBC
[dpdk.git] / drivers / crypto / dpaa_sec / dpaa_sec.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
4  *   Copyright 2017-2019 NXP
5  *
6  */
7
8 #include <fcntl.h>
9 #include <unistd.h>
10 #include <sched.h>
11 #include <net/if.h>
12
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <cryptodev_pmd.h>
16 #include <rte_crypto.h>
17 #include <rte_cryptodev.h>
18 #ifdef RTE_LIB_SECURITY
19 #include <rte_security_driver.h>
20 #endif
21 #include <rte_cycles.h>
22 #include <rte_dev.h>
23 #include <rte_ip.h>
24 #include <rte_kvargs.h>
25 #include <rte_malloc.h>
26 #include <rte_mbuf.h>
27 #include <rte_memcpy.h>
28 #include <rte_string_fns.h>
29 #include <rte_spinlock.h>
30
31 #include <fsl_usd.h>
32 #include <fsl_qman.h>
33 #include <dpaa_of.h>
34
35 /* RTA header files */
36 #include <desc/common.h>
37 #include <desc/algo.h>
38 #include <desc/ipsec.h>
39 #include <desc/pdcp.h>
40 #include <desc/sdap.h>
41
42 #include <rte_dpaa_bus.h>
43 #include <dpaa_sec.h>
44 #include <dpaa_sec_event.h>
45 #include <dpaa_sec_log.h>
46 #include <dpaax_iova_table.h>
47
48 static uint8_t cryptodev_driver_id;
49
50 static int
51 dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess);
52
53 static inline void
54 dpaa_sec_op_ending(struct dpaa_sec_op_ctx *ctx)
55 {
56         if (!ctx->fd_status) {
57                 ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
58         } else {
59                 DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
60                 ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
61         }
62 }
63
64 static inline struct dpaa_sec_op_ctx *
65 dpaa_sec_alloc_ctx(dpaa_sec_session *ses, int sg_count)
66 {
67         struct dpaa_sec_op_ctx *ctx;
68         int i, retval;
69
70         retval = rte_mempool_get(
71                         ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool,
72                         (void **)(&ctx));
73         if (!ctx || retval) {
74                 DPAA_SEC_DP_WARN("Alloc sec descriptor failed!");
75                 return NULL;
76         }
77         /*
78          * Clear SG memory. There are 16 SG entries of 16 Bytes each.
79          * one call to dcbz_64() clear 64 bytes, hence calling it 4 times
80          * to clear all the SG entries. dpaa_sec_alloc_ctx() is called for
81          * each packet, memset is costlier than dcbz_64().
82          */
83         for (i = 0; i < sg_count && i < MAX_JOB_SG_ENTRIES; i += 4)
84                 dcbz_64(&ctx->job.sg[i]);
85
86         ctx->ctx_pool = ses->qp[rte_lcore_id() % MAX_DPAA_CORES]->ctx_pool;
87         ctx->vtop_offset = (size_t) ctx - rte_mempool_virt2iova(ctx);
88
89         return ctx;
90 }
91
/*
 * Enqueue-rejection (ERN) callback for SEC frame queues: QMan rejected a
 * frame enqueued towards SEC. Nothing can be recovered here, so only the
 * rejection code and sequence number from the message ring entry are logged.
 */
static void
ern_sec_fq_handler(struct qman_portal *qm __rte_unused,
                   struct qman_fq *fq,
                   const struct qm_mr_entry *msg)
{
        DPAA_SEC_DP_ERR("sec fq %d error, RC = %x, seqnum = %x\n",
                        fq->fqid, msg->ern.rc, msg->ern.seqnum);
}
100
101 /* initialize the queue with dest chan as caam chan so that
102  * all the packets in this queue could be dispatched into caam
103  */
104 static int
105 dpaa_sec_init_rx(struct qman_fq *fq_in, rte_iova_t hwdesc,
106                  uint32_t fqid_out)
107 {
108         struct qm_mcc_initfq fq_opts;
109         uint32_t flags;
110         int ret = -1;
111
112         /* Clear FQ options */
113         memset(&fq_opts, 0x00, sizeof(struct qm_mcc_initfq));
114
115         flags = QMAN_INITFQ_FLAG_SCHED;
116         fq_opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTA |
117                           QM_INITFQ_WE_CONTEXTB;
118
119         qm_fqd_context_a_set64(&fq_opts.fqd, hwdesc);
120         fq_opts.fqd.context_b = fqid_out;
121         fq_opts.fqd.dest.channel = dpaa_get_qm_channel_caam();
122         fq_opts.fqd.dest.wq = 0;
123
124         fq_in->cb.ern  = ern_sec_fq_handler;
125
126         DPAA_SEC_DEBUG("in-%x out-%x", fq_in->fqid, fqid_out);
127
128         ret = qman_init_fq(fq_in, flags, &fq_opts);
129         if (unlikely(ret != 0))
130                 DPAA_SEC_ERR("qman_init_fq failed %d", ret);
131
132         return ret;
133 }
134
/* something is put into in_fq and caam put the crypto result into out_fq */
/*
 * DQRR callback for the SEC output queue (event/dequeue mode): collects a
 * completed crypto op into the per-lcore op array. Defers the entry when
 * the per-lcore burst buffer is already full so it is redelivered later.
 */
static enum qman_cb_dqrr_result
dqrr_out_fq_cb_rx(struct qman_portal *qm __always_unused,
                  struct qman_fq *fq __always_unused,
                  const struct qm_dqrr_entry *dqrr)
{
        const struct qm_fd *fd;
        struct dpaa_sec_job *job;
        struct dpaa_sec_op_ctx *ctx;

        /* Burst buffer full: ask QMan to hold this entry for later. */
        if (DPAA_PER_LCORE_DPAA_SEC_OP_NB >= DPAA_SEC_BURST)
                return qman_cb_dqrr_defer;

        if (!(dqrr->stat & QM_DQRR_STAT_FD_VALID))
                return qman_cb_dqrr_consume;

        fd = &dqrr->fd;
        /* sg is embedded in an op ctx,
         * sg[0] is for output
         * sg[1] for input
         */
        job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

        /* Recover the op context from the embedded job structure. */
        ctx = container_of(job, struct dpaa_sec_op_ctx, job);
        ctx->fd_status = fd->status;
        if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                struct qm_sg_entry *sg_out;
                uint32_t len;
                struct rte_mbuf *mbuf = (ctx->op->sym->m_dst == NULL) ?
                                ctx->op->sym->m_src : ctx->op->sym->m_dst;

                /* Protocol offload may change the frame size: propagate the
                 * hardware-reported output length into pkt_len, then fix up
                 * the last segment's data_len for chained mbufs.
                 */
                sg_out = &job->sg[0];
                hw_sg_to_cpu(sg_out);
                len = sg_out->length;
                mbuf->pkt_len = len;
                while (mbuf->next != NULL) {
                        len -= mbuf->data_len;
                        mbuf = mbuf->next;
                }
                mbuf->data_len = len;
        }
        DPAA_PER_LCORE_RTE_CRYPTO_OP[DPAA_PER_LCORE_DPAA_SEC_OP_NB++] = ctx->op;
        dpaa_sec_op_ending(ctx);

        return qman_cb_dqrr_consume;
}
181
182 /* caam result is put into this queue */
183 static int
184 dpaa_sec_init_tx(struct qman_fq *fq)
185 {
186         int ret;
187         struct qm_mcc_initfq opts;
188         uint32_t flags;
189
190         flags = QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_LOCKED |
191                 QMAN_FQ_FLAG_DYNAMIC_FQID;
192
193         ret = qman_create_fq(0, flags, fq);
194         if (unlikely(ret)) {
195                 DPAA_SEC_ERR("qman_create_fq failed");
196                 return ret;
197         }
198
199         memset(&opts, 0, sizeof(opts));
200         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
201                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
202
203         /* opts.fqd.dest.channel = dpaa_sec_pool_chan; */
204
205         fq->cb.dqrr = dqrr_out_fq_cb_rx;
206         fq->cb.ern  = ern_sec_fq_handler;
207
208         ret = qman_init_fq(fq, 0, &opts);
209         if (unlikely(ret)) {
210                 DPAA_SEC_ERR("unable to init caam source fq!");
211                 return ret;
212         }
213
214         return ret;
215 }
216
217 static inline int is_aead(dpaa_sec_session *ses)
218 {
219         return ((ses->cipher_alg == 0) &&
220                 (ses->auth_alg == 0) &&
221                 (ses->aead_alg != 0));
222 }
223
224 static inline int is_encode(dpaa_sec_session *ses)
225 {
226         return ses->dir == DIR_ENC;
227 }
228
229 static inline int is_decode(dpaa_sec_session *ses)
230 {
231         return ses->dir == DIR_DEC;
232 }
233
234 #ifdef RTE_LIB_SECURITY
/*
 * Build the PDCP shared descriptor for the session into ses->cdb.
 * Selects the RTA descriptor constructor from the PDCP domain
 * (control vs user plane), direction and SDAP setting.
 * Returns the shared descriptor length, or a negative value on failure
 * from the RTA constructors.
 */
static int
dpaa_sec_prep_pdcp_cdb(dpaa_sec_session *ses)
{
        struct alginfo authdata = {0}, cipherdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        struct alginfo *p_authdata = NULL;
        int32_t shared_desc_len = 0;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        /* Cipher key passed inline (immediate) in the descriptor by default. */
        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        /* Integrity is optional for PDCP; only pass authdata when set. */
        if (ses->auth_alg) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;

                p_authdata = &authdata;
        }

        /* If the keys cannot be inlined in the descriptor (per RTA's space
         * budget for this algorithm combination), reference the cipher key
         * by IOVA pointer instead.
         */
        if (rta_inline_pdcp_query(authdata.algtype,
                                cipherdata.algtype,
                                ses->pdcp.sn_size,
                                ses->pdcp.hfn_ovd)) {
                cipherdata.key =
                        (size_t)rte_dpaa_mem_vtop((void *)
                                        (size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }

        if (ses->pdcp.domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
                /* Control plane: cipher + integrity always combined. */
                if (ses->dir == DIR_ENC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_encap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
                else if (ses->dir == DIR_DEC)
                        shared_desc_len = cnstr_shdsc_pdcp_c_plane_decap(
                                        cdb->sh_desc, 1, swap,
                                        ses->pdcp.hfn,
                                        ses->pdcp.sn_size,
                                        ses->pdcp.bearer,
                                        ses->pdcp.pkt_dir,
                                        ses->pdcp.hfn_threshold,
                                        &cipherdata, &authdata,
                                        0);
        } else {
                /* User plane: SDAP-enabled sessions use the SDAP-aware
                 * constructors; authdata may be NULL (cipher-only).
                 */
                if (ses->dir == DIR_ENC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_encap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                } else if (ses->dir == DIR_DEC) {
                        if (ses->pdcp.sdap_enabled)
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_sdap_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                        else
                                shared_desc_len =
                                        cnstr_shdsc_pdcp_u_plane_decap(
                                                cdb->sh_desc, 1, swap,
                                                ses->pdcp.sn_size,
                                                ses->pdcp.hfn,
                                                ses->pdcp.bearer,
                                                ses->pdcp.pkt_dir,
                                                ses->pdcp.hfn_threshold,
                                                &cipherdata, p_authdata, 0);
                }
        }
        return shared_desc_len;
}
344
/* prepare ipsec proto command block of the session */
/*
 * Build the IPsec protocol-offload shared descriptor into ses->cdb.
 * Returns the shared descriptor length, or a negative errno on bad
 * key lengths.
 */
static int
dpaa_sec_prep_ipsec_cdb(dpaa_sec_session *ses)
{
        struct alginfo cipherdata = {0}, authdata = {0};
        struct sec_cdb *cdb = &ses->cdb;
        int32_t shared_desc_len = 0;
        int err;
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
        int swap = false;
#else
        int swap = true;
#endif

        /* Cipher key passed inline (immediate) by default. */
        cipherdata.key = (size_t)ses->cipher_key.data;
        cipherdata.keylen = ses->cipher_key.length;
        cipherdata.key_enc_flags = 0;
        cipherdata.key_type = RTA_DATA_IMM;
        cipherdata.algtype = ses->cipher_key.alg;
        cipherdata.algmode = ses->cipher_key.algmode;

        if (ses->auth_key.length) {
                authdata.key = (size_t)ses->auth_key.data;
                authdata.keylen = ses->auth_key.length;
                authdata.key_enc_flags = 0;
                authdata.key_type = RTA_DATA_IMM;
                authdata.algtype = ses->auth_key.alg;
                authdata.algmode = ses->auth_key.algmode;
        }

        /* sh_desc[0..1] are used as scratch input to rta_inline_query(),
         * which decides from the key lengths and descriptor space budget
         * whether each key can be inlined or must be referenced by
         * pointer. The per-key result bits come back in sh_desc[2].
         */
        cdb->sh_desc[0] = cipherdata.keylen;
        cdb->sh_desc[1] = authdata.keylen;
        err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
                               DESC_JOB_IO_LEN,
                               (unsigned int *)cdb->sh_desc,
                               &cdb->sh_desc[2], 2);

        if (err < 0) {
                DPAA_SEC_ERR("Crypto: Incorrect key lengths");
                return err;
        }
        /* Bit 0 set: cipher key fits inline; else convert to IOVA ptr. */
        if (cdb->sh_desc[2] & 1)
                cipherdata.key_type = RTA_DATA_IMM;
        else {
                cipherdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)cipherdata.key);
                cipherdata.key_type = RTA_DATA_PTR;
        }
        /* Bit 1 set: auth key fits inline; else convert to IOVA ptr. */
        if (cdb->sh_desc[2] & (1<<1))
                authdata.key_type = RTA_DATA_IMM;
        else {
                authdata.key = (size_t)rte_dpaa_mem_vtop(
                                        (void *)(size_t)authdata.key);
                authdata.key_type = RTA_DATA_PTR;
        }

        /* Clear the scratch words before building the real descriptor. */
        cdb->sh_desc[0] = 0;
        cdb->sh_desc[1] = 0;
        cdb->sh_desc[2] = 0;
        if (ses->dir == DIR_ENC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_encap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->encap_pdb,
                                (uint8_t *)&ses->ip4_hdr,
                                &cipherdata, &authdata);
        } else if (ses->dir == DIR_DEC) {
                shared_desc_len = cnstr_shdsc_ipsec_new_decap(
                                cdb->sh_desc,
                                true, swap, SHR_SERIAL,
                                &ses->decap_pdb,
                                &cipherdata, &authdata);
        }
        return shared_desc_len;
}
420 #endif
421 /* prepare command block of the session */
422 static int
423 dpaa_sec_prep_cdb(dpaa_sec_session *ses)
424 {
425         struct alginfo alginfo_c = {0}, alginfo_a = {0}, alginfo = {0};
426         int32_t shared_desc_len = 0;
427         struct sec_cdb *cdb = &ses->cdb;
428         int err;
429 #if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
430         int swap = false;
431 #else
432         int swap = true;
433 #endif
434
435         memset(cdb, 0, sizeof(struct sec_cdb));
436
437         switch (ses->ctxt) {
438 #ifdef RTE_LIB_SECURITY
439         case DPAA_SEC_IPSEC:
440                 shared_desc_len = dpaa_sec_prep_ipsec_cdb(ses);
441                 break;
442         case DPAA_SEC_PDCP:
443                 shared_desc_len = dpaa_sec_prep_pdcp_cdb(ses);
444                 break;
445 #endif
446         case DPAA_SEC_CIPHER:
447                 alginfo_c.key = (size_t)ses->cipher_key.data;
448                 alginfo_c.keylen = ses->cipher_key.length;
449                 alginfo_c.key_enc_flags = 0;
450                 alginfo_c.key_type = RTA_DATA_IMM;
451                 alginfo_c.algtype = ses->cipher_key.alg;
452                 alginfo_c.algmode = ses->cipher_key.algmode;
453
454                 switch (ses->cipher_alg) {
455                 case RTE_CRYPTO_CIPHER_AES_CBC:
456                 case RTE_CRYPTO_CIPHER_3DES_CBC:
457                 case RTE_CRYPTO_CIPHER_DES_CBC:
458                 case RTE_CRYPTO_CIPHER_AES_CTR:
459                 case RTE_CRYPTO_CIPHER_3DES_CTR:
460                         shared_desc_len = cnstr_shdsc_blkcipher(
461                                         cdb->sh_desc, true,
462                                         swap, SHR_NEVER, &alginfo_c,
463                                         ses->iv.length,
464                                         ses->dir);
465                         break;
466                 case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
467                         shared_desc_len = cnstr_shdsc_snow_f8(
468                                         cdb->sh_desc, true, swap,
469                                         &alginfo_c,
470                                         ses->dir);
471                         break;
472                 case RTE_CRYPTO_CIPHER_ZUC_EEA3:
473                         shared_desc_len = cnstr_shdsc_zuce(
474                                         cdb->sh_desc, true, swap,
475                                         &alginfo_c,
476                                         ses->dir);
477                         break;
478                 default:
479                         DPAA_SEC_ERR("unsupported cipher alg %d",
480                                      ses->cipher_alg);
481                         return -ENOTSUP;
482                 }
483                 break;
484         case DPAA_SEC_AUTH:
485                 alginfo_a.key = (size_t)ses->auth_key.data;
486                 alginfo_a.keylen = ses->auth_key.length;
487                 alginfo_a.key_enc_flags = 0;
488                 alginfo_a.key_type = RTA_DATA_IMM;
489                 alginfo_a.algtype = ses->auth_key.alg;
490                 alginfo_a.algmode = ses->auth_key.algmode;
491                 switch (ses->auth_alg) {
492                 case RTE_CRYPTO_AUTH_MD5_HMAC:
493                 case RTE_CRYPTO_AUTH_SHA1_HMAC:
494                 case RTE_CRYPTO_AUTH_SHA224_HMAC:
495                 case RTE_CRYPTO_AUTH_SHA256_HMAC:
496                 case RTE_CRYPTO_AUTH_SHA384_HMAC:
497                 case RTE_CRYPTO_AUTH_SHA512_HMAC:
498                         shared_desc_len = cnstr_shdsc_hmac(
499                                                 cdb->sh_desc, true,
500                                                 swap, SHR_NEVER, &alginfo_a,
501                                                 !ses->dir,
502                                                 ses->digest_length);
503                         break;
504                 case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
505                         shared_desc_len = cnstr_shdsc_snow_f9(
506                                                 cdb->sh_desc, true, swap,
507                                                 &alginfo_a,
508                                                 !ses->dir,
509                                                 ses->digest_length);
510                         break;
511                 case RTE_CRYPTO_AUTH_ZUC_EIA3:
512                         shared_desc_len = cnstr_shdsc_zuca(
513                                                 cdb->sh_desc, true, swap,
514                                                 &alginfo_a,
515                                                 !ses->dir,
516                                                 ses->digest_length);
517                         break;
518                 default:
519                         DPAA_SEC_ERR("unsupported auth alg %u", ses->auth_alg);
520                 }
521                 break;
522         case DPAA_SEC_AEAD:
523                 if (alginfo.algtype == (unsigned int)DPAA_SEC_ALG_UNSUPPORT) {
524                         DPAA_SEC_ERR("not supported aead alg");
525                         return -ENOTSUP;
526                 }
527                 alginfo.key = (size_t)ses->aead_key.data;
528                 alginfo.keylen = ses->aead_key.length;
529                 alginfo.key_enc_flags = 0;
530                 alginfo.key_type = RTA_DATA_IMM;
531                 alginfo.algtype = ses->aead_key.alg;
532                 alginfo.algmode = ses->aead_key.algmode;
533
534                 if (ses->dir == DIR_ENC)
535                         shared_desc_len = cnstr_shdsc_gcm_encap(
536                                         cdb->sh_desc, true, swap, SHR_NEVER,
537                                         &alginfo,
538                                         ses->iv.length,
539                                         ses->digest_length);
540                 else
541                         shared_desc_len = cnstr_shdsc_gcm_decap(
542                                         cdb->sh_desc, true, swap, SHR_NEVER,
543                                         &alginfo,
544                                         ses->iv.length,
545                                         ses->digest_length);
546                 break;
547         case DPAA_SEC_CIPHER_HASH:
548                 alginfo_c.key = (size_t)ses->cipher_key.data;
549                 alginfo_c.keylen = ses->cipher_key.length;
550                 alginfo_c.key_enc_flags = 0;
551                 alginfo_c.key_type = RTA_DATA_IMM;
552                 alginfo_c.algtype = ses->cipher_key.alg;
553                 alginfo_c.algmode = ses->cipher_key.algmode;
554
555                 alginfo_a.key = (size_t)ses->auth_key.data;
556                 alginfo_a.keylen = ses->auth_key.length;
557                 alginfo_a.key_enc_flags = 0;
558                 alginfo_a.key_type = RTA_DATA_IMM;
559                 alginfo_a.algtype = ses->auth_key.alg;
560                 alginfo_a.algmode = ses->auth_key.algmode;
561
562                 cdb->sh_desc[0] = alginfo_c.keylen;
563                 cdb->sh_desc[1] = alginfo_a.keylen;
564                 err = rta_inline_query(IPSEC_AUTH_VAR_AES_DEC_BASE_DESC_LEN,
565                                        DESC_JOB_IO_LEN,
566                                        (unsigned int *)cdb->sh_desc,
567                                        &cdb->sh_desc[2], 2);
568
569                 if (err < 0) {
570                         DPAA_SEC_ERR("Crypto: Incorrect key lengths");
571                         return err;
572                 }
573                 if (cdb->sh_desc[2] & 1)
574                         alginfo_c.key_type = RTA_DATA_IMM;
575                 else {
576                         alginfo_c.key = (size_t)rte_dpaa_mem_vtop(
577                                                 (void *)(size_t)alginfo_c.key);
578                         alginfo_c.key_type = RTA_DATA_PTR;
579                 }
580                 if (cdb->sh_desc[2] & (1<<1))
581                         alginfo_a.key_type = RTA_DATA_IMM;
582                 else {
583                         alginfo_a.key = (size_t)rte_dpaa_mem_vtop(
584                                                 (void *)(size_t)alginfo_a.key);
585                         alginfo_a.key_type = RTA_DATA_PTR;
586                 }
587                 cdb->sh_desc[0] = 0;
588                 cdb->sh_desc[1] = 0;
589                 cdb->sh_desc[2] = 0;
590                 /* Auth_only_len is set as 0 here and it will be
591                  * overwritten in fd for each packet.
592                  */
593                 shared_desc_len = cnstr_shdsc_authenc(cdb->sh_desc,
594                                 true, swap, SHR_SERIAL, &alginfo_c, &alginfo_a,
595                                 ses->iv.length,
596                                 ses->digest_length, ses->dir);
597                 break;
598         case DPAA_SEC_HASH_CIPHER:
599         default:
600                 DPAA_SEC_ERR("error: Unsupported session");
601                 return -ENOTSUP;
602         }
603
604         if (shared_desc_len < 0) {
605                 DPAA_SEC_ERR("error in preparing command block");
606                 return shared_desc_len;
607         }
608
609         cdb->sh_hdr.hi.field.idlen = shared_desc_len;
610         cdb->sh_hdr.hi.word = rte_cpu_to_be_32(cdb->sh_hdr.hi.word);
611         cdb->sh_hdr.lo.word = rte_cpu_to_be_32(cdb->sh_hdr.lo.word);
612
613         return 0;
614 }
615
/* qp is lockless, should be accessed by only one thread */
/*
 * Volatile-dequeue up to nb_ops completed crypto ops from the queue
 * pair's output FQ. For each frame, recover the op context embedded
 * around the SG job, map the FD status to an op status, and return the
 * context to its mempool. Returns the number of ops collected.
 */
static int
dpaa_sec_deq(struct dpaa_sec_qp *qp, struct rte_crypto_op **ops, int nb_ops)
{
        struct qman_fq *fq;
        unsigned int pkts = 0;
        int num_rx_bufs, ret;
        struct qm_dqrr_entry *dq;
        uint32_t vdqcr_flags = 0;

        fq = &qp->outq;
        /*
         * Until request for four buffers, we provide exact number of buffers.
         * Otherwise we do not set the QM_VDQCR_EXACT flag.
         * Not setting QM_VDQCR_EXACT flag can provide two more buffers than
         * requested, so we request two less in this case.
         */
        if (nb_ops < 4) {
                vdqcr_flags = QM_VDQCR_EXACT;
                num_rx_bufs = nb_ops;
        } else {
                num_rx_bufs = nb_ops > DPAA_MAX_DEQUEUE_NUM_FRAMES ?
                        (DPAA_MAX_DEQUEUE_NUM_FRAMES - 2) : (nb_ops - 2);
        }
        ret = qman_set_vdq(fq, num_rx_bufs, vdqcr_flags);
        if (ret)
                return 0;

        /* Drain DQRR entries until the volatile dequeue command retires. */
        do {
                const struct qm_fd *fd;
                struct dpaa_sec_job *job;
                struct dpaa_sec_op_ctx *ctx;
                struct rte_crypto_op *op;

                dq = qman_dequeue(fq);
                if (!dq)
                        continue;

                fd = &dq->fd;
                /* sg is embedded in an op ctx,
                 * sg[0] is for output
                 * sg[1] for input
                 */
                job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

                ctx = container_of(job, struct dpaa_sec_op_ctx, job);
                ctx->fd_status = fd->status;
                op = ctx->op;
                if (op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
                        struct qm_sg_entry *sg_out;
                        uint32_t len;
                        struct rte_mbuf *mbuf = (op->sym->m_dst == NULL) ?
                                                op->sym->m_src : op->sym->m_dst;

                        /* Protocol offload may change the frame size:
                         * propagate the hardware-reported output length to
                         * pkt_len and fix the last segment's data_len.
                         */
                        sg_out = &job->sg[0];
                        hw_sg_to_cpu(sg_out);
                        len = sg_out->length;
                        mbuf->pkt_len = len;
                        while (mbuf->next != NULL) {
                                len -= mbuf->data_len;
                                mbuf = mbuf->next;
                        }
                        mbuf->data_len = len;
                }
                if (!ctx->fd_status) {
                        op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
                } else {
                        DPAA_SEC_DP_WARN("SEC return err:0x%x", ctx->fd_status);
                        op->status = RTE_CRYPTO_OP_STATUS_ERROR;
                }
                ops[pkts++] = op;

                /* report op status to sym->op and then free the ctx memory */
                rte_mempool_put(ctx->ctx_pool, (void *)ctx);

                qman_dqrr_consume(fq, dq);
        } while (fq->flags & QMAN_FQ_STATE_VDQCR);

        return pkts;
}
696
697 static inline struct dpaa_sec_job *
698 build_auth_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
699 {
700         struct rte_crypto_sym_op *sym = op->sym;
701         struct rte_mbuf *mbuf = sym->m_src;
702         struct dpaa_sec_job *cf;
703         struct dpaa_sec_op_ctx *ctx;
704         struct qm_sg_entry *sg, *out_sg, *in_sg;
705         phys_addr_t start_addr;
706         uint8_t *old_digest, extra_segs;
707         int data_len, data_offset;
708
709         data_len = sym->auth.data.length;
710         data_offset = sym->auth.data.offset;
711
712         if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
713             ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
714                 if ((data_len & 7) || (data_offset & 7)) {
715                         DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
716                         return NULL;
717                 }
718
719                 data_len = data_len >> 3;
720                 data_offset = data_offset >> 3;
721         }
722
723         if (is_decode(ses))
724                 extra_segs = 3;
725         else
726                 extra_segs = 2;
727
728         if (mbuf->nb_segs > MAX_SG_ENTRIES) {
729                 DPAA_SEC_DP_ERR("Auth: Max sec segs supported is %d",
730                                 MAX_SG_ENTRIES);
731                 return NULL;
732         }
733         ctx = dpaa_sec_alloc_ctx(ses, mbuf->nb_segs + extra_segs);
734         if (!ctx)
735                 return NULL;
736
737         cf = &ctx->job;
738         ctx->op = op;
739         old_digest = ctx->digest;
740
741         /* output */
742         out_sg = &cf->sg[0];
743         qm_sg_entry_set64(out_sg, sym->auth.digest.phys_addr);
744         out_sg->length = ses->digest_length;
745         cpu_to_hw_sg(out_sg);
746
747         /* input */
748         in_sg = &cf->sg[1];
749         /* need to extend the input to a compound frame */
750         in_sg->extension = 1;
751         in_sg->final = 1;
752         in_sg->length = data_len;
753         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
754
755         /* 1st seg */
756         sg = in_sg + 1;
757
758         if (ses->iv.length) {
759                 uint8_t *iv_ptr;
760
761                 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
762                                                    ses->iv.offset);
763
764                 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
765                         iv_ptr = conv_to_snow_f9_iv(iv_ptr);
766                         sg->length = 12;
767                 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
768                         iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
769                         sg->length = 8;
770                 } else {
771                         sg->length = ses->iv.length;
772                 }
773                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
774                 in_sg->length += sg->length;
775                 cpu_to_hw_sg(sg);
776                 sg++;
777         }
778
779         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
780         sg->offset = data_offset;
781
782         if (data_len <= (mbuf->data_len - data_offset)) {
783                 sg->length = data_len;
784         } else {
785                 sg->length = mbuf->data_len - data_offset;
786
787                 /* remaining i/p segs */
788                 while ((data_len = data_len - sg->length) &&
789                        (mbuf = mbuf->next)) {
790                         cpu_to_hw_sg(sg);
791                         sg++;
792                         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
793                         if (data_len > mbuf->data_len)
794                                 sg->length = mbuf->data_len;
795                         else
796                                 sg->length = data_len;
797                 }
798         }
799
800         if (is_decode(ses)) {
801                 /* Digest verification case */
802                 cpu_to_hw_sg(sg);
803                 sg++;
804                 rte_memcpy(old_digest, sym->auth.digest.data,
805                                 ses->digest_length);
806                 start_addr = rte_dpaa_mem_vtop(old_digest);
807                 qm_sg_entry_set64(sg, start_addr);
808                 sg->length = ses->digest_length;
809                 in_sg->length += ses->digest_length;
810         }
811         sg->final = 1;
812         cpu_to_hw_sg(sg);
813         cpu_to_hw_sg(in_sg);
814
815         return cf;
816 }
817
818 /**
819  * packet looks like:
820  *              |<----data_len------->|
821  *    |ip_header|ah_header|icv|payload|
822  *              ^
823  *              |
 *         start of packet data (rte_pktmbuf_mtod(mbuf))
825  */
826 static inline struct dpaa_sec_job *
827 build_auth_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
828 {
829         struct rte_crypto_sym_op *sym = op->sym;
830         struct rte_mbuf *mbuf = sym->m_src;
831         struct dpaa_sec_job *cf;
832         struct dpaa_sec_op_ctx *ctx;
833         struct qm_sg_entry *sg, *in_sg;
834         rte_iova_t start_addr;
835         uint8_t *old_digest;
836         int data_len, data_offset;
837
838         data_len = sym->auth.data.length;
839         data_offset = sym->auth.data.offset;
840
841         if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2 ||
842             ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
843                 if ((data_len & 7) || (data_offset & 7)) {
844                         DPAA_SEC_ERR("AUTH: len/offset must be full bytes");
845                         return NULL;
846                 }
847
848                 data_len = data_len >> 3;
849                 data_offset = data_offset >> 3;
850         }
851
852         ctx = dpaa_sec_alloc_ctx(ses, 4);
853         if (!ctx)
854                 return NULL;
855
856         cf = &ctx->job;
857         ctx->op = op;
858         old_digest = ctx->digest;
859
860         start_addr = rte_pktmbuf_iova(mbuf);
861         /* output */
862         sg = &cf->sg[0];
863         qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
864         sg->length = ses->digest_length;
865         cpu_to_hw_sg(sg);
866
867         /* input */
868         in_sg = &cf->sg[1];
869         /* need to extend the input to a compound frame */
870         in_sg->extension = 1;
871         in_sg->final = 1;
872         in_sg->length = data_len;
873         qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
874         sg = &cf->sg[2];
875
876         if (ses->iv.length) {
877                 uint8_t *iv_ptr;
878
879                 iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
880                                                    ses->iv.offset);
881
882                 if (ses->auth_alg == RTE_CRYPTO_AUTH_SNOW3G_UIA2) {
883                         iv_ptr = conv_to_snow_f9_iv(iv_ptr);
884                         sg->length = 12;
885                 } else if (ses->auth_alg == RTE_CRYPTO_AUTH_ZUC_EIA3) {
886                         iv_ptr = conv_to_zuc_eia_iv(iv_ptr);
887                         sg->length = 8;
888                 } else {
889                         sg->length = ses->iv.length;
890                 }
891                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(iv_ptr));
892                 in_sg->length += sg->length;
893                 cpu_to_hw_sg(sg);
894                 sg++;
895         }
896
897         qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
898         sg->offset = data_offset;
899         sg->length = data_len;
900
901         if (is_decode(ses)) {
902                 /* Digest verification case */
903                 cpu_to_hw_sg(sg);
904                 /* hash result or digest, save digest first */
905                 rte_memcpy(old_digest, sym->auth.digest.data,
906                                 ses->digest_length);
907                 /* let's check digest by hw */
908                 start_addr = rte_dpaa_mem_vtop(old_digest);
909                 sg++;
910                 qm_sg_entry_set64(sg, start_addr);
911                 sg->length = ses->digest_length;
912                 in_sg->length += ses->digest_length;
913         }
914         sg->final = 1;
915         cpu_to_hw_sg(sg);
916         cpu_to_hw_sg(in_sg);
917
918         return cf;
919 }
920
/* Build a SEC job for a cipher-only operation on scatter-gather
 * (chained) mbufs.
 *
 * Compound frame layout:
 *   cf->sg[0]  - output extension entry -> destination segments
 *   cf->sg[1]  - input extension entry  -> IV entry + source segments
 *   cf->sg[2..]- destination segment list, then IV, then source list
 */
static inline struct dpaa_sec_job *
build_cipher_only_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);
        int data_len, data_offset;

        data_len = sym->cipher.data.length;
        data_offset = sym->cipher.data.offset;

        /* SNOW3G/ZUC give cipher length/offset in bits; SEC wants bytes,
         * and only byte-aligned spans are supported.
         */
        if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
                ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
                if ((data_len & 7) || (data_offset & 7)) {
                        DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
                        return NULL;
                }

                data_len = data_len >> 3;
                data_offset = data_offset >> 3;
        }

        /* +3: the two compound-frame entries plus the input IV entry;
         * in-place ops walk the source chain on both sides (*2).
         */
        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 3;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 3;
        }
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        out_sg->length = data_len;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));
        cpu_to_hw_sg(out_sg);

        /* 1st seg: only the first segment honours data_offset */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        /* input frame covers the IV followed by the payload */
        in_sg->length = data_len + ses->iv.length;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 1st seg */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - data_offset;
        sg->offset = data_offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}
1029
1030 static inline struct dpaa_sec_job *
1031 build_cipher_only(struct rte_crypto_op *op, dpaa_sec_session *ses)
1032 {
1033         struct rte_crypto_sym_op *sym = op->sym;
1034         struct dpaa_sec_job *cf;
1035         struct dpaa_sec_op_ctx *ctx;
1036         struct qm_sg_entry *sg;
1037         rte_iova_t src_start_addr, dst_start_addr;
1038         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1039                         ses->iv.offset);
1040         int data_len, data_offset;
1041
1042         data_len = sym->cipher.data.length;
1043         data_offset = sym->cipher.data.offset;
1044
1045         if (ses->cipher_alg == RTE_CRYPTO_CIPHER_SNOW3G_UEA2 ||
1046                 ses->cipher_alg == RTE_CRYPTO_CIPHER_ZUC_EEA3) {
1047                 if ((data_len & 7) || (data_offset & 7)) {
1048                         DPAA_SEC_ERR("CIPHER: len/offset must be full bytes");
1049                         return NULL;
1050                 }
1051
1052                 data_len = data_len >> 3;
1053                 data_offset = data_offset >> 3;
1054         }
1055
1056         ctx = dpaa_sec_alloc_ctx(ses, 4);
1057         if (!ctx)
1058                 return NULL;
1059
1060         cf = &ctx->job;
1061         ctx->op = op;
1062
1063         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1064
1065         if (sym->m_dst)
1066                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1067         else
1068                 dst_start_addr = src_start_addr;
1069
1070         /* output */
1071         sg = &cf->sg[0];
1072         qm_sg_entry_set64(sg, dst_start_addr + data_offset);
1073         sg->length = data_len + ses->iv.length;
1074         cpu_to_hw_sg(sg);
1075
1076         /* input */
1077         sg = &cf->sg[1];
1078
1079         /* need to extend the input to a compound frame */
1080         sg->extension = 1;
1081         sg->final = 1;
1082         sg->length = data_len + ses->iv.length;
1083         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(&cf->sg[2]));
1084         cpu_to_hw_sg(sg);
1085
1086         sg = &cf->sg[2];
1087         qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1088         sg->length = ses->iv.length;
1089         cpu_to_hw_sg(sg);
1090
1091         sg++;
1092         qm_sg_entry_set64(sg, src_start_addr + data_offset);
1093         sg->length = data_len;
1094         sg->final = 1;
1095         cpu_to_hw_sg(sg);
1096
1097         return cf;
1098 }
1099
/* Build a SEC job for an AEAD (e.g. AES-GCM) operation on
 * scatter-gather (chained) mbufs.
 *
 * Input frame:  IV + [AAD] + payload segments [+ expected digest when
 *               decrypting].
 * Output frame: payload segments [+ generated digest when encrypting].
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        /* +4: two compound-frame entries, IV and digest; in-place ops
         * walk the source chain on both sides (*2)
         */
        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        /* one extra entry when AAD is carried out-of-band */
        if (ses->auth_only_len)
                req_segs++;

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("AEAD: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->aead.data.length + ses->digest_length;
        else
                out_sg->length = sym->aead.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg: only the first segment honours the data offset */
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        /* NOTE(review): last entry is trimmed by digest_length
         * unconditionally (also on decrypt); the compound-frame length
         * above presumably bounds what SEC writes — confirm
         */
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->aead.data.length
                                                        + ses->auth_only_len;
        else
                in_sg->length = ses->iv.length + sym->aead.data.length
                                + ses->auth_only_len + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg auth only */
        if (ses->auth_only_len) {
                sg++;
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(sym->aead.aad.data));
                sg->length = ses->auth_only_len;
                cpu_to_hw_sg(sg);
        }

        /* 3rd seg: the payload itself */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->aead.data.offset;
        sg->offset = sym->aead.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                /* stage the expected digest in per-op scratch memory so
                 * SEC can verify it in-line
                 */
                memcpy(ctx->digest, sym->aead.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}
1236
/* Build a SEC job for an AEAD (e.g. AES-GCM) operation on contiguous
 * mbufs.
 *
 * Input frame:  IV + [AAD] + payload [+ expected digest when
 *               decrypting].
 * Output frame: payload [+ generated digest when encrypting].
 * Seven SG entries cover the worst case: two compound-frame entries,
 * IV, AAD, payload and a digest entry on either side.
 */
static inline struct dpaa_sec_job *
build_cipher_auth_gcm(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg;
        uint32_t length = 0;
        rte_iova_t src_start_addr, dst_start_addr;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;

        /* in-place operation when no separate destination is given */
        if (sym->m_dst)
                dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
        else
                dst_start_addr = src_start_addr;

        ctx = dpaa_sec_alloc_ctx(ses, 7);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        /* input */
        rte_prefetch0(cf->sg);
        sg = &cf->sg[2];
        qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
        if (is_encode(ses)) {
                /* IV leads the input frame */
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                /* optional out-of-band AAD */
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        } else {
                /* IV leads the input frame */
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
                sg->length = ses->iv.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                sg++;
                /* optional out-of-band AAD */
                if (ses->auth_only_len) {
                        qm_sg_entry_set64(sg,
                                          rte_dpaa_mem_vtop(sym->aead.aad.data));
                        sg->length = ses->auth_only_len;
                        length += sg->length;
                        cpu_to_hw_sg(sg);
                        sg++;
                }
                qm_sg_entry_set64(sg, src_start_addr + sym->aead.data.offset);
                sg->length = sym->aead.data.length;
                length += sg->length;
                cpu_to_hw_sg(sg);

                /* stage the expected digest in per-op scratch memory so
                 * SEC can verify it in-line
                 */
                memcpy(ctx->digest, sym->aead.digest.data,
                       ses->digest_length);
                sg++;

                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
                length += sg->length;
                sg->final = 1;
                cpu_to_hw_sg(sg);
        }
        /* input compound frame */
        cf->sg[1].length = length;
        cf->sg[1].extension = 1;
        cf->sg[1].final = 1;
        cpu_to_hw_sg(&cf->sg[1]);

        /* output */
        sg++;
        qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
        qm_sg_entry_set64(sg,
                dst_start_addr + sym->aead.data.offset);
        sg->length = sym->aead.data.length;
        length = sg->length;
        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output: generated digest goes straight to the
                 * user-supplied buffer
                 */
                sg++;
                qm_sg_entry_set64(sg, sym->aead.digest.phys_addr);
                sg->length = ses->digest_length;
                length += sg->length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* output compound frame */
        cf->sg[0].length = length;
        cf->sg[0].extension = 1;
        cpu_to_hw_sg(&cf->sg[0]);

        return cf;
}
1348
/* Build a SEC job for a chained cipher+auth operation on
 * scatter-gather (chained) mbufs.
 *
 * Input frame:  IV + auth span of the source segments [+ expected
 *               digest when verifying].
 * Output frame: auth span of the destination segments [+ generated
 *               digest when generating].
 */
static inline struct dpaa_sec_job *
build_cipher_auth_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
                        ses->iv.offset);

        /* +4: two compound-frame entries, IV and digest; in-place ops
         * walk the source chain on both sides (*2)
         */
        if (sym->m_dst) {
                mbuf = sym->m_dst;
                req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 4;
        } else {
                mbuf = sym->m_src;
                req_segs = mbuf->nb_segs * 2 + 4;
        }

        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Cipher-Auth: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;

        cf = &ctx->job;
        ctx->op = op;

        rte_prefetch0(cf->sg);

        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        if (is_encode(ses))
                out_sg->length = sym->auth.data.length + ses->digest_length;
        else
                out_sg->length = sym->auth.data.length;

        /* output sg entries */
        sg = &cf->sg[2];
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(out_sg);

        /* 1st seg: only the first segment honours the auth offset */
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }
        /* NOTE(review): trimmed by digest_length unconditionally (also
         * on verify); the compound-frame length above presumably bounds
         * what SEC writes — confirm
         */
        sg->length -= ses->digest_length;

        if (is_encode(ses)) {
                cpu_to_hw_sg(sg);
                /* set auth output */
                sg++;
                qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        if (is_encode(ses))
                in_sg->length = ses->iv.length + sym->auth.data.length;
        else
                in_sg->length = ses->iv.length + sym->auth.data.length
                                                + ses->digest_length;

        /* input sg entries */
        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));
        cpu_to_hw_sg(in_sg);

        /* 1st seg IV */
        qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
        sg->length = ses->iv.length;
        cpu_to_hw_sg(sg);

        /* 2nd seg: auth span of the source */
        sg++;
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len - sym->auth.data.offset;
        sg->offset = sym->auth.data.offset;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                mbuf = mbuf->next;
        }

        /* exclude the digest room from the last input data entry */
        sg->length -= ses->digest_length;
        if (is_decode(ses)) {
                cpu_to_hw_sg(sg);
                sg++;
                /* stage the expected digest in per-op scratch memory so
                 * SEC can verify it in-line
                 */
                memcpy(ctx->digest, sym->auth.digest.data,
                        ses->digest_length);
                qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
                sg->length = ses->digest_length;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        return cf;
}
1474
1475 static inline struct dpaa_sec_job *
1476 build_cipher_auth(struct rte_crypto_op *op, dpaa_sec_session *ses)
1477 {
1478         struct rte_crypto_sym_op *sym = op->sym;
1479         struct dpaa_sec_job *cf;
1480         struct dpaa_sec_op_ctx *ctx;
1481         struct qm_sg_entry *sg;
1482         rte_iova_t src_start_addr, dst_start_addr;
1483         uint32_t length = 0;
1484         uint8_t *IV_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
1485                         ses->iv.offset);
1486
1487         src_start_addr = sym->m_src->buf_iova + sym->m_src->data_off;
1488         if (sym->m_dst)
1489                 dst_start_addr = sym->m_dst->buf_iova + sym->m_dst->data_off;
1490         else
1491                 dst_start_addr = src_start_addr;
1492
1493         ctx = dpaa_sec_alloc_ctx(ses, 7);
1494         if (!ctx)
1495                 return NULL;
1496
1497         cf = &ctx->job;
1498         ctx->op = op;
1499
1500         /* input */
1501         rte_prefetch0(cf->sg);
1502         sg = &cf->sg[2];
1503         qm_sg_entry_set64(&cf->sg[1], rte_dpaa_mem_vtop(sg));
1504         if (is_encode(ses)) {
1505                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1506                 sg->length = ses->iv.length;
1507                 length += sg->length;
1508                 cpu_to_hw_sg(sg);
1509
1510                 sg++;
1511                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1512                 sg->length = sym->auth.data.length;
1513                 length += sg->length;
1514                 sg->final = 1;
1515                 cpu_to_hw_sg(sg);
1516         } else {
1517                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(IV_ptr));
1518                 sg->length = ses->iv.length;
1519                 length += sg->length;
1520                 cpu_to_hw_sg(sg);
1521
1522                 sg++;
1523
1524                 qm_sg_entry_set64(sg, src_start_addr + sym->auth.data.offset);
1525                 sg->length = sym->auth.data.length;
1526                 length += sg->length;
1527                 cpu_to_hw_sg(sg);
1528
1529                 memcpy(ctx->digest, sym->auth.digest.data,
1530                        ses->digest_length);
1531                 sg++;
1532
1533                 qm_sg_entry_set64(sg, rte_dpaa_mem_vtop(ctx->digest));
1534                 sg->length = ses->digest_length;
1535                 length += sg->length;
1536                 sg->final = 1;
1537                 cpu_to_hw_sg(sg);
1538         }
1539         /* input compound frame */
1540         cf->sg[1].length = length;
1541         cf->sg[1].extension = 1;
1542         cf->sg[1].final = 1;
1543         cpu_to_hw_sg(&cf->sg[1]);
1544
1545         /* output */
1546         sg++;
1547         qm_sg_entry_set64(&cf->sg[0], rte_dpaa_mem_vtop(sg));
1548         qm_sg_entry_set64(sg, dst_start_addr + sym->cipher.data.offset);
1549         sg->length = sym->cipher.data.length;
1550         length = sg->length;
1551         if (is_encode(ses)) {
1552                 cpu_to_hw_sg(sg);
1553                 /* set auth output */
1554                 sg++;
1555                 qm_sg_entry_set64(sg, sym->auth.digest.phys_addr);
1556                 sg->length = ses->digest_length;
1557                 length += sg->length;
1558         }
1559         sg->final = 1;
1560         cpu_to_hw_sg(sg);
1561
1562         /* output compound frame */
1563         cf->sg[0].length = length;
1564         cf->sg[0].extension = 1;
1565         cpu_to_hw_sg(&cf->sg[0]);
1566
1567         return cf;
1568 }
1569
1570 #ifdef RTE_LIB_SECURITY
1571 static inline struct dpaa_sec_job *
1572 build_proto(struct rte_crypto_op *op, dpaa_sec_session *ses)
1573 {
1574         struct rte_crypto_sym_op *sym = op->sym;
1575         struct dpaa_sec_job *cf;
1576         struct dpaa_sec_op_ctx *ctx;
1577         struct qm_sg_entry *sg;
1578         phys_addr_t src_start_addr, dst_start_addr;
1579
1580         ctx = dpaa_sec_alloc_ctx(ses, 2);
1581         if (!ctx)
1582                 return NULL;
1583         cf = &ctx->job;
1584         ctx->op = op;
1585
1586         src_start_addr = rte_pktmbuf_iova(sym->m_src);
1587
1588         if (sym->m_dst)
1589                 dst_start_addr = rte_pktmbuf_iova(sym->m_dst);
1590         else
1591                 dst_start_addr = src_start_addr;
1592
1593         /* input */
1594         sg = &cf->sg[1];
1595         qm_sg_entry_set64(sg, src_start_addr);
1596         sg->length = sym->m_src->pkt_len;
1597         sg->final = 1;
1598         cpu_to_hw_sg(sg);
1599
1600         sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;
1601         /* output */
1602         sg = &cf->sg[0];
1603         qm_sg_entry_set64(sg, dst_start_addr);
1604         sg->length = sym->m_src->buf_len - sym->m_src->data_off;
1605         cpu_to_hw_sg(sg);
1606
1607         return cf;
1608 }
1609
/* Build a SEC job for a protocol-offload operation on scatter-gather
 * (chained) mbufs: the whole source packet goes in, and the full
 * writable destination chain is exposed on the output side since the
 * protocol engine may grow the packet.
 */
static inline struct dpaa_sec_job *
build_proto_sg(struct rte_crypto_op *op, dpaa_sec_session *ses)
{
        struct rte_crypto_sym_op *sym = op->sym;
        struct dpaa_sec_job *cf;
        struct dpaa_sec_op_ctx *ctx;
        struct qm_sg_entry *sg, *out_sg, *in_sg;
        struct rte_mbuf *mbuf;
        uint8_t req_segs;
        uint32_t in_len = 0, out_len = 0;

        /* in-place operation when no separate destination is given */
        if (sym->m_dst)
                mbuf = sym->m_dst;
        else
                mbuf = sym->m_src;

        /* +2: the two compound-frame entries */
        req_segs = mbuf->nb_segs + sym->m_src->nb_segs + 2;
        if (mbuf->nb_segs > MAX_SG_ENTRIES) {
                DPAA_SEC_DP_ERR("Proto: Max sec segs supported is %d",
                                MAX_SG_ENTRIES);
                return NULL;
        }

        ctx = dpaa_sec_alloc_ctx(ses, req_segs);
        if (!ctx)
                return NULL;
        cf = &ctx->job;
        ctx->op = op;
        /* output */
        out_sg = &cf->sg[0];
        out_sg->extension = 1;
        qm_sg_entry_set64(out_sg, rte_dpaa_mem_vtop(&cf->sg[2]));

        /* 1st seg */
        sg = &cf->sg[2];
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->offset = 0;

        /* Successive segs */
        while (mbuf->next) {
                sg->length = mbuf->data_len;
                out_len += sg->length;
                mbuf = mbuf->next;
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->offset = 0;
        }
        /* last segment exposes all remaining room in the buffer, not
         * just data_len, so the engine can append to the packet
         */
        sg->length = mbuf->buf_len - mbuf->data_off;
        out_len += sg->length;
        sg->final = 1;
        cpu_to_hw_sg(sg);

        out_sg->length = out_len;
        cpu_to_hw_sg(out_sg);

        /* input */
        mbuf = sym->m_src;
        in_sg = &cf->sg[1];
        in_sg->extension = 1;
        in_sg->final = 1;
        /* seeded with the first segment's length; the loop below adds
         * only the subsequent segments
         */
        in_len = mbuf->data_len;

        sg++;
        qm_sg_entry_set64(in_sg, rte_dpaa_mem_vtop(sg));

        /* 1st seg */
        qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
        sg->length = mbuf->data_len;
        sg->offset = 0;

        /* Successive segs */
        mbuf = mbuf->next;
        while (mbuf) {
                cpu_to_hw_sg(sg);
                sg++;
                qm_sg_entry_set64(sg, rte_pktmbuf_iova(mbuf));
                sg->length = mbuf->data_len;
                sg->offset = 0;
                in_len += sg->length;
                mbuf = mbuf->next;
        }
        sg->final = 1;
        cpu_to_hw_sg(sg);

        in_sg->length = in_len;
        cpu_to_hw_sg(in_sg);

        /* the L4 type is stale once SEC rewrites the packet */
        sym->m_src->packet_type &= ~RTE_PTYPE_L4_MASK;

        return cf;
}
1702 #endif
1703
/* Enqueue a burst of crypto operations to the SEC hardware via QMan.
 *
 * Operations are submitted in sub-bursts of at most DPAA_SEC_BURST frames.
 * For each op the session is resolved, a job descriptor (compound FD) is
 * built according to the session context type, and the FD is enqueued on
 * the session's per-lcore input frame queue. On any per-op failure the
 * current sub-burst is truncated at that op (goto send_pkts) and whatever
 * was prepared so far is still transmitted.
 *
 * @param qp     queue pair (struct dpaa_sec_qp *)
 * @param ops    array of crypto ops to submit
 * @param nb_ops number of ops in @ops
 * @return number of ops actually enqueued (may be < nb_ops)
 */
static uint16_t
dpaa_sec_enqueue_burst(void *qp, struct rte_crypto_op **ops,
		       uint16_t nb_ops)
{
	/* Function to transmit the frames to given device and queuepair */
	uint32_t loop;
	struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
	uint16_t num_tx = 0;
	struct qm_fd fds[DPAA_SEC_BURST], *fd;
	uint32_t frames_to_send;
	struct rte_crypto_op *op;
	struct dpaa_sec_job *cf;
	dpaa_sec_session *ses;
	uint16_t auth_hdr_len, auth_tail_len;
	uint32_t index, flags[DPAA_SEC_BURST] = {0};
	struct qman_fq *inq[DPAA_SEC_BURST];

	/* Lazily affine a QMan portal to the calling lcore */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		if (rte_dpaa_portal_init((void *)0)) {
			DPAA_SEC_ERR("Failure in affining portal");
			return 0;
		}
	}

	while (nb_ops) {
		frames_to_send = (nb_ops > DPAA_SEC_BURST) ?
				DPAA_SEC_BURST : nb_ops;
		for (loop = 0; loop < frames_to_send; loop++) {
			op = *(ops++);
			/* Non-zero dpaa_seqn means the mbuf holds a DQRR
			 * entry; consume it via DCA on this enqueue.
			 */
			if (*dpaa_seqn(op->sym->m_src) != 0) {
				index = *dpaa_seqn(op->sym->m_src) - 1;
				if (DPAA_PER_LCORE_DQRR_HELD & (1 << index)) {
					/* QM_EQCR_DCA_IDXMASK = 0x0f */
					flags[loop] = ((index & 0x0f) << 8);
					flags[loop] |= QMAN_ENQUEUE_FLAG_DCA;
					DPAA_PER_LCORE_DQRR_SIZE--;
					DPAA_PER_LCORE_DQRR_HELD &=
								~(1 << index);
				}
			}

			/* Resolve the driver session from the op */
			switch (op->sess_type) {
			case RTE_CRYPTO_OP_WITH_SESSION:
				ses = (dpaa_sec_session *)
					get_sym_session_private_data(
							op->sym->session,
							cryptodev_driver_id);
				break;
#ifdef RTE_LIB_SECURITY
			case RTE_CRYPTO_OP_SECURITY_SESSION:
				ses = (dpaa_sec_session *)
					get_sec_session_private_data(
							op->sym->sec_session);
				break;
#endif
			default:
				DPAA_SEC_DP_ERR(
					"sessionless crypto op not supported");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			if (!ses) {
				DPAA_SEC_DP_ERR("session not available");
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			/* First use of this session on this lcore attaches a
			 * queue; a session may not migrate between qps.
			 */
			if (unlikely(!ses->qp[rte_lcore_id() % MAX_DPAA_CORES])) {
				if (dpaa_sec_attach_sess_q(qp, ses)) {
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else if (unlikely(ses->qp[rte_lcore_id() %
						MAX_DPAA_CORES] != qp)) {
				DPAA_SEC_DP_ERR("Old:sess->qp = %p"
					" New qp = %p\n",
					ses->qp[rte_lcore_id() %
					MAX_DPAA_CORES], qp);
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			/* Default: auth-only header bytes preceding cipher */
			auth_hdr_len = op->sym->auth.data.length -
						op->sym->cipher.data.length;
			auth_tail_len = 0;

			/* Choose contiguous vs scatter-gather job builders */
			if (rte_pktmbuf_is_contiguous(op->sym->m_src) &&
				  ((op->sym->m_dst == NULL) ||
				   rte_pktmbuf_is_contiguous(op->sym->m_dst))) {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			} else {
				switch (ses->ctxt) {
#ifdef RTE_LIB_SECURITY
				case DPAA_SEC_PDCP:
				case DPAA_SEC_IPSEC:
					cf = build_proto_sg(op, ses);
					break;
#endif
				case DPAA_SEC_AUTH:
					cf = build_auth_only_sg(op, ses);
					break;
				case DPAA_SEC_CIPHER:
					cf = build_cipher_only_sg(op, ses);
					break;
				case DPAA_SEC_AEAD:
					cf = build_cipher_auth_gcm_sg(op, ses);
					auth_hdr_len = ses->auth_only_len;
					break;
				case DPAA_SEC_CIPHER_HASH:
					auth_hdr_len =
						op->sym->cipher.data.offset
						- op->sym->auth.data.offset;
					auth_tail_len =
						op->sym->auth.data.length
						- op->sym->cipher.data.length
						- auth_hdr_len;
					cf = build_cipher_auth_sg(op, ses);
					break;
				default:
					DPAA_SEC_DP_ERR("not supported ops");
					frames_to_send = loop;
					nb_ops = loop;
					goto send_pkts;
				}
			}
			if (unlikely(!cf)) {
				frames_to_send = loop;
				nb_ops = loop;
				goto send_pkts;
			}

			/* Fill the compound FD pointing at the job's SG pair */
			fd = &fds[loop];
			inq[loop] = ses->inq[rte_lcore_id() % MAX_DPAA_CORES];
			fd->opaque_addr = 0;
			fd->cmd = 0;
			qm_fd_addr_set64(fd, rte_dpaa_mem_vtop(cf->sg));
			fd->_format1 = qm_fd_compound;
			fd->length29 = 2 * sizeof(struct qm_sg_entry);

			/* Auth_only_len is set as 0 in descriptor and it is
			 * overwritten here in the fd.cmd which will update
			 * the DPOVRD reg.
			 */
			if (auth_hdr_len || auth_tail_len) {
				fd->cmd = 0x80000000;
				fd->cmd |=
					((auth_tail_len << 16) | auth_hdr_len);
			}

#ifdef RTE_LIB_SECURITY
			/* In case of PDCP, per packet HFN is stored in
			 * mbuf priv after sym_op.
			 */
			if ((ses->ctxt == DPAA_SEC_PDCP) && ses->pdcp.hfn_ovd) {
				fd->cmd = 0x80000000 |
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset));
				DPAA_SEC_DP_DEBUG("Per packet HFN: %x, ovd:%u\n",
					*((uint32_t *)((uint8_t *)op +
					ses->pdcp.hfn_ovd_offset)),
					ses->pdcp.hfn_ovd);
			}
#endif
		}
send_pkts:
		/* Push the prepared FDs; retry until all are accepted */
		loop = 0;
		while (loop < frames_to_send) {
			loop += qman_enqueue_multi_fq(&inq[loop], &fds[loop],
					&flags[loop], frames_to_send - loop);
		}
		nb_ops -= frames_to_send;
		num_tx += frames_to_send;
	}

	dpaa_qp->tx_pkts += num_tx;
	dpaa_qp->tx_errs += nb_ops - num_tx;

	return num_tx;
}
1920
1921 static uint16_t
1922 dpaa_sec_dequeue_burst(void *qp, struct rte_crypto_op **ops,
1923                        uint16_t nb_ops)
1924 {
1925         uint16_t num_rx;
1926         struct dpaa_sec_qp *dpaa_qp = (struct dpaa_sec_qp *)qp;
1927
1928         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
1929                 if (rte_dpaa_portal_init((void *)0)) {
1930                         DPAA_SEC_ERR("Failure in affining portal");
1931                         return 0;
1932                 }
1933         }
1934
1935         num_rx = dpaa_sec_deq(dpaa_qp, ops, nb_ops);
1936
1937         dpaa_qp->rx_pkts += num_rx;
1938         dpaa_qp->rx_errs += nb_ops - num_rx;
1939
1940         DPAA_SEC_DP_DEBUG("SEC Received %d Packets\n", num_rx);
1941
1942         return num_rx;
1943 }
1944
1945 /** Release queue pair */
1946 static int
1947 dpaa_sec_queue_pair_release(struct rte_cryptodev *dev,
1948                             uint16_t qp_id)
1949 {
1950         struct dpaa_sec_dev_private *internals;
1951         struct dpaa_sec_qp *qp = NULL;
1952
1953         PMD_INIT_FUNC_TRACE();
1954
1955         DPAA_SEC_DEBUG("dev =%p, queue =%d", dev, qp_id);
1956
1957         internals = dev->data->dev_private;
1958         if (qp_id >= internals->max_nb_queue_pairs) {
1959                 DPAA_SEC_ERR("Max supported qpid %d",
1960                              internals->max_nb_queue_pairs);
1961                 return -EINVAL;
1962         }
1963
1964         qp = &internals->qps[qp_id];
1965         rte_mempool_free(qp->ctx_pool);
1966         qp->internals = NULL;
1967         dev->data->queue_pairs[qp_id] = NULL;
1968
1969         return 0;
1970 }
1971
1972 /** Setup a queue pair */
1973 static int
1974 dpaa_sec_queue_pair_setup(struct rte_cryptodev *dev, uint16_t qp_id,
1975                 __rte_unused const struct rte_cryptodev_qp_conf *qp_conf,
1976                 __rte_unused int socket_id)
1977 {
1978         struct dpaa_sec_dev_private *internals;
1979         struct dpaa_sec_qp *qp = NULL;
1980         char str[20];
1981
1982         DPAA_SEC_DEBUG("dev =%p, queue =%d, conf =%p", dev, qp_id, qp_conf);
1983
1984         internals = dev->data->dev_private;
1985         if (qp_id >= internals->max_nb_queue_pairs) {
1986                 DPAA_SEC_ERR("Max supported qpid %d",
1987                              internals->max_nb_queue_pairs);
1988                 return -EINVAL;
1989         }
1990
1991         qp = &internals->qps[qp_id];
1992         qp->internals = internals;
1993         snprintf(str, sizeof(str), "ctx_pool_d%d_qp%d",
1994                         dev->data->dev_id, qp_id);
1995         if (!qp->ctx_pool) {
1996                 qp->ctx_pool = rte_mempool_create((const char *)str,
1997                                                         CTX_POOL_NUM_BUFS,
1998                                                         CTX_POOL_BUF_SIZE,
1999                                                         CTX_POOL_CACHE_SIZE, 0,
2000                                                         NULL, NULL, NULL, NULL,
2001                                                         SOCKET_ID_ANY, 0);
2002                 if (!qp->ctx_pool) {
2003                         DPAA_SEC_ERR("%s create failed\n", str);
2004                         return -ENOMEM;
2005                 }
2006         } else
2007                 DPAA_SEC_INFO("mempool already created for dev_id : %d, qp: %d",
2008                                 dev->data->dev_id, qp_id);
2009         dev->data->queue_pairs[qp_id] = qp;
2010
2011         return 0;
2012 }
2013
/** Returns the size of session structure
 *
 * All session types share the single fixed-size dpaa_sec_session layout,
 * so the framework always reserves sizeof(dpaa_sec_session) bytes of
 * private session data.
 */
static unsigned int
dpaa_sec_sym_session_get_size(struct rte_cryptodev *dev __rte_unused)
{
	PMD_INIT_FUNC_TRACE();

	return sizeof(dpaa_sec_session);
}
2022
2023 static int
2024 dpaa_sec_cipher_init(struct rte_cryptodev *dev __rte_unused,
2025                      struct rte_crypto_sym_xform *xform,
2026                      dpaa_sec_session *session)
2027 {
2028         session->ctxt = DPAA_SEC_CIPHER;
2029         session->cipher_alg = xform->cipher.algo;
2030         session->iv.length = xform->cipher.iv.length;
2031         session->iv.offset = xform->cipher.iv.offset;
2032         session->cipher_key.data = rte_zmalloc(NULL, xform->cipher.key.length,
2033                                                RTE_CACHE_LINE_SIZE);
2034         if (session->cipher_key.data == NULL && xform->cipher.key.length > 0) {
2035                 DPAA_SEC_ERR("No Memory for cipher key");
2036                 return -ENOMEM;
2037         }
2038         session->cipher_key.length = xform->cipher.key.length;
2039
2040         memcpy(session->cipher_key.data, xform->cipher.key.data,
2041                xform->cipher.key.length);
2042         switch (xform->cipher.algo) {
2043         case RTE_CRYPTO_CIPHER_AES_CBC:
2044                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2045                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2046                 break;
2047         case RTE_CRYPTO_CIPHER_DES_CBC:
2048                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2049                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2050                 break;
2051         case RTE_CRYPTO_CIPHER_3DES_CBC:
2052                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2053                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2054                 break;
2055         case RTE_CRYPTO_CIPHER_AES_CTR:
2056                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2057                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2058                 break;
2059         case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
2060                 session->cipher_key.alg = OP_ALG_ALGSEL_SNOW_F8;
2061                 break;
2062         case RTE_CRYPTO_CIPHER_ZUC_EEA3:
2063                 session->cipher_key.alg = OP_ALG_ALGSEL_ZUCE;
2064                 break;
2065         default:
2066                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2067                               xform->cipher.algo);
2068                 return -ENOTSUP;
2069         }
2070         session->dir = (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2071                         DIR_ENC : DIR_DEC;
2072
2073         return 0;
2074 }
2075
2076 static int
2077 dpaa_sec_auth_init(struct rte_cryptodev *dev __rte_unused,
2078                    struct rte_crypto_sym_xform *xform,
2079                    dpaa_sec_session *session)
2080 {
2081         session->ctxt = DPAA_SEC_AUTH;
2082         session->auth_alg = xform->auth.algo;
2083         session->auth_key.data = rte_zmalloc(NULL, xform->auth.key.length,
2084                                              RTE_CACHE_LINE_SIZE);
2085         if (session->auth_key.data == NULL && xform->auth.key.length > 0) {
2086                 DPAA_SEC_ERR("No Memory for auth key");
2087                 return -ENOMEM;
2088         }
2089         session->auth_key.length = xform->auth.key.length;
2090         session->digest_length = xform->auth.digest_length;
2091         if (session->cipher_alg == RTE_CRYPTO_CIPHER_NULL) {
2092                 session->iv.offset = xform->auth.iv.offset;
2093                 session->iv.length = xform->auth.iv.length;
2094         }
2095
2096         memcpy(session->auth_key.data, xform->auth.key.data,
2097                xform->auth.key.length);
2098
2099         switch (xform->auth.algo) {
2100         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2101                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2102                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2103                 break;
2104         case RTE_CRYPTO_AUTH_MD5_HMAC:
2105                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2106                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2107                 break;
2108         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2109                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2110                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2111                 break;
2112         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2113                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2114                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2115                 break;
2116         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2117                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2118                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2119                 break;
2120         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2121                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2122                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2123                 break;
2124         case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
2125                 session->auth_key.alg = OP_ALG_ALGSEL_SNOW_F9;
2126                 session->auth_key.algmode = OP_ALG_AAI_F9;
2127                 break;
2128         case RTE_CRYPTO_AUTH_ZUC_EIA3:
2129                 session->auth_key.alg = OP_ALG_ALGSEL_ZUCA;
2130                 session->auth_key.algmode = OP_ALG_AAI_F9;
2131                 break;
2132         default:
2133                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2134                               xform->auth.algo);
2135                 return -ENOTSUP;
2136         }
2137
2138         session->dir = (xform->auth.op == RTE_CRYPTO_AUTH_OP_GENERATE) ?
2139                         DIR_ENC : DIR_DEC;
2140
2141         return 0;
2142 }
2143
2144 static int
2145 dpaa_sec_chain_init(struct rte_cryptodev *dev __rte_unused,
2146                    struct rte_crypto_sym_xform *xform,
2147                    dpaa_sec_session *session)
2148 {
2149
2150         struct rte_crypto_cipher_xform *cipher_xform;
2151         struct rte_crypto_auth_xform *auth_xform;
2152
2153         session->ctxt = DPAA_SEC_CIPHER_HASH;
2154         if (session->auth_cipher_text) {
2155                 cipher_xform = &xform->cipher;
2156                 auth_xform = &xform->next->auth;
2157         } else {
2158                 cipher_xform = &xform->next->cipher;
2159                 auth_xform = &xform->auth;
2160         }
2161
2162         /* Set IV parameters */
2163         session->iv.offset = cipher_xform->iv.offset;
2164         session->iv.length = cipher_xform->iv.length;
2165
2166         session->cipher_key.data = rte_zmalloc(NULL, cipher_xform->key.length,
2167                                                RTE_CACHE_LINE_SIZE);
2168         if (session->cipher_key.data == NULL && cipher_xform->key.length > 0) {
2169                 DPAA_SEC_ERR("No Memory for cipher key");
2170                 return -ENOMEM;
2171         }
2172         session->cipher_key.length = cipher_xform->key.length;
2173         session->auth_key.data = rte_zmalloc(NULL, auth_xform->key.length,
2174                                              RTE_CACHE_LINE_SIZE);
2175         if (session->auth_key.data == NULL && auth_xform->key.length > 0) {
2176                 DPAA_SEC_ERR("No Memory for auth key");
2177                 return -ENOMEM;
2178         }
2179         session->auth_key.length = auth_xform->key.length;
2180         memcpy(session->cipher_key.data, cipher_xform->key.data,
2181                cipher_xform->key.length);
2182         memcpy(session->auth_key.data, auth_xform->key.data,
2183                auth_xform->key.length);
2184
2185         session->digest_length = auth_xform->digest_length;
2186         session->auth_alg = auth_xform->algo;
2187
2188         switch (auth_xform->algo) {
2189         case RTE_CRYPTO_AUTH_SHA1_HMAC:
2190                 session->auth_key.alg = OP_ALG_ALGSEL_SHA1;
2191                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2192                 break;
2193         case RTE_CRYPTO_AUTH_MD5_HMAC:
2194                 session->auth_key.alg = OP_ALG_ALGSEL_MD5;
2195                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2196                 break;
2197         case RTE_CRYPTO_AUTH_SHA224_HMAC:
2198                 session->auth_key.alg = OP_ALG_ALGSEL_SHA224;
2199                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2200                 break;
2201         case RTE_CRYPTO_AUTH_SHA256_HMAC:
2202                 session->auth_key.alg = OP_ALG_ALGSEL_SHA256;
2203                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2204                 break;
2205         case RTE_CRYPTO_AUTH_SHA384_HMAC:
2206                 session->auth_key.alg = OP_ALG_ALGSEL_SHA384;
2207                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2208                 break;
2209         case RTE_CRYPTO_AUTH_SHA512_HMAC:
2210                 session->auth_key.alg = OP_ALG_ALGSEL_SHA512;
2211                 session->auth_key.algmode = OP_ALG_AAI_HMAC;
2212                 break;
2213         default:
2214                 DPAA_SEC_ERR("Crypto: Unsupported Auth specified %u",
2215                               auth_xform->algo);
2216                 return -ENOTSUP;
2217         }
2218
2219         session->cipher_alg = cipher_xform->algo;
2220
2221         switch (cipher_xform->algo) {
2222         case RTE_CRYPTO_CIPHER_AES_CBC:
2223                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2224                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2225                 break;
2226         case RTE_CRYPTO_CIPHER_DES_CBC:
2227                 session->cipher_key.alg = OP_ALG_ALGSEL_DES;
2228                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2229                 break;
2230         case RTE_CRYPTO_CIPHER_3DES_CBC:
2231                 session->cipher_key.alg = OP_ALG_ALGSEL_3DES;
2232                 session->cipher_key.algmode = OP_ALG_AAI_CBC;
2233                 break;
2234         case RTE_CRYPTO_CIPHER_AES_CTR:
2235                 session->cipher_key.alg = OP_ALG_ALGSEL_AES;
2236                 session->cipher_key.algmode = OP_ALG_AAI_CTR;
2237                 break;
2238         default:
2239                 DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
2240                               cipher_xform->algo);
2241                 return -ENOTSUP;
2242         }
2243         session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
2244                                 DIR_ENC : DIR_DEC;
2245         return 0;
2246 }
2247
2248 static int
2249 dpaa_sec_aead_init(struct rte_cryptodev *dev __rte_unused,
2250                    struct rte_crypto_sym_xform *xform,
2251                    dpaa_sec_session *session)
2252 {
2253         session->aead_alg = xform->aead.algo;
2254         session->ctxt = DPAA_SEC_AEAD;
2255         session->iv.length = xform->aead.iv.length;
2256         session->iv.offset = xform->aead.iv.offset;
2257         session->auth_only_len = xform->aead.aad_length;
2258         session->aead_key.data = rte_zmalloc(NULL, xform->aead.key.length,
2259                                              RTE_CACHE_LINE_SIZE);
2260         if (session->aead_key.data == NULL && xform->aead.key.length > 0) {
2261                 DPAA_SEC_ERR("No Memory for aead key\n");
2262                 return -ENOMEM;
2263         }
2264         session->aead_key.length = xform->aead.key.length;
2265         session->digest_length = xform->aead.digest_length;
2266
2267         memcpy(session->aead_key.data, xform->aead.key.data,
2268                xform->aead.key.length);
2269
2270         switch (session->aead_alg) {
2271         case RTE_CRYPTO_AEAD_AES_GCM:
2272                 session->aead_key.alg = OP_ALG_ALGSEL_AES;
2273                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2274                 break;
2275         default:
2276                 DPAA_SEC_ERR("unsupported AEAD alg %d", session->aead_alg);
2277                 return -ENOTSUP;
2278         }
2279
2280         session->dir = (xform->aead.op == RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
2281                         DIR_ENC : DIR_DEC;
2282
2283         return 0;
2284 }
2285
2286 static struct qman_fq *
2287 dpaa_sec_attach_rxq(struct dpaa_sec_dev_private *qi)
2288 {
2289         unsigned int i;
2290
2291         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2292                 if (qi->inq_attach[i] == 0) {
2293                         qi->inq_attach[i] = 1;
2294                         return &qi->inq[i];
2295                 }
2296         }
2297         DPAA_SEC_WARN("All session in use %u", qi->max_nb_sessions);
2298
2299         return NULL;
2300 }
2301
2302 static int
2303 dpaa_sec_detach_rxq(struct dpaa_sec_dev_private *qi, struct qman_fq *fq)
2304 {
2305         unsigned int i;
2306
2307         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
2308                 if (&qi->inq[i] == fq) {
2309                         if (qman_retire_fq(fq, NULL) != 0)
2310                                 DPAA_SEC_DEBUG("Queue is not retired\n");
2311                         qman_oos_fq(fq);
2312                         qi->inq_attach[i] = 0;
2313                         return 0;
2314                 }
2315         }
2316         return -1;
2317 }
2318
/* Bind a session to a queue pair for the calling lcore: record the qp,
 * prepare the session's SEC command descriptor (CDB), make sure a QMan
 * portal is affined, then initialize the session's per-lcore Rx queue to
 * deliver completions to the qp's output FQ.
 *
 * @return 0 on success, negative errno-style value on failure
 */
static int
dpaa_sec_attach_sess_q(struct dpaa_sec_qp *qp, dpaa_sec_session *sess)
{
	int ret;

	sess->qp[rte_lcore_id() % MAX_DPAA_CORES] = qp;
	ret = dpaa_sec_prep_cdb(sess);
	if (ret) {
		DPAA_SEC_ERR("Unable to prepare sec cdb");
		return ret;
	}
	/* The Rx-queue init below needs a portal affined to this lcore */
	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_SEC_ERR("Failure in affining portal");
			return ret;
		}
	}
	ret = dpaa_sec_init_rx(sess->inq[rte_lcore_id() % MAX_DPAA_CORES],
			       rte_dpaa_mem_vtop(&sess->cdb),
			       qman_fq_fqid(&qp->outq));
	if (ret)
		DPAA_SEC_ERR("Unable to init sec queue");

	return ret;
}
2345
2346 static inline void
2347 free_session_data(dpaa_sec_session *s)
2348 {
2349         if (is_aead(s))
2350                 rte_free(s->aead_key.data);
2351         else {
2352                 rte_free(s->auth_key.data);
2353                 rte_free(s->cipher_key.data);
2354         }
2355         memset(s, 0, sizeof(dpaa_sec_session));
2356 }
2357
/* Parse the symmetric crypto transform chain @xform and initialise the
 * private session object @sess accordingly.
 *
 * Supported chains: cipher-only, auth-only, cipher->auth (encrypt
 * direction only), auth->cipher (decrypt direction only) and single
 * AEAD.  NULL-algorithm members of a chain collapse the session to the
 * remaining real operation.  On success one SEC Rx queue per core is
 * attached to the session.
 *
 * Returns 0 on success, -EINVAL/-ENOTSUP for bad or unsupported chains,
 * -EBUSY when no Rx queue is available, or the underlying init
 * function's error; on failure all session key data is freed.
 */
static int
dpaa_sec_set_session_parameters(struct rte_cryptodev *dev,
			    struct rte_crypto_sym_xform *xform, void *sess)
{
	struct dpaa_sec_dev_private *internals = dev->data->dev_private;
	dpaa_sec_session *session = sess;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(sess == NULL)) {
		DPAA_SEC_ERR("invalid session struct");
		return -EINVAL;
	}
	memset(session, 0, sizeof(dpaa_sec_session));

	/* Default IV length = 0 */
	session->iv.length = 0;

	/* Cipher Only */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER && xform->next == NULL) {
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
		ret = dpaa_sec_cipher_init(dev, xform, session);

	/* Authentication Only */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next == NULL) {
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->ctxt = DPAA_SEC_AUTH;
		ret = dpaa_sec_auth_init(dev, xform, session);

	/* Cipher then Authenticate */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		if (xform->cipher.op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
			session->auth_cipher_text = 1;
			/* A NULL member degrades the chain to a single-op
			 * session handled by the simpler init path.
			 */
			if (xform->cipher.algo == RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else if (xform->next->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}
	/* Authenticate then Cipher */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH &&
		   xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		if (xform->next->cipher.op == RTE_CRYPTO_CIPHER_OP_DECRYPT) {
			session->auth_cipher_text = 0;
			if (xform->auth.algo == RTE_CRYPTO_AUTH_NULL)
				ret = dpaa_sec_cipher_init(dev, xform, session);
			else if (xform->next->cipher.algo
					== RTE_CRYPTO_CIPHER_NULL)
				ret = dpaa_sec_auth_init(dev, xform, session);
			else
				ret = dpaa_sec_chain_init(dev, xform, session);
		} else {
			DPAA_SEC_ERR("Not supported: Auth then Cipher");
			return -ENOTSUP;
		}

	/* AEAD operation for AES-GCM kind of Algorithms */
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD &&
		   xform->next == NULL) {
		ret = dpaa_sec_aead_init(dev, xform, session);

	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}
	if (ret) {
		DPAA_SEC_ERR("unable to init session");
		goto err1;
	}

	/* Grab one Rx FQ per core under the device lock; the loop is
	 * all-or-nothing — a partial attach is unwound via err1.
	 */
	rte_spinlock_lock(&internals->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(internals);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&internals->lock);
			ret = -EBUSY;
			goto err1;
		}
	}
	rte_spinlock_unlock(&internals->lock);

	return 0;

err1:
	free_session_data(session);
	return ret;
}
2454
2455 static int
2456 dpaa_sec_sym_session_configure(struct rte_cryptodev *dev,
2457                 struct rte_crypto_sym_xform *xform,
2458                 struct rte_cryptodev_sym_session *sess,
2459                 struct rte_mempool *mempool)
2460 {
2461         void *sess_private_data;
2462         int ret;
2463
2464         PMD_INIT_FUNC_TRACE();
2465
2466         if (rte_mempool_get(mempool, &sess_private_data)) {
2467                 DPAA_SEC_ERR("Couldn't get object from session mempool");
2468                 return -ENOMEM;
2469         }
2470
2471         ret = dpaa_sec_set_session_parameters(dev, xform, sess_private_data);
2472         if (ret != 0) {
2473                 DPAA_SEC_ERR("failed to configure session parameters");
2474
2475                 /* Return session to mempool */
2476                 rte_mempool_put(mempool, sess_private_data);
2477                 return ret;
2478         }
2479
2480         set_sym_session_private_data(sess, dev->driver_id,
2481                         sess_private_data);
2482
2483
2484         return 0;
2485 }
2486
2487 static inline void
2488 free_session_memory(struct rte_cryptodev *dev, dpaa_sec_session *s)
2489 {
2490         struct dpaa_sec_dev_private *qi = dev->data->dev_private;
2491         struct rte_mempool *sess_mp = rte_mempool_from_obj((void *)s);
2492         uint8_t i;
2493
2494         for (i = 0; i < MAX_DPAA_CORES; i++) {
2495                 if (s->inq[i])
2496                         dpaa_sec_detach_rxq(qi, s->inq[i]);
2497                 s->inq[i] = NULL;
2498                 s->qp[i] = NULL;
2499         }
2500         free_session_data(s);
2501         rte_mempool_put(sess_mp, (void *)s);
2502 }
2503
2504 /** Clear the memory of session so it doesn't leave key material behind */
2505 static void
2506 dpaa_sec_sym_session_clear(struct rte_cryptodev *dev,
2507                 struct rte_cryptodev_sym_session *sess)
2508 {
2509         PMD_INIT_FUNC_TRACE();
2510         uint8_t index = dev->driver_id;
2511         void *sess_priv = get_sym_session_private_data(sess, index);
2512         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
2513
2514         if (sess_priv) {
2515                 free_session_memory(dev, s);
2516                 set_sym_session_private_data(sess, index, NULL);
2517         }
2518 }
2519
2520 #ifdef RTE_LIB_SECURITY
2521 static int
2522 dpaa_sec_ipsec_aead_init(struct rte_crypto_aead_xform *aead_xform,
2523                         struct rte_security_ipsec_xform *ipsec_xform,
2524                         dpaa_sec_session *session)
2525 {
2526         PMD_INIT_FUNC_TRACE();
2527
2528         session->aead_key.data = rte_zmalloc(NULL, aead_xform->key.length,
2529                                                RTE_CACHE_LINE_SIZE);
2530         if (session->aead_key.data == NULL && aead_xform->key.length > 0) {
2531                 DPAA_SEC_ERR("No Memory for aead key");
2532                 return -ENOMEM;
2533         }
2534         memcpy(session->aead_key.data, aead_xform->key.data,
2535                aead_xform->key.length);
2536
2537         session->digest_length = aead_xform->digest_length;
2538         session->aead_key.length = aead_xform->key.length;
2539
2540         switch (aead_xform->algo) {
2541         case RTE_CRYPTO_AEAD_AES_GCM:
2542                 switch (session->digest_length) {
2543                 case 8:
2544                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM8;
2545                         break;
2546                 case 12:
2547                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM12;
2548                         break;
2549                 case 16:
2550                         session->aead_key.alg = OP_PCL_IPSEC_AES_GCM16;
2551                         break;
2552                 default:
2553                         DPAA_SEC_ERR("Crypto: Undefined GCM digest %d",
2554                                      session->digest_length);
2555                         return -EINVAL;
2556                 }
2557                 if (session->dir == DIR_ENC) {
2558                         memcpy(session->encap_pdb.gcm.salt,
2559                                 (uint8_t *)&(ipsec_xform->salt), 4);
2560                 } else {
2561                         memcpy(session->decap_pdb.gcm.salt,
2562                                 (uint8_t *)&(ipsec_xform->salt), 4);
2563                 }
2564                 session->aead_key.algmode = OP_ALG_AAI_GCM;
2565                 session->aead_alg = RTE_CRYPTO_AEAD_AES_GCM;
2566                 break;
2567         default:
2568                 DPAA_SEC_ERR("Crypto: Undefined AEAD specified %u",
2569                               aead_xform->algo);
2570                 return -ENOTSUP;
2571         }
2572         return 0;
2573 }
2574
/* Fill the non-AEAD (separate cipher and/or auth) part of an IPsec
 * session from the supplied transforms.
 *
 * Either transform pointer may be NULL, in which case the corresponding
 * side of the session is recorded as the NULL algorithm.  Key material
 * is copied into session-owned buffers; on a later failure the caller
 * frees it via free_session_data().
 *
 * Returns 0 on success, -ENOMEM on key allocation failure, -ENOTSUP for
 * algorithms the SEC IPsec protocol descriptor cannot handle.
 */
static int
dpaa_sec_ipsec_proto_init(struct rte_crypto_cipher_xform *cipher_xform,
	struct rte_crypto_auth_xform *auth_xform,
	struct rte_security_ipsec_xform *ipsec_xform,
	dpaa_sec_session *session)
{
	if (cipher_xform) {
		session->cipher_key.data = rte_zmalloc(NULL,
						       cipher_xform->key.length,
						       RTE_CACHE_LINE_SIZE);
		/* A zero-length key (NULL cipher) may legally yield NULL;
		 * only a failed non-empty allocation is fatal.
		 */
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}

		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
				cipher_xform->key.length);
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
	}

	if (auth_xform) {
		session->auth_key.data = rte_zmalloc(NULL,
						auth_xform->key.length,
						RTE_CACHE_LINE_SIZE);
		if (session->auth_key.data == NULL &&
				auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
				auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
		session->digest_length = auth_xform->digest_length;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = RTE_CRYPTO_AUTH_NULL;
	}

	/* Map the generic auth algorithm onto the SEC IPsec protocol
	 * descriptor's OP_PCL_IPSEC_* selector.
	 */
	switch (session->auth_alg) {
	case RTE_CRYPTO_AUTH_SHA1_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA1_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_MD5_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_MD5_96;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA256_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_256_128;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		/* RFC 4868 mandates a 128-bit ICV for SHA-256 HMAC in
		 * IPsec; other truncations only work in non-proto mode.
		 */
		if (session->digest_length != 16)
			DPAA_SEC_WARN(
			"+++Using sha256-hmac truncated len is non-standard,"
			"it will not work with lookaside proto");
		break;
	case RTE_CRYPTO_AUTH_SHA384_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_384_192;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_SHA512_HMAC:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_SHA2_512_256;
		session->auth_key.algmode = OP_ALG_AAI_HMAC;
		break;
	case RTE_CRYPTO_AUTH_AES_CMAC:
		session->auth_key.alg = OP_PCL_IPSEC_AES_CMAC_96;
		break;
	case RTE_CRYPTO_AUTH_NULL:
		session->auth_key.alg = OP_PCL_IPSEC_HMAC_NULL;
		break;
	/* Algorithms the hardware/descriptor does not support in IPsec
	 * protocol mode are listed explicitly so new enum values fall
	 * into the "undefined" default instead.
	 */
	case RTE_CRYPTO_AUTH_SHA224_HMAC:
	case RTE_CRYPTO_AUTH_AES_XCBC_MAC:
	case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
	case RTE_CRYPTO_AUTH_SHA1:
	case RTE_CRYPTO_AUTH_SHA256:
	case RTE_CRYPTO_AUTH_SHA512:
	case RTE_CRYPTO_AUTH_SHA224:
	case RTE_CRYPTO_AUTH_SHA384:
	case RTE_CRYPTO_AUTH_MD5:
	case RTE_CRYPTO_AUTH_AES_GMAC:
	case RTE_CRYPTO_AUTH_KASUMI_F9:
	case RTE_CRYPTO_AUTH_AES_CBC_MAC:
	case RTE_CRYPTO_AUTH_ZUC_EIA3:
		DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
			      session->auth_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Auth specified %u",
			      session->auth_alg);
		return -ENOTSUP;
	}

	switch (session->cipher_alg) {
	case RTE_CRYPTO_CIPHER_AES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CBC;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_3DES_CBC:
		session->cipher_key.alg = OP_PCL_IPSEC_3DES;
		session->cipher_key.algmode = OP_ALG_AAI_CBC;
		break;
	case RTE_CRYPTO_CIPHER_AES_CTR:
		session->cipher_key.alg = OP_PCL_IPSEC_AES_CTR;
		session->cipher_key.algmode = OP_ALG_AAI_CTR;
		/* RFC 3686: counter block starts at 1 and carries the
		 * per-SA nonce (salt) from the IPsec transform.
		 */
		if (session->dir == DIR_ENC) {
			session->encap_pdb.ctr.ctr_initial = 0x00000001;
			session->encap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		} else {
			session->decap_pdb.ctr.ctr_initial = 0x00000001;
			session->decap_pdb.ctr.ctr_nonce = ipsec_xform->salt;
		}
		break;
	case RTE_CRYPTO_CIPHER_NULL:
		session->cipher_key.alg = OP_PCL_IPSEC_NULL;
		break;
	case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
	case RTE_CRYPTO_CIPHER_ZUC_EEA3:
	case RTE_CRYPTO_CIPHER_3DES_ECB:
	case RTE_CRYPTO_CIPHER_AES_ECB:
	case RTE_CRYPTO_CIPHER_KASUMI_F8:
		DPAA_SEC_ERR("Crypto: Unsupported Cipher alg %u",
			      session->cipher_alg);
		return -ENOTSUP;
	default:
		DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
			      session->cipher_alg);
		return -ENOTSUP;
	}

	return 0;
}
2717
2718 static int
2719 dpaa_sec_set_ipsec_session(__rte_unused struct rte_cryptodev *dev,
2720                            struct rte_security_session_conf *conf,
2721                            void *sess)
2722 {
2723         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
2724         struct rte_security_ipsec_xform *ipsec_xform = &conf->ipsec;
2725         struct rte_crypto_auth_xform *auth_xform = NULL;
2726         struct rte_crypto_cipher_xform *cipher_xform = NULL;
2727         struct rte_crypto_aead_xform *aead_xform = NULL;
2728         dpaa_sec_session *session = (dpaa_sec_session *)sess;
2729         uint32_t i;
2730         int ret;
2731
2732         PMD_INIT_FUNC_TRACE();
2733
2734         memset(session, 0, sizeof(dpaa_sec_session));
2735         session->proto_alg = conf->protocol;
2736         session->ctxt = DPAA_SEC_IPSEC;
2737
2738         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS)
2739                 session->dir = DIR_ENC;
2740         else
2741                 session->dir = DIR_DEC;
2742
2743         if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
2744                 cipher_xform = &conf->crypto_xform->cipher;
2745                 if (conf->crypto_xform->next)
2746                         auth_xform = &conf->crypto_xform->next->auth;
2747                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2748                                         ipsec_xform, session);
2749         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
2750                 auth_xform = &conf->crypto_xform->auth;
2751                 if (conf->crypto_xform->next)
2752                         cipher_xform = &conf->crypto_xform->next->cipher;
2753                 ret = dpaa_sec_ipsec_proto_init(cipher_xform, auth_xform,
2754                                         ipsec_xform, session);
2755         } else if (conf->crypto_xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
2756                 aead_xform = &conf->crypto_xform->aead;
2757                 ret = dpaa_sec_ipsec_aead_init(aead_xform,
2758                                         ipsec_xform, session);
2759         } else {
2760                 DPAA_SEC_ERR("XFORM not specified");
2761                 ret = -EINVAL;
2762                 goto out;
2763         }
2764         if (ret) {
2765                 DPAA_SEC_ERR("Failed to process xform");
2766                 goto out;
2767         }
2768
2769         if (ipsec_xform->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS) {
2770                 if (ipsec_xform->tunnel.type ==
2771                                 RTE_SECURITY_IPSEC_TUNNEL_IPV4) {
2772                         session->ip4_hdr.ip_v = IPVERSION;
2773                         session->ip4_hdr.ip_hl = 5;
2774                         session->ip4_hdr.ip_len = rte_cpu_to_be_16(
2775                                                 sizeof(session->ip4_hdr));
2776                         session->ip4_hdr.ip_tos = ipsec_xform->tunnel.ipv4.dscp;
2777                         session->ip4_hdr.ip_id = 0;
2778                         session->ip4_hdr.ip_off = 0;
2779                         session->ip4_hdr.ip_ttl = ipsec_xform->tunnel.ipv4.ttl;
2780                         session->ip4_hdr.ip_p = (ipsec_xform->proto ==
2781                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2782                                         IPPROTO_ESP : IPPROTO_AH;
2783                         session->ip4_hdr.ip_sum = 0;
2784                         session->ip4_hdr.ip_src =
2785                                         ipsec_xform->tunnel.ipv4.src_ip;
2786                         session->ip4_hdr.ip_dst =
2787                                         ipsec_xform->tunnel.ipv4.dst_ip;
2788                         session->ip4_hdr.ip_sum = calc_chksum((uint16_t *)
2789                                                 (void *)&session->ip4_hdr,
2790                                                 sizeof(struct ip));
2791                         session->encap_pdb.ip_hdr_len = sizeof(struct ip);
2792                 } else if (ipsec_xform->tunnel.type ==
2793                                 RTE_SECURITY_IPSEC_TUNNEL_IPV6) {
2794                         session->ip6_hdr.vtc_flow = rte_cpu_to_be_32(
2795                                 DPAA_IPv6_DEFAULT_VTC_FLOW |
2796                                 ((ipsec_xform->tunnel.ipv6.dscp <<
2797                                         RTE_IPV6_HDR_TC_SHIFT) &
2798                                         RTE_IPV6_HDR_TC_MASK) |
2799                                 ((ipsec_xform->tunnel.ipv6.flabel <<
2800                                         RTE_IPV6_HDR_FL_SHIFT) &
2801                                         RTE_IPV6_HDR_FL_MASK));
2802                         /* Payload length will be updated by HW */
2803                         session->ip6_hdr.payload_len = 0;
2804                         session->ip6_hdr.hop_limits =
2805                                         ipsec_xform->tunnel.ipv6.hlimit;
2806                         session->ip6_hdr.proto = (ipsec_xform->proto ==
2807                                         RTE_SECURITY_IPSEC_SA_PROTO_ESP) ?
2808                                         IPPROTO_ESP : IPPROTO_AH;
2809                         memcpy(&session->ip6_hdr.src_addr,
2810                                         &ipsec_xform->tunnel.ipv6.src_addr, 16);
2811                         memcpy(&session->ip6_hdr.dst_addr,
2812                                         &ipsec_xform->tunnel.ipv6.dst_addr, 16);
2813                         session->encap_pdb.ip_hdr_len =
2814                                                 sizeof(struct rte_ipv6_hdr);
2815                 }
2816                 session->encap_pdb.options =
2817                         (IPVERSION << PDBNH_ESP_ENCAP_SHIFT) |
2818                         PDBOPTS_ESP_OIHI_PDB_INL |
2819                         PDBOPTS_ESP_IVSRC |
2820                         PDBHMO_ESP_ENCAP_DTTL |
2821                         PDBHMO_ESP_SNR;
2822                 if (ipsec_xform->options.esn)
2823                         session->encap_pdb.options |= PDBOPTS_ESP_ESN;
2824                 session->encap_pdb.spi = ipsec_xform->spi;
2825
2826         } else if (ipsec_xform->direction ==
2827                         RTE_SECURITY_IPSEC_SA_DIR_INGRESS) {
2828                 if (ipsec_xform->tunnel.type == RTE_SECURITY_IPSEC_TUNNEL_IPV4)
2829                         session->decap_pdb.options = sizeof(struct ip) << 16;
2830                 else
2831                         session->decap_pdb.options =
2832                                         sizeof(struct rte_ipv6_hdr) << 16;
2833                 if (ipsec_xform->options.esn)
2834                         session->decap_pdb.options |= PDBOPTS_ESP_ESN;
2835                 if (ipsec_xform->replay_win_sz) {
2836                         uint32_t win_sz;
2837                         win_sz = rte_align32pow2(ipsec_xform->replay_win_sz);
2838
2839                         switch (win_sz) {
2840                         case 1:
2841                         case 2:
2842                         case 4:
2843                         case 8:
2844                         case 16:
2845                         case 32:
2846                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS32;
2847                                 break;
2848                         case 64:
2849                                 session->decap_pdb.options |= PDBOPTS_ESP_ARS64;
2850                                 break;
2851                         default:
2852                                 session->decap_pdb.options |=
2853                                                         PDBOPTS_ESP_ARS128;
2854                         }
2855                 }
2856         } else
2857                 goto out;
2858         rte_spinlock_lock(&internals->lock);
2859         for (i = 0; i < MAX_DPAA_CORES; i++) {
2860                 session->inq[i] = dpaa_sec_attach_rxq(internals);
2861                 if (session->inq[i] == NULL) {
2862                         DPAA_SEC_ERR("unable to attach sec queue");
2863                         rte_spinlock_unlock(&internals->lock);
2864                         goto out;
2865                 }
2866         }
2867         rte_spinlock_unlock(&internals->lock);
2868
2869         return 0;
2870 out:
2871         free_session_data(session);
2872         return -1;
2873 }
2874
/* Build a PDCP (lookaside protocol) session from @conf.
 *
 * Accepts a cipher-first or auth-first transform chain (either member
 * may be absent/NULL), maps the algorithms onto the SEC PDCP
 * PDCP_CIPHER_TYPE_* / PDCP_AUTH_TYPE_* selectors, copies the keys into
 * session-owned buffers, records the PDCP-specific parameters (bearer,
 * direction, SN size, HFN handling, SDAP) and attaches one SEC Rx queue
 * per core.
 *
 * Returns 0 on success or a negative errno; on failure key buffers are
 * freed and the session is scrubbed.
 */
static int
dpaa_sec_set_pdcp_session(struct rte_cryptodev *dev,
			  struct rte_security_session_conf *conf,
			  void *sess)
{
	struct rte_security_pdcp_xform *pdcp_xform = &conf->pdcp;
	struct rte_crypto_sym_xform *xform = conf->crypto_xform;
	struct rte_crypto_auth_xform *auth_xform = NULL;
	struct rte_crypto_cipher_xform *cipher_xform = NULL;
	dpaa_sec_session *session = (dpaa_sec_session *)sess;
	struct dpaa_sec_dev_private *dev_priv = dev->data->dev_private;
	uint32_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();

	memset(session, 0, sizeof(dpaa_sec_session));

	/* find xfrm types */
	if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
		cipher_xform = &xform->cipher;
		if (xform->next != NULL)
			auth_xform = &xform->next->auth;
	} else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
		auth_xform = &xform->auth;
		if (xform->next != NULL)
			cipher_xform = &xform->next->cipher;
	} else {
		DPAA_SEC_ERR("Invalid crypto type");
		return -EINVAL;
	}

	session->proto_alg = conf->protocol;
	session->ctxt = DPAA_SEC_PDCP;

	if (cipher_xform) {
		/* Map the generic cipher algorithm onto the SEC PDCP
		 * descriptor's cipher-type selector.
		 */
		switch (cipher_xform->algo) {
		case RTE_CRYPTO_CIPHER_SNOW3G_UEA2:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_SNOW;
			break;
		case RTE_CRYPTO_CIPHER_ZUC_EEA3:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_ZUC;
			break;
		case RTE_CRYPTO_CIPHER_AES_CTR:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_AES;
			break;
		case RTE_CRYPTO_CIPHER_NULL:
			session->cipher_key.alg = PDCP_CIPHER_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Undefined Cipher specified %u",
				      session->cipher_alg);
			return -EINVAL;
		}

		session->cipher_key.data = rte_zmalloc(NULL,
					       cipher_xform->key.length,
					       RTE_CACHE_LINE_SIZE);
		/* Zero-length key (NULL cipher) may legally yield NULL. */
		if (session->cipher_key.data == NULL &&
				cipher_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for cipher key");
			return -ENOMEM;
		}
		session->cipher_key.length = cipher_xform->key.length;
		memcpy(session->cipher_key.data, cipher_xform->key.data,
			cipher_xform->key.length);
		session->dir = (cipher_xform->op == RTE_CRYPTO_CIPHER_OP_ENCRYPT) ?
					DIR_ENC : DIR_DEC;
		session->cipher_alg = cipher_xform->algo;
	} else {
		session->cipher_key.data = NULL;
		session->cipher_key.length = 0;
		session->cipher_alg = RTE_CRYPTO_CIPHER_NULL;
		session->dir = DIR_ENC;
	}

	/* Control-plane PDCP only supports 5- or 12-bit sequence numbers. */
	if (pdcp_xform->domain == RTE_SECURITY_PDCP_MODE_CONTROL) {
		if (pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_5 &&
		    pdcp_xform->sn_size != RTE_SECURITY_PDCP_SN_SIZE_12) {
			DPAA_SEC_ERR(
				"PDCP Seq Num size should be 5/12 bits for cmode");
			ret = -EINVAL;
			goto out;
		}
	}

	if (auth_xform) {
		switch (auth_xform->algo) {
		case RTE_CRYPTO_AUTH_SNOW3G_UIA2:
			session->auth_key.alg = PDCP_AUTH_TYPE_SNOW;
			break;
		case RTE_CRYPTO_AUTH_ZUC_EIA3:
			session->auth_key.alg = PDCP_AUTH_TYPE_ZUC;
			break;
		case RTE_CRYPTO_AUTH_AES_CMAC:
			session->auth_key.alg = PDCP_AUTH_TYPE_AES;
			break;
		case RTE_CRYPTO_AUTH_NULL:
			session->auth_key.alg = PDCP_AUTH_TYPE_NULL;
			break;
		default:
			DPAA_SEC_ERR("Crypto: Unsupported auth alg %u",
				      session->auth_alg);
			/* Cipher key may already be allocated above. */
			rte_free(session->cipher_key.data);
			return -EINVAL;
		}
		session->auth_key.data = rte_zmalloc(NULL,
						     auth_xform->key.length,
						     RTE_CACHE_LINE_SIZE);
		if (!session->auth_key.data &&
		    auth_xform->key.length > 0) {
			DPAA_SEC_ERR("No Memory for auth key");
			rte_free(session->cipher_key.data);
			return -ENOMEM;
		}
		session->auth_key.length = auth_xform->key.length;
		memcpy(session->auth_key.data, auth_xform->key.data,
		       auth_xform->key.length);
		session->auth_alg = auth_xform->algo;
	} else {
		session->auth_key.data = NULL;
		session->auth_key.length = 0;
		session->auth_alg = 0;
	}
	session->pdcp.domain = pdcp_xform->domain;
	session->pdcp.bearer = pdcp_xform->bearer;
	session->pdcp.pkt_dir = pdcp_xform->pkt_dir;
	session->pdcp.sn_size = pdcp_xform->sn_size;
	session->pdcp.hfn = pdcp_xform->hfn;
	session->pdcp.hfn_threshold = pdcp_xform->hfn_threshold;
	session->pdcp.hfn_ovd = pdcp_xform->hfn_ovrd;
	session->pdcp.sdap_enabled = pdcp_xform->sdap_enabled;
	/* With HFN override, the per-packet HFN is read from the cipher
	 * IV offset in the crypto op.
	 */
	if (cipher_xform)
		session->pdcp.hfn_ovd_offset = cipher_xform->iv.offset;

	rte_spinlock_lock(&dev_priv->lock);
	for (i = 0; i < MAX_DPAA_CORES; i++) {
		session->inq[i] = dpaa_sec_attach_rxq(dev_priv);
		if (session->inq[i] == NULL) {
			DPAA_SEC_ERR("unable to attach sec queue");
			rte_spinlock_unlock(&dev_priv->lock);
			ret = -EBUSY;
			goto out;
		}
	}
	rte_spinlock_unlock(&dev_priv->lock);
	return 0;
out:
	rte_free(session->auth_key.data);
	rte_free(session->cipher_key.data);
	memset(session, 0, sizeof(dpaa_sec_session));
	return ret;
}
3028
3029 static int
3030 dpaa_sec_security_session_create(void *dev,
3031                                  struct rte_security_session_conf *conf,
3032                                  struct rte_security_session *sess,
3033                                  struct rte_mempool *mempool)
3034 {
3035         void *sess_private_data;
3036         struct rte_cryptodev *cdev = (struct rte_cryptodev *)dev;
3037         int ret;
3038
3039         if (rte_mempool_get(mempool, &sess_private_data)) {
3040                 DPAA_SEC_ERR("Couldn't get object from session mempool");
3041                 return -ENOMEM;
3042         }
3043
3044         switch (conf->protocol) {
3045         case RTE_SECURITY_PROTOCOL_IPSEC:
3046                 ret = dpaa_sec_set_ipsec_session(cdev, conf,
3047                                 sess_private_data);
3048                 break;
3049         case RTE_SECURITY_PROTOCOL_PDCP:
3050                 ret = dpaa_sec_set_pdcp_session(cdev, conf,
3051                                 sess_private_data);
3052                 break;
3053         case RTE_SECURITY_PROTOCOL_MACSEC:
3054                 return -ENOTSUP;
3055         default:
3056                 return -EINVAL;
3057         }
3058         if (ret != 0) {
3059                 DPAA_SEC_ERR("failed to configure session parameters");
3060                 /* Return session to mempool */
3061                 rte_mempool_put(mempool, sess_private_data);
3062                 return ret;
3063         }
3064
3065         set_sec_session_private_data(sess, sess_private_data);
3066
3067         return ret;
3068 }
3069
3070 /** Clear the memory of session so it doesn't leave key material behind */
3071 static int
3072 dpaa_sec_security_session_destroy(void *dev __rte_unused,
3073                 struct rte_security_session *sess)
3074 {
3075         PMD_INIT_FUNC_TRACE();
3076         void *sess_priv = get_sec_session_private_data(sess);
3077         dpaa_sec_session *s = (dpaa_sec_session *)sess_priv;
3078
3079         if (sess_priv) {
3080                 free_session_memory((struct rte_cryptodev *)dev, s);
3081                 set_sec_session_private_data(sess, NULL);
3082         }
3083         return 0;
3084 }
3085 #endif
3086 static int
3087 dpaa_sec_dev_configure(struct rte_cryptodev *dev __rte_unused,
3088                        struct rte_cryptodev_config *config __rte_unused)
3089 {
3090         PMD_INIT_FUNC_TRACE();
3091
3092         return 0;
3093 }
3094
3095 static int
3096 dpaa_sec_dev_start(struct rte_cryptodev *dev __rte_unused)
3097 {
3098         PMD_INIT_FUNC_TRACE();
3099         return 0;
3100 }
3101
3102 static void
3103 dpaa_sec_dev_stop(struct rte_cryptodev *dev __rte_unused)
3104 {
3105         PMD_INIT_FUNC_TRACE();
3106 }
3107
3108 static int
3109 dpaa_sec_dev_close(struct rte_cryptodev *dev)
3110 {
3111         PMD_INIT_FUNC_TRACE();
3112
3113         if (dev == NULL)
3114                 return -ENOMEM;
3115
3116         return 0;
3117 }
3118
3119 static void
3120 dpaa_sec_dev_infos_get(struct rte_cryptodev *dev,
3121                        struct rte_cryptodev_info *info)
3122 {
3123         struct dpaa_sec_dev_private *internals = dev->data->dev_private;
3124
3125         PMD_INIT_FUNC_TRACE();
3126         if (info != NULL) {
3127                 info->max_nb_queue_pairs = internals->max_nb_queue_pairs;
3128                 info->feature_flags = dev->feature_flags;
3129                 info->capabilities = dpaa_sec_capabilities;
3130                 info->sym.max_nb_sessions = internals->max_nb_sessions;
3131                 info->driver_id = cryptodev_driver_id;
3132         }
3133 }
3134
/* QMan dequeue callback for event queues scheduled as PARALLEL.
 * Translates a completed SEC frame descriptor back into the crypto op,
 * fills in the rte_event from the queue's stored event template and
 * hands the op to the eventdev via *bufs. The DQRR entry is consumed
 * immediately (qman_cb_dqrr_consume).
 */
static enum qman_cb_dqrr_result
dpaa_sec_process_parallel_event(void *event,
			struct qman_portal *qm __always_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	/* Recover the op context that wraps the S/G job descriptor. */
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		/* Protocol offload may change the packet length (e.g.
		 * encap/decap); propagate the output S/G length to the mbuf.
		 */
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	/* Non-zero fd_status is the raw SEC error word. */
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;

	/* Copy scheduling metadata from the event template configured at
	 * dpaa_sec_eventq_attach() time.
	 */
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;
	*bufs = (void *)ctx->op;

	/* The ctx is no longer referenced once the op is handed off. */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_consume;
}
3188
/* QMan dequeue callback for event queues scheduled as ATOMIC.
 * Same FD-to-crypto-op translation as the parallel handler, but the DQRR
 * entry is held (qman_cb_dqrr_defer) and its index is recorded per-lcore
 * so the entry can be consumed later, when the application finishes the
 * atomic flow (preserving per-flow ordering/exclusivity).
 */
static enum qman_cb_dqrr_result
dpaa_sec_process_atomic_event(void *event,
			struct qman_portal *qm __rte_unused,
			struct qman_fq *outq,
			const struct qm_dqrr_entry *dqrr,
			void **bufs)
{
	u8 index;
	const struct qm_fd *fd;
	struct dpaa_sec_job *job;
	struct dpaa_sec_op_ctx *ctx;
	struct rte_event *ev = (struct rte_event *)event;

	fd = &dqrr->fd;

	/* sg is embedded in an op ctx,
	 * sg[0] is for output
	 * sg[1] for input
	 */
	job = rte_dpaa_mem_ptov(qm_fd_addr_get64(fd));

	/* Recover the op context that wraps the S/G job descriptor. */
	ctx = container_of(job, struct dpaa_sec_op_ctx, job);
	ctx->fd_status = fd->status;
	if (ctx->op->sess_type == RTE_CRYPTO_OP_SECURITY_SESSION) {
		struct qm_sg_entry *sg_out;
		uint32_t len;

		/* Protocol offload may change the packet length; propagate
		 * the output S/G length to the mbuf.
		 */
		sg_out = &job->sg[0];
		hw_sg_to_cpu(sg_out);
		len = sg_out->length;
		ctx->op->sym->m_src->pkt_len = len;
		ctx->op->sym->m_src->data_len = len;
	}
	/* Non-zero fd_status is the raw SEC error word. */
	if (!ctx->fd_status) {
		ctx->op->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
	} else {
		DPAA_SEC_DP_WARN("SEC return err: 0x%x", ctx->fd_status);
		ctx->op->status = RTE_CRYPTO_OP_STATUS_ERROR;
	}
	ev->event_ptr = (void *)ctx->op;
	/* Copy scheduling metadata from the event template configured at
	 * dpaa_sec_eventq_attach() time.
	 */
	ev->flow_id = outq->ev.flow_id;
	ev->sub_event_type = outq->ev.sub_event_type;
	ev->event_type = RTE_EVENT_TYPE_CRYPTODEV;
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = outq->ev.sched_type;
	ev->queue_id = outq->ev.queue_id;
	ev->priority = outq->ev.priority;

	/* Save active dqrr entries */
	index = ((uintptr_t)dqrr >> 6) & (16/*QM_DQRR_SIZE*/ - 1);
	DPAA_PER_LCORE_DQRR_SIZE++;
	DPAA_PER_LCORE_DQRR_HELD |= 1 << index;
	DPAA_PER_LCORE_DQRR_MBUF(index) = ctx->op->sym->m_src;
	/* 1-based so that 0 can mean "no held entry". */
	ev->impl_opaque = index + 1;
	*dpaa_seqn(ctx->op->sym->m_src) = (uint32_t)index + 1;
	*bufs = (void *)ctx->op;

	/* The ctx is no longer referenced once the op is handed off. */
	rte_mempool_put(ctx->ctx_pool, (void *)ctx);

	return qman_cb_dqrr_defer;
}
3250
3251 int
3252 dpaa_sec_eventq_attach(const struct rte_cryptodev *dev,
3253                 int qp_id,
3254                 uint16_t ch_id,
3255                 const struct rte_event *event)
3256 {
3257         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3258         struct qm_mcc_initfq opts = {0};
3259
3260         int ret;
3261
3262         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3263                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3264         opts.fqd.dest.channel = ch_id;
3265
3266         switch (event->sched_type) {
3267         case RTE_SCHED_TYPE_ATOMIC:
3268                 opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
3269                 /* Reset FQCTRL_AVOIDBLOCK bit as it is unnecessary
3270                  * configuration with HOLD_ACTIVE setting
3271                  */
3272                 opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
3273                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_atomic_event;
3274                 break;
3275         case RTE_SCHED_TYPE_ORDERED:
3276                 DPAA_SEC_ERR("Ordered queue schedule type is not supported\n");
3277                 return -ENOTSUP;
3278         default:
3279                 opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
3280                 qp->outq.cb.dqrr_dpdk_cb = dpaa_sec_process_parallel_event;
3281                 break;
3282         }
3283
3284         ret = qman_init_fq(&qp->outq, QMAN_INITFQ_FLAG_SCHED, &opts);
3285         if (unlikely(ret)) {
3286                 DPAA_SEC_ERR("unable to init caam source fq!");
3287                 return ret;
3288         }
3289
3290         memcpy(&qp->outq.ev, event, sizeof(struct rte_event));
3291
3292         return 0;
3293 }
3294
3295 int
3296 dpaa_sec_eventq_detach(const struct rte_cryptodev *dev,
3297                         int qp_id)
3298 {
3299         struct qm_mcc_initfq opts = {0};
3300         int ret;
3301         struct dpaa_sec_qp *qp = dev->data->queue_pairs[qp_id];
3302
3303         opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
3304                        QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CONTEXTB;
3305         qp->outq.cb.dqrr = dqrr_out_fq_cb_rx;
3306         qp->outq.cb.ern  = ern_sec_fq_handler;
3307         qman_retire_fq(&qp->outq, NULL);
3308         qman_oos_fq(&qp->outq);
3309         ret = qman_init_fq(&qp->outq, 0, &opts);
3310         if (ret)
3311                 RTE_LOG(ERR, PMD, "Error in qman_init_fq: ret: %d\n", ret);
3312         qp->outq.cb.dqrr = NULL;
3313
3314         return ret;
3315 }
3316
/* Cryptodev operations table handed to the framework at init time.
 * Stats and queue-count callbacks are intentionally not provided.
 */
static struct rte_cryptodev_ops crypto_ops = {
	.dev_configure        = dpaa_sec_dev_configure,
	.dev_start            = dpaa_sec_dev_start,
	.dev_stop             = dpaa_sec_dev_stop,
	.dev_close            = dpaa_sec_dev_close,
	.dev_infos_get        = dpaa_sec_dev_infos_get,
	.queue_pair_setup     = dpaa_sec_queue_pair_setup,
	.queue_pair_release   = dpaa_sec_queue_pair_release,
	.sym_session_get_size     = dpaa_sec_sym_session_get_size,
	.sym_session_configure    = dpaa_sec_sym_session_configure,
	.sym_session_clear        = dpaa_sec_sym_session_clear
};
3329
3330 #ifdef RTE_LIB_SECURITY
3331 static const struct rte_security_capability *
3332 dpaa_sec_capabilities_get(void *device __rte_unused)
3333 {
3334         return dpaa_sec_security_cap;
3335 }
3336
/* rte_security operations table; update/stats/metadata hooks are
 * intentionally unimplemented (NULL).
 */
static const struct rte_security_ops dpaa_sec_security_ops = {
	.session_create = dpaa_sec_security_session_create,
	.session_update = NULL,
	.session_stats_get = NULL,
	.session_destroy = dpaa_sec_security_session_destroy,
	.set_pkt_metadata = NULL,
	.capabilities_get = dpaa_sec_capabilities_get
};
3345 #endif
3346 static int
3347 dpaa_sec_uninit(struct rte_cryptodev *dev)
3348 {
3349         struct dpaa_sec_dev_private *internals;
3350
3351         if (dev == NULL)
3352                 return -ENODEV;
3353
3354         internals = dev->data->dev_private;
3355         rte_free(dev->security_ctx);
3356
3357         rte_free(internals);
3358
3359         DPAA_SEC_INFO("Closing DPAA_SEC device %s on numa socket %u",
3360                       dev->data->name, rte_socket_id());
3361
3362         return 0;
3363 }
3364
3365 static int
3366 dpaa_sec_dev_init(struct rte_cryptodev *cryptodev)
3367 {
3368         struct dpaa_sec_dev_private *internals;
3369 #ifdef RTE_LIB_SECURITY
3370         struct rte_security_ctx *security_instance;
3371 #endif
3372         struct dpaa_sec_qp *qp;
3373         uint32_t i, flags;
3374         int ret;
3375
3376         PMD_INIT_FUNC_TRACE();
3377
3378         cryptodev->driver_id = cryptodev_driver_id;
3379         cryptodev->dev_ops = &crypto_ops;
3380
3381         cryptodev->enqueue_burst = dpaa_sec_enqueue_burst;
3382         cryptodev->dequeue_burst = dpaa_sec_dequeue_burst;
3383         cryptodev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO |
3384                         RTE_CRYPTODEV_FF_HW_ACCELERATED |
3385                         RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING |
3386                         RTE_CRYPTODEV_FF_SECURITY |
3387                         RTE_CRYPTODEV_FF_IN_PLACE_SGL |
3388                         RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT |
3389                         RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT |
3390                         RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT |
3391                         RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT;
3392
3393         internals = cryptodev->data->dev_private;
3394         internals->max_nb_queue_pairs = RTE_DPAA_MAX_NB_SEC_QPS;
3395         internals->max_nb_sessions = RTE_DPAA_SEC_PMD_MAX_NB_SESSIONS;
3396
3397         /*
3398          * For secondary processes, we don't initialise any further as primary
3399          * has already done this work. Only check we don't need a different
3400          * RX function
3401          */
3402         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
3403                 DPAA_SEC_WARN("Device already init by primary process");
3404                 return 0;
3405         }
3406 #ifdef RTE_LIB_SECURITY
3407         /* Initialize security_ctx only for primary process*/
3408         security_instance = rte_malloc("rte_security_instances_ops",
3409                                 sizeof(struct rte_security_ctx), 0);
3410         if (security_instance == NULL)
3411                 return -ENOMEM;
3412         security_instance->device = (void *)cryptodev;
3413         security_instance->ops = &dpaa_sec_security_ops;
3414         security_instance->sess_cnt = 0;
3415         cryptodev->security_ctx = security_instance;
3416 #endif
3417         rte_spinlock_init(&internals->lock);
3418         for (i = 0; i < internals->max_nb_queue_pairs; i++) {
3419                 /* init qman fq for queue pair */
3420                 qp = &internals->qps[i];
3421                 ret = dpaa_sec_init_tx(&qp->outq);
3422                 if (ret) {
3423                         DPAA_SEC_ERR("config tx of queue pair  %d", i);
3424                         goto init_error;
3425                 }
3426         }
3427
3428         flags = QMAN_FQ_FLAG_LOCKED | QMAN_FQ_FLAG_DYNAMIC_FQID |
3429                 QMAN_FQ_FLAG_TO_DCPORTAL;
3430         for (i = 0; i < RTE_DPAA_MAX_RX_QUEUE; i++) {
3431                 /* create rx qman fq for sessions*/
3432                 ret = qman_create_fq(0, flags, &internals->inq[i]);
3433                 if (unlikely(ret != 0)) {
3434                         DPAA_SEC_ERR("sec qman_create_fq failed");
3435                         goto init_error;
3436                 }
3437         }
3438
3439         RTE_LOG(INFO, PMD, "%s cryptodev init\n", cryptodev->data->name);
3440         return 0;
3441
3442 init_error:
3443         DPAA_SEC_ERR("driver %s: create failed\n", cryptodev->data->name);
3444
3445         rte_free(cryptodev->security_ctx);
3446         return -EFAULT;
3447 }
3448
3449 static int
3450 cryptodev_dpaa_sec_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
3451                                 struct rte_dpaa_device *dpaa_dev)
3452 {
3453         struct rte_cryptodev *cryptodev;
3454         char cryptodev_name[RTE_CRYPTODEV_NAME_MAX_LEN];
3455
3456         int retval;
3457
3458         snprintf(cryptodev_name, sizeof(cryptodev_name), "%s", dpaa_dev->name);
3459
3460         cryptodev = rte_cryptodev_pmd_allocate(cryptodev_name, rte_socket_id());
3461         if (cryptodev == NULL)
3462                 return -ENOMEM;
3463
3464         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
3465                 cryptodev->data->dev_private = rte_zmalloc_socket(
3466                                         "cryptodev private structure",
3467                                         sizeof(struct dpaa_sec_dev_private),
3468                                         RTE_CACHE_LINE_SIZE,
3469                                         rte_socket_id());
3470
3471                 if (cryptodev->data->dev_private == NULL)
3472                         rte_panic("Cannot allocate memzone for private "
3473                                         "device data");
3474         }
3475
3476         dpaa_dev->crypto_dev = cryptodev;
3477         cryptodev->device = &dpaa_dev->device;
3478
3479         /* init user callbacks */
3480         TAILQ_INIT(&(cryptodev->link_intr_cbs));
3481
3482         /* if sec device version is not configured */
3483         if (!rta_get_sec_era()) {
3484                 const struct device_node *caam_node;
3485
3486                 for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
3487                         const uint32_t *prop = of_get_property(caam_node,
3488                                         "fsl,sec-era",
3489                                         NULL);
3490                         if (prop) {
3491                                 rta_set_sec_era(
3492                                         INTL_SEC_ERA(rte_cpu_to_be_32(*prop)));
3493                                 break;
3494                         }
3495                 }
3496         }
3497
3498         if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
3499                 retval = rte_dpaa_portal_init((void *)1);
3500                 if (retval) {
3501                         DPAA_SEC_ERR("Unable to initialize portal");
3502                         goto out;
3503                 }
3504         }
3505
3506         /* Invoke PMD device initialization function */
3507         retval = dpaa_sec_dev_init(cryptodev);
3508         if (retval == 0)
3509                 return 0;
3510
3511         retval = -ENXIO;
3512 out:
3513         /* In case of error, cleanup is done */
3514         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
3515                 rte_free(cryptodev->data->dev_private);
3516
3517         rte_cryptodev_pmd_release_device(cryptodev);
3518
3519         return retval;
3520 }
3521
3522 static int
3523 cryptodev_dpaa_sec_remove(struct rte_dpaa_device *dpaa_dev)
3524 {
3525         struct rte_cryptodev *cryptodev;
3526         int ret;
3527
3528         cryptodev = dpaa_dev->crypto_dev;
3529         if (cryptodev == NULL)
3530                 return -ENODEV;
3531
3532         ret = dpaa_sec_uninit(cryptodev);
3533         if (ret)
3534                 return ret;
3535
3536         return rte_cryptodev_pmd_destroy(cryptodev);
3537 }
3538
/* Bus driver descriptor binding this PMD to DPAA crypto (CAAM) devices. */
static struct rte_dpaa_driver rte_dpaa_sec_driver = {
	.drv_type = FSL_DPAA_CRYPTO,
	.driver = {
		.name = "DPAA SEC PMD"
	},
	.probe = cryptodev_dpaa_sec_probe,
	.remove = cryptodev_dpaa_sec_remove,
};

static struct cryptodev_driver dpaa_sec_crypto_drv;

/* Register with the DPAA bus, allocate a cryptodev driver id and create
 * the PMD's log type (default level NOTICE).
 */
RTE_PMD_REGISTER_DPAA(CRYPTODEV_NAME_DPAA_SEC_PMD, rte_dpaa_sec_driver);
RTE_PMD_REGISTER_CRYPTO_DRIVER(dpaa_sec_crypto_drv, rte_dpaa_sec_driver.driver,
		cryptodev_driver_id);
RTE_LOG_REGISTER(dpaa_logtype_sec, pmd.crypto.dpaa, NOTICE);